/external/tensorflow/tensorflow/python/keras/optimizer_v2/ |
D | utils.py |
    28  def all_reduce_sum_gradients(grads_and_vars):  (argument)
    37    grads_and_vars = list(grads_and_vars)
    38    filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)
    52    for g, v in grads_and_vars:
    62  def filter_empty_gradients(grads_and_vars):  (argument)
    64    grads_and_vars = tuple(grads_and_vars)
    65    if not grads_and_vars:
    66      return grads_and_vars
    70    for grad, var in grads_and_vars:
    79      ([v.name for _, v in grads_and_vars],))
    [all …]
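From the matched lines, filter_empty_gradients drops (grad, var) pairs whose gradient is None and raises once nothing is left. A minimal sketch of that pattern (an approximation, not the exact TensorFlow source):

    def filter_empty_gradients(grads_and_vars):
      # Materialize the iterable once so it can be traversed twice.
      grads_and_vars = tuple(grads_and_vars)
      if not grads_and_vars:
        return grads_and_vars
      # Keep only pairs that actually carry a gradient.
      filtered = tuple((g, v) for g, v in grads_and_vars if g is not None)
      if not filtered:
        raise ValueError("No gradients provided for any variable: %s." %
                         ([v.name for _, v in grads_and_vars],))
      return filtered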
|
D | optimizer_v2.py |
    479  def _transform_unaggregated_gradients(self, grads_and_vars):  (argument)
    481    return grads_and_vars
    483  def _aggregate_gradients(self, grads_and_vars):  (argument)
    485    return self.gradient_aggregator(grads_and_vars)
    487  def _transform_gradients(self, grads_and_vars):  (argument)
    490    grads_and_vars = self._clipvalue_fn(grads_and_vars)
    492    grads_and_vars = self._clipnorm_fn(grads_and_vars)
    494    grads_and_vars = self._global_clipnorm_fn(grads_and_vars)
    497    grads_and_vars = fn(grads_and_vars)
    498    return grads_and_vars
    [all …]
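The _transform_gradients hook above chains per-pair transforms (clipvalue, clipnorm, global clipnorm) over the (grad, var) list. A hedged sketch of what such transforms look like; the function names below are illustrative, not OptimizerV2 internals:

    import tensorflow as tf

    def clipvalue_fn(grads_and_vars, clipvalue=1.0):
      # Clip each gradient elementwise into [-clipvalue, clipvalue].
      return [(tf.clip_by_value(g, -clipvalue, clipvalue), v)
              for g, v in grads_and_vars]

    def clipnorm_fn(grads_and_vars, clipnorm=1.0):
      # Clip each gradient independently to the given L2 norm.
      return [(tf.clip_by_norm(g, clipnorm), v) for g, v in grads_and_vars]

    def global_clipnorm_fn(grads_and_vars, clipnorm=1.0):
      # Clip all gradients jointly so their combined norm is <= clipnorm.
      grads, variables = zip(*grads_and_vars)
      clipped, _ = tf.clip_by_global_norm(list(grads), clipnorm)
      return list(zip(clipped, variables))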
|
/external/tensorflow/tensorflow/python/training/experimental/ |
D | loss_scale_optimizer.py |
    119    grads_and_vars = self._optimizer.compute_gradients(
    127    grads = [g for g, _ in grads_and_vars]
    128    variables = [v for _, v in grads_and_vars]
    156  def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    182      return self._optimizer.apply_gradients(grads_and_vars, global_step, name)
    185    grads_and_vars = tuple(grads_and_vars)
    189        self._distributed_apply, args=(grads_and_vars, global_step, name))
    193      grads_and_vars,  (argument)
    216    grads = [g for g, _ in grads_and_vars]
    220    return self._apply_gradients(distribution, grads_and_vars, global_step,
    [all …]
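This is the TF1-style loss scale wrapper: compute_gradients runs on the scaled loss and unscales the resulting (grad, var) pairs before apply_gradients sees them. A short usage sketch under that assumption (variable names are illustrative):

    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()

    x = tf.Variable(3.0)
    loss = tf.square(x)
    # 'dynamic' selects dynamic loss scaling; compute_gradients scales the
    # loss and unscales the resulting gradients before they are applied.
    opt = tf.train.experimental.MixedPrecisionLossScaleOptimizer(
        tf.train.GradientDescentOptimizer(0.1), loss_scale='dynamic')
    grads_and_vars = opt.compute_gradients(loss)
    train_op = opt.apply_gradients(grads_and_vars)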
|
/external/tensorflow/tensorflow/python/training/ |
D | optimizer.py |
    407    grads_and_vars = self.compute_gradients(
    413    vars_with_grad = [v for g, v in grads_and_vars if g is not None]
    418        ([str(v) for _, v in grads_and_vars], loss))
    420    return self.apply_gradients(grads_and_vars, global_step=global_step,
    523    grads_and_vars = list(zip(grads, var_list))
    525        [v for g, v in grads_and_vars
    527    return grads_and_vars
    539  def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    575    grads_and_vars = get_filtered_grad_fn(lambda: grads_and_vars)()
    577        self._distributed_apply, args=(grads_and_vars, global_step, name))
    [all …]
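Lines 407-420 are the body of Optimizer.minimize: it is just compute_gradients followed by apply_gradients, with a None-gradient check in between. The classic two-step form, runnable in TF1 graph mode (names are illustrative):

    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()

    x = tf.Variable(3.0)
    loss = tf.square(x)
    opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    grads_and_vars = opt.compute_gradients(loss, var_list=[x])
    # (grad, var) pairs can be inspected or rewritten here before applying.
    vars_with_grad = [v for g, v in grads_and_vars if g is not None]
    train_op = opt.apply_gradients(
        grads_and_vars, global_step=tf.train.get_or_create_global_step())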
|
D | optimizer_test.py |
    196  grads_and_vars = sgd_op.compute_gradients(loss, [var0, var1])
    201      for j, gv in enumerate(grads_and_vars)
    205      for j, gv in enumerate(grads_and_vars)
    230  grads_and_vars = sgd_op.compute_gradients(f, [x])
    231  self.assertEqual(1, len(grads_and_vars))
    232  grad, x_as_var = grads_and_vars[0]
    237  sgd_op.apply_gradients(grads_and_vars)
|
D | sync_replicas_optimizer.py |
    224  def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    247    if not grads_and_vars:
    276    for grad, var in grads_and_vars:
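A construction sketch for context; illustrative only, since a real setup also needs the hook from opt.make_session_run_hook() and a between-graph replicated cluster:

    import tensorflow.compat.v1 as tf

    # SyncReplicasOptimizer wraps another TF1 optimizer and waits for
    # gradients from several replicas before applying one aggregated
    # update; apply_gradients (line 224 above) does the accumulation.
    opt = tf.train.SyncReplicasOptimizer(
        tf.train.GradientDescentOptimizer(0.1),
        replicas_to_aggregate=4,
        total_num_replicas=4)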
|
/external/tensorflow/tensorflow/python/keras/mixed_precision/ |
D | loss_scale_optimizer.py |
    681    grads_and_vars = self._optimizer._compute_gradients(  # pylint: disable=protected-access
    686    grads = [g for g, _ in grads_and_vars]
    687    weights = [v for _, v in grads_and_vars]
    700      grads_and_vars,  (argument)
    709    grads_and_vars = tuple(grads_and_vars)
    712        args=(grads_and_vars, name, experimental_aggregate_gradients))
    714  def _apply_gradients_cross_replica(self, distribution, grads_and_vars, name,  (argument)
    716    grads = [g for g, _ in grads_and_vars]
    729    wrapped_vars = _UnwrapPreventer([v for _, v in grads_and_vars])
    864  def _aggregate_gradients(self, grads_and_vars):  (argument)
    [all …]
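This is the Keras (V2) loss scale optimizer. Its public counterpart to the internal _compute_gradients path above is the scale/unscale pair around a GradientTape; a runnable sketch with a toy variable:

    import tensorflow as tf

    opt = tf.keras.mixed_precision.LossScaleOptimizer(
        tf.keras.optimizers.SGD(learning_rate=0.1))
    var = tf.Variable(1.0)
    with tf.GradientTape() as tape:
      # Scale the loss up so small fp16 gradients do not underflow.
      scaled_loss = opt.get_scaled_loss(tf.square(var))
    scaled_grads = tape.gradient(scaled_loss, [var])
    # Undo the scaling before the wrapped optimizer sees the gradients.
    grads = opt.get_unscaled_gradients(scaled_grads)
    opt.apply_gradients(zip(grads, [var]))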
|
/external/tensorflow/tensorflow/python/distribute/ |
D | step_fn.py |
    102  grads_and_vars = self.distribution.extended.call_for_each_replica(
    109      self.distribution, grads_and_vars)
|
/external/tensorflow/tensorflow/python/tpu/ |
D | tpu_embedding_gradient.py |
    46  grads_and_vars = optimizer.compute_gradients(loss, activation_list)
    47  grads = [grad for grad, _ in grads_and_vars]
|
D | tpu_optimizer.py |
    163  def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    185    for (grad, var) in grads_and_vars:
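The loop over (grad, var) at line 185 is where CrossShardOptimizer cross-replica-sums each gradient before delegating to the wrapped optimizer. A construction sketch; illustrative only, since running it requires a TPU context:

    import tensorflow.compat.v1 as tf

    # Wraps a TF1 optimizer; apply_gradients all-reduces each gradient
    # across TPU shards before the inner optimizer applies the update.
    opt = tf.tpu.CrossShardOptimizer(
        tf.train.GradientDescentOptimizer(learning_rate=0.1))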
|
/external/tensorflow/tensorflow/python/keras/distribute/ |
D | mirrored_strategy_test.py |
    80  grads_and_vars = distribution.extended.call_for_each_replica(
    84  …update_ops = optimizer._distributed_apply(distribution, grads_and_vars)  # pylint: disable=protect…
|
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.train.-optimizer.pbtxt | 24 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
D | tensorflow.train.-adam-optimizer.pbtxt | 25 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
D | tensorflow.train.-momentum-optimizer.pbtxt | 25 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
D | tensorflow.train.-gradient-descent-optimizer.pbtxt | 25 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
D | tensorflow.train.-r-m-s-prop-optimizer.pbtxt | 25 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
D | tensorflow.train.-proximal-adagrad-optimizer.pbtxt | 25 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
D | tensorflow.tpu.-cross-shard-optimizer.pbtxt | 25 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
D | tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt | 25 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
D | tensorflow.train.-adagrad-d-a-optimizer.pbtxt | 25 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
D | tensorflow.train.experimental.-mixed-precision-loss-scale-optimizer.pbtxt | 25 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
D | tensorflow.mixed_precision.-mixed-precision-loss-scale-optimizer.pbtxt | 25 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
D | tensorflow.train.-adadelta-optimizer.pbtxt | 25 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
D | tensorflow.train.-adagrad-optimizer.pbtxt | 25 …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|
/external/tensorflow/tensorflow/python/keras/premade/ |
D | linear_test.py |
    148  grads_and_vars = zip(grads, model.trainable_variables)
    149  opt.apply_gradients(grads_and_vars)
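Lines 148-149 show the standard TF2 pattern: zip externally computed gradients with the variables and hand the pairs to apply_gradients. A self-contained version with a toy model (model and data are illustrative):

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    opt = tf.keras.optimizers.SGD(learning_rate=0.1)
    x = tf.ones((4, 3))
    y = tf.zeros((4, 1))
    with tf.GradientTape() as tape:
      loss = tf.reduce_mean(tf.square(model(x) - y))
    grads = tape.gradient(loss, model.trainable_variables)
    grads_and_vars = zip(grads, model.trainable_variables)
    opt.apply_gradients(grads_and_vars)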
|