
Searched refs: grads_and_vars (results 1 – 25 of 72), sorted by relevance


/external/tensorflow/tensorflow/python/keras/optimizer_v2/
utils.py
  28  def all_reduce_sum_gradients(grads_and_vars):   [argument]
  37  grads_and_vars = list(grads_and_vars)
  38  filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)
  52  for g, v in grads_and_vars:
  62  def filter_empty_gradients(grads_and_vars):   [argument]
  64  grads_and_vars = tuple(grads_and_vars)
  65  if not grads_and_vars:
  66  return grads_and_vars
  70  for grad, var in grads_and_vars:
  79  ([v.name for _, v in grads_and_vars],))
  [all …]
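The utils.py hits above outline a complete helper: (grad, var) pairs whose gradient is None are dropped before any update is applied. A minimal reconstruction from the matched fragments (the exact error text in the real file may differ):

    def filter_empty_gradients(grads_and_vars):
        """Drop (grad, var) pairs whose gradient is None."""
        grads_and_vars = tuple(grads_and_vars)
        if not grads_and_vars:
            return grads_and_vars

        filtered = []
        vars_with_empty_grads = []
        for grad, var in grads_and_vars:
            if grad is None:
                vars_with_empty_grads.append(var)
            else:
                filtered.append((grad, var))

        if not filtered:
            # Every gradient was None; nothing can be applied.
            raise ValueError("No gradients provided for any variable: %s." %
                             ([v.name for _, v in grads_and_vars],))
        return tuple(filtered)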
optimizer_v2.py
  479  def _transform_unaggregated_gradients(self, grads_and_vars):   [argument]
  481  return grads_and_vars
  483  def _aggregate_gradients(self, grads_and_vars):   [argument]
  485  return self.gradient_aggregator(grads_and_vars)
  487  def _transform_gradients(self, grads_and_vars):   [argument]
  490  grads_and_vars = self._clipvalue_fn(grads_and_vars)
  492  grads_and_vars = self._clipnorm_fn(grads_and_vars)
  494  grads_and_vars = self._global_clipnorm_fn(grads_and_vars)
  497  grads_and_vars = fn(grads_and_vars)
  498  return grads_and_vars
  [all …]
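Lines 490–497 of optimizer_v2.py chain clipping transforms over the (grad, var) list. A hedged sketch of that pipeline using the public clip ops; the real class routes these through _clipvalue_fn / _clipnorm_fn / _global_clipnorm_fn hooks rather than inline code:

    import tensorflow as tf

    def transform_gradients(grads_and_vars, clipvalue=None, clipnorm=None,
                            global_clipnorm=None):
        """Apply clip-by-value, clip-by-norm, then global-norm clipping, in order."""
        if clipvalue is not None:
            grads_and_vars = [(tf.clip_by_value(g, -clipvalue, clipvalue), v)
                              for g, v in grads_and_vars]
        if clipnorm is not None:
            grads_and_vars = [(tf.clip_by_norm(g, clipnorm), v)
                              for g, v in grads_and_vars]
        if global_clipnorm is not None:
            grads, variables = zip(*grads_and_vars)
            clipped, _ = tf.clip_by_global_norm(list(grads), global_clipnorm)
            grads_and_vars = list(zip(clipped, variables))
        return grads_and_vars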
/external/tensorflow/tensorflow/python/training/experimental/
loss_scale_optimizer.py
  119  grads_and_vars = self._optimizer.compute_gradients(
  127  grads = [g for g, _ in grads_and_vars]
  128  variables = [v for _, v in grads_and_vars]
  156  def apply_gradients(self, grads_and_vars, global_step=None, name=None):   [argument]
  182  return self._optimizer.apply_gradients(grads_and_vars, global_step, name)
  185  grads_and_vars = tuple(grads_and_vars)
  189  self._distributed_apply, args=(grads_and_vars, global_step, name))
  193  grads_and_vars,   [argument]
  216  grads = [g for g, _ in grads_and_vars]
  220  return self._apply_gradients(distribution, grads_and_vars, global_step,
  [all …]
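The v1 loss-scale wrapper differentiates a scaled loss and divides the scale back out before the (grad, var) pairs reach the inner optimizer (lines 119–128). A rough standalone sketch; inner_opt and loss_scale are hypothetical stand-ins for the wrapper's internals:

    def compute_scaled_gradients(inner_opt, loss, var_list, loss_scale):
        """Differentiate loss * loss_scale, then unscale the gradients."""
        scaled_loss = loss * loss_scale
        grads_and_vars = inner_opt.compute_gradients(scaled_loss, var_list)
        grads = [g for g, _ in grads_and_vars]
        variables = [v for _, v in grads_and_vars]
        unscaled = [g / loss_scale if g is not None else None for g in grads]
        return list(zip(unscaled, variables))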
/external/tensorflow/tensorflow/python/training/
optimizer.py
  407  grads_and_vars = self.compute_gradients(
  413  vars_with_grad = [v for g, v in grads_and_vars if g is not None]
  418  ([str(v) for _, v in grads_and_vars], loss))
  420  return self.apply_gradients(grads_and_vars, global_step=global_step,
  523  grads_and_vars = list(zip(grads, var_list))
  525  [v for g, v in grads_and_vars
  527  return grads_and_vars
  539  def apply_gradients(self, grads_and_vars, global_step=None, name=None):   [argument]
  575  grads_and_vars = get_filtered_grad_fn(lambda: grads_and_vars)()
  577  self._distributed_apply, args=(grads_and_vars, global_step, name))
  [all …]
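Lines 407–420 show the canonical v1 minimize() decomposition: compute the pairs, verify at least one gradient exists, then apply. The same pattern from user code, with the standard tf.compat.v1 API:

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()

    x = tf.compat.v1.get_variable("x", initializer=3.0)
    loss = tf.square(x)

    opt = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)
    grads_and_vars = opt.compute_gradients(loss)  # [(gradient, variable), ...]

    # minimize() rejects an all-None gradient list; mirror that check.
    if not [v for g, v in grads_and_vars if g is not None]:
        raise ValueError("No gradients provided for any variable")

    train_op = opt.apply_gradients(grads_and_vars)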
optimizer_test.py
  196  grads_and_vars = sgd_op.compute_gradients(loss, [var0, var1])
  201  for j, gv in enumerate(grads_and_vars)
  205  for j, gv in enumerate(grads_and_vars)
  230  grads_and_vars = sgd_op.compute_gradients(f, [x])
  231  self.assertEqual(1, len(grads_and_vars))
  232  grad, x_as_var = grads_and_vars[0]
  237  sgd_op.apply_gradients(grads_and_vars)
sync_replicas_optimizer.py
  224  def apply_gradients(self, grads_and_vars, global_step=None, name=None):   [argument]
  247  if not grads_and_vars:
  276  for grad, var in grads_and_vars:
/external/tensorflow/tensorflow/python/keras/mixed_precision/
loss_scale_optimizer.py
  681  grads_and_vars = self._optimizer._compute_gradients(  # pylint: disable=protected-access
  686  grads = [g for g, _ in grads_and_vars]
  687  weights = [v for _, v in grads_and_vars]
  700  grads_and_vars,   [argument]
  709  grads_and_vars = tuple(grads_and_vars)
  712  args=(grads_and_vars, name, experimental_aggregate_gradients))
  714  def _apply_gradients_cross_replica(self, distribution, grads_and_vars, name,   [argument]
  716  grads = [g for g, _ in grads_and_vars]
  729  wrapped_vars = _UnwrapPreventer([v for _, v in grads_and_vars])
  864  def _aggregate_gradients(self, grads_and_vars):   [argument]
  [all …]
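The keras mixed-precision wrapper does the same scaling dance in TF2 form. Its public API (rather than the private _compute_gradients / _aggregate_gradients methods matched above) is typically driven like this:

    import tensorflow as tf

    opt = tf.keras.mixed_precision.LossScaleOptimizer(
        tf.keras.optimizers.SGD(learning_rate=0.1))

    var = tf.Variable(3.0)
    with tf.GradientTape() as tape:
        loss = tf.square(var)
        scaled_loss = opt.get_scaled_loss(loss)

    scaled_grads = tape.gradient(scaled_loss, [var])
    grads = opt.get_unscaled_gradients(scaled_grads)
    opt.apply_gradients(zip(grads, [var]))  # grads_and_vars as an iterable of pairs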
/external/tensorflow/tensorflow/python/distribute/
step_fn.py
  102  grads_and_vars = self.distribution.extended.call_for_each_replica(
  109  self.distribution, grads_and_vars)
/external/tensorflow/tensorflow/python/tpu/
tpu_embedding_gradient.py
  46  grads_and_vars = optimizer.compute_gradients(loss, activation_list)
  47  grads = [grad for grad, _ in grads_and_vars]
tpu_optimizer.py
  163  def apply_gradients(self, grads_and_vars, global_step=None, name=None):   [argument]
  185  for (grad, var) in grads_and_vars:
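tpu_optimizer.py's apply_gradients (line 185) walks the (grad, var) pairs and reduces each gradient across TPU shards before delegating to the wrapped optimizer. A hedged sketch of that per-pair reduction; it assumes mean reduction and would only run inside a TPU computation:

    import tensorflow as tf

    def cross_shard_apply(inner_opt, grads_and_vars, num_shards,
                          global_step=None, name=None):
        """Average each gradient across TPU shards, then delegate."""
        reduced = []
        for grad, var in grads_and_vars:
            if grad is None:
                reduced.append((None, var))
            else:
                # cross_replica_sum adds the tensor over all shards; dividing
                # first makes the result a mean rather than a sum.
                reduced.append(
                    (tf.compat.v1.tpu.cross_replica_sum(grad / num_shards), var))
        return inner_opt.apply_gradients(reduced, global_step, name)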
/external/tensorflow/tensorflow/python/keras/distribute/
mirrored_strategy_test.py
  80  grads_and_vars = distribution.extended.call_for_each_replica(
  84  …update_ops = optimizer._distributed_apply(distribution, grads_and_vars)  # pylint: disable=protect…
/external/tensorflow/tensorflow/tools/api/golden/v1/
tensorflow.train.-optimizer.pbtxt
  24  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
tensorflow.train.-adam-optimizer.pbtxt
  25  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
tensorflow.train.-momentum-optimizer.pbtxt
  25  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
tensorflow.train.-gradient-descent-optimizer.pbtxt
  25  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
tensorflow.train.-r-m-s-prop-optimizer.pbtxt
  25  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
tensorflow.train.-proximal-adagrad-optimizer.pbtxt
  25  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
tensorflow.tpu.-cross-shard-optimizer.pbtxt
  25  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt
  25  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
tensorflow.train.-adagrad-d-a-optimizer.pbtxt
  25  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
tensorflow.train.experimental.-mixed-precision-loss-scale-optimizer.pbtxt
  25  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
tensorflow.mixed_precision.-mixed-precision-loss-scale-optimizer.pbtxt
  25  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
tensorflow.train.-adadelta-optimizer.pbtxt
  25  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
tensorflow.train.-adagrad-optimizer.pbtxt
  25  …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
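All thirteen golden files freeze the same v1 signature, apply_gradients(grads_and_vars, global_step=None, name=None), so the call shape is uniform across the subclasses listed above. For example, with AdamOptimizer:

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()

    v = tf.compat.v1.get_variable("v", initializer=1.0)
    loss = tf.square(v)
    step = tf.compat.v1.train.get_or_create_global_step()

    opt = tf.compat.v1.train.AdamOptimizer(0.01)
    grads_and_vars = opt.compute_gradients(loss)
    train_op = opt.apply_gradients(grads_and_vars, global_step=step,
                                   name="train_step")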
/external/tensorflow/tensorflow/python/keras/premade/
linear_test.py
  148  grads_and_vars = zip(grads, model.trainable_variables)
  149  opt.apply_gradients(grads_and_vars)
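linear_test.py exercises the TF2 calling convention: build grads_and_vars with zip and hand the iterable straight to apply_gradients. A minimal standalone version of that pattern:

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    model.build(input_shape=(None, 4))
    opt = tf.keras.optimizers.SGD(learning_rate=0.1)

    x = tf.random.normal((8, 4))
    y = tf.zeros((8, 1))
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(model(x) - y))

    grads = tape.gradient(loss, model.trainable_variables)
    grads_and_vars = zip(grads, model.trainable_variables)
    opt.apply_gradients(grads_and_vars)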
