/external/tensorflow/tensorflow/contrib/constrained_optimization/python/
D | constrained_optimizer.py |
     67   grad_loss=None):      argument
     99   grad_loss=None):      argument
    131   grad_loss=grad_loss)
    152   grad_loss=None):      argument
    184   grad_loss=grad_loss)
    206   grad_loss=None):      argument
    249   grad_loss=grad_loss)
    261   grad_loss=grad_loss)
|
D | external_regret_optimizer.py |
    202   grad_loss=None):      argument
    273   grad_loss=grad_loss)
    288   grad_loss=grad_loss)
|
D | swap_regret_optimizer.py |
    307   grad_loss=None):      argument
    388   grad_loss=grad_loss)
    403   grad_loss=grad_loss)
|
/external/tensorflow/tensorflow/contrib/mixed_precision/python/ |
D | loss_scale_optimizer.py |
    120   grad_loss=None):      argument
    141   grad_loss=grad_loss)
|
/external/tensorflow/tensorflow/contrib/optimizer_v2/ |
D | optimizer_v2.py |
    661   grad_loss=None,       argument
    712   grad_loss=grad_loss,
    731   grad_loss=None,       argument
    786   grads = tape.gradient(loss_value, var_list, grad_loss)
    803   if grad_loss is not None:
    804   self._assert_valid_dtypes([grad_loss])
    821   grad_ys=grad_loss,
|
D | optimizer_v2_test.py |
     93   grad_loss = constant_op.constant([42, -42], dtype=dtype)
     98   cost, global_step, [var0, var1], grad_loss=grad_loss)
|
/external/tensorflow/tensorflow/python/training/ |
D | optimizer.py |
    357   grad_loss=None):      argument
    402   grad_loss=grad_loss)
    418   grad_loss=None):      argument
    468   grads = tape.gradient(loss_value, var_list, grad_loss)
    483   if grad_loss is not None:
    484   self._assert_valid_dtypes([grad_loss])
    499   loss, var_refs, grad_ys=grad_loss,
|
D | optimizer_test.py |
     98   grad_loss = constant_op.constant([42, -42], dtype=dtype)
    103   cost, global_step, [var0, var1], grad_loss=grad_loss)
|
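The hits in optimizer.py show where grad_loss is forwarded as grad_ys to tf.gradients (graph mode) or as the third argument of tape.gradient (eager mode), so it seeds the backward pass in place of the implicit all-ones vector. A minimal sketch mirroring the optimizer_test.py hit above, written against the tf.compat.v1 API (variable names chosen here for illustration):

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    var0 = tf.Variable([1.0, 2.0])
    var1 = tf.Variable([3.0, 4.0])
    cost = 5 * var0 + 3 * var1              # vector loss, same shape as grad_loss
    grad_loss = tf.constant([42.0, -42.0])  # seeds the backward pass (grad_ys)

    opt = tf.train.GradientDescentOptimizer(learning_rate=3.0)
    train_op = opt.minimize(cost, var_list=[var0, var1], grad_loss=grad_loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_op)
        # d(cost)/d(var0) = 5, weighted by grad_loss -> [210, -210];
        # var0 becomes [1 - 3*210, 2 + 3*210] = [-629., 632.]
        print(sess.run(var0))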
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.train.-optimizer.pbtxt |
     28   …gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None…
     44   …', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None…
|
D | tensorflow.train.-gradient-descent-optimizer.pbtxt |
     29   …gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None…
     45   …', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None…
|
D | tensorflow.train.-proximal-adagrad-optimizer.pbtxt |
     29   …gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None…
     45   …', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None…
|
D | tensorflow.train.-adagrad-optimizer.pbtxt |
     29   …gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None…
     45   …', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None…
|
D | tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt |
     29   …gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None…
     45   …', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None…
|
D | tensorflow.train.-momentum-optimizer.pbtxt |
     29   …gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None…
     45   …', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None…
|
D | tensorflow.train.-adadelta-optimizer.pbtxt |
     29   …gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None…
     45   …', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None…
|
D | tensorflow.train.-adagrad-d-a-optimizer.pbtxt |
     29   …gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None…
     45   …', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None…
|
D | tensorflow.train.-r-m-s-prop-optimizer.pbtxt |
     29   …gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None…
     45   …', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None…
|
D | tensorflow.train.-adam-optimizer.pbtxt |
     29   …gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None…
     45   …', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None…
|
D | tensorflow.train.-ftrl-optimizer.pbtxt |
     29   …gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None…
     45   …', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None…
|
D | tensorflow.train.-sync-replicas-optimizer.pbtxt |
     57   …', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None…
|
/external/tensorflow/tensorflow/python/keras/mixed_precision/experimental/ |
D | loss_scale_optimizer.py |
     72   def _compute_gradients(self, loss, var_list, grad_loss=None):   argument
     75   grad_loss)
|
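The Keras LossScaleOptimizer overrides _compute_gradients and passes grad_loss straight through to the wrapped optimizer; only the loss is scaled up before differentiation and the gradients scaled back down. A short sketch of wrapping an optimizer with the experimental API from this path (TF 2.x, pre-2.4 naming):

    import tensorflow as tf
    from tensorflow.keras.mixed_precision import experimental as mixed_precision

    # Wrap plain SGD with dynamic loss scaling; the wrapper scales the
    # loss, differentiates, then unscales the gradients before they
    # reach apply_gradients. grad_loss, if given, is forwarded as-is.
    opt = mixed_precision.LossScaleOptimizer(
        tf.keras.optimizers.SGD(learning_rate=0.1), loss_scale='dynamic')

    var = tf.Variable(1.0)
    opt.minimize(lambda: var * var, var_list=[var])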
/external/tensorflow/tensorflow/contrib/opt/python/training/ |
D | weight_decay_optimizers.py |
     99   name=None, grad_loss=None, decay_var_list=None):   argument
    135   grad_loss=grad_loss)
|
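Here the decoupled weight-decay extension widens the minimize() signature: grad_loss is forwarded unchanged to the base optimizer, while the added decay_var_list restricts which variables are decayed. A hedged TF 1.x sketch (AdamWOptimizer lived in tf.contrib.opt; the variables and hyperparameters here are illustrative):

    import tensorflow.compat.v1 as tf
    from tensorflow.contrib.opt import AdamWOptimizer

    w = tf.Variable(tf.ones([10, 10]))
    b = tf.Variable(tf.zeros([10]))
    loss = tf.reduce_sum(tf.matmul(tf.ones([1, 10]), w) + b)

    # Decay only the weight matrix, not the bias; grad_loss keeps its
    # usual meaning and is threaded through to Adam's compute_gradients.
    opt = AdamWOptimizer(weight_decay=1e-4, learning_rate=1e-3)
    train_op = opt.minimize(loss, var_list=[w, b], decay_var_list=[w])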
/external/tensorflow/tensorflow/python/keras/optimizer_v2/ |
D | optimizer_v2.py |
    268   def minimize(self, loss, var_list, grad_loss=None, name=None):   argument
    298   loss, var_list=var_list, grad_loss=grad_loss)
    302   def _compute_gradients(self, loss, var_list, grad_loss=None):   argument
    331   grads = tape.gradient(loss_value, var_list, grad_loss)
|
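In this v2 code path, grad_loss ends up as the third positional argument of tape.gradient, i.e. output_gradients: per-element weights applied to the target before backpropagation. A standalone sketch of that mechanism (values chosen to echo the tests above):

    import tensorflow as tf

    var = tf.Variable([1.0, 2.0])
    with tf.GradientTape() as tape:
        loss = var * var                    # vector-valued target

    # output_gradients plays the role of grad_loss: each element of the
    # target is weighted before the backward accumulation.
    grad_loss = tf.constant([42.0, -42.0])
    grads = tape.gradient(loss, [var], output_gradients=grad_loss)
    # d(loss)/d(var) = 2 * var = [2., 4.]; weighted -> [84., -168.]
    print(grads[0].numpy())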
/external/tensorflow/tensorflow/cc/gradients/ |
D | nn_grad.cc |
     90   auto grad_loss = grad_inputs[0];   in SoftmaxCrossEntropyWithLogitsGrad()   local
     93   auto grad = BroadcastMul(scope, grad_loss, softmax_grad);   in SoftmaxCrossEntropyWithLogitsGrad()
    110   grad_outputs->push_back(BroadcastMul(scope, grad_loss, minus_log_softmax));   in SoftmaxCrossEntropyWithLogitsGrad()
|
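In the C++ gradient, grad_loss is the incoming per-example gradient (shape [batch]) and BroadcastMul expands it to [batch, 1] so it can multiply the per-class tensors (shape [batch, num_classes]). A Python equivalent of that helper, assuming those shapes:

    import tensorflow as tf

    def broadcast_mul(grad_loss, mat):
        # Expand the 1-D per-example gradient to [batch, 1] so it
        # broadcasts across the class dimension of `mat`.
        return tf.expand_dims(grad_loss, -1) * mat

    grad_loss = tf.constant([1.0, 0.5])            # [batch]
    softmax_grad = tf.ones([2, 3])                 # [batch, num_classes]
    print(broadcast_mul(grad_loss, softmax_grad))  # rows scaled by 1.0 and 0.5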
/external/tensorflow/tensorflow/python/ops/ |
D | ctc_ops.py |
    182   def _CTCLossGrad(op, grad_loss, _):   argument
    204   return [_BroadcastMul(grad_loss, grad_without_gradient), None, None, None]
    576   def _ctc_loss_grad(op, grad_loss, _):   argument
    578   grad = [array_ops.reshape(grad_loss, [1, -1, 1]) * grad]
|
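For CTC, grad_loss is the upstream gradient of the per-example loss (shape [batch]), while the saved gradient w.r.t. the logits is time-major ([max_time, batch, num_labels]); the reshape to [1, -1, 1] lets the two broadcast. The same arithmetic in isolation (the sizes here are hypothetical):

    import tensorflow as tf

    batch, max_time, num_labels = 8, 50, 29        # hypothetical sizes
    grad_loss = tf.ones([batch])                   # upstream gradient per example
    grad_wrt_logits = tf.zeros([max_time, batch, num_labels])

    # Reshape [batch] -> [1, batch, 1] so grad_loss broadcasts across the
    # time and label dimensions, exactly as in _ctc_loss_grad above.
    scaled = tf.reshape(grad_loss, [1, -1, 1]) * grad_wrt_logits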