/external/tensorflow/tensorflow/python/eager/
backprop_test.py
    24: from tensorflow.python.eager import backprop
    66: grad = backprop.gradients_function(fn, [0])(var)[0]
    95: grads_and_vars = backprop.implicit_grad(fn)()
    105: with backprop.GradientTape() as t:
    119: g = backprop.gradients_function(f)
    125: with backprop.GradientTape() as t:
    144: with backprop.GradientTape() as t:
    150: with backprop.GradientTape() as t:
    163: grad_fn = backprop.gradients_function(f)
    172: self.assertEqual(backprop.gradients_function(f)(int_tensor)[0], None)
    [all …]
function_gradients_test.py
    23: from tensorflow.python.eager import backprop
    53: return backprop.implicit_grad(inner)()[0][0]
    107: return backprop.implicit_grad(inner)()[0][0]
    121: with backprop.GradientTape() as t:
    136: with backprop.GradientTape() as t:
    149: self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
    158: self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
    160: self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
    186: return backprop.gradients_function(f, [0])(x)[0]
    200: backprop.implicit_val_and_grad(f)()
    [all …]
tape_test.py
    22: from tensorflow.python.eager import backprop
    74: da, db = backprop.gradients_function(fn, [0, 1])(a, b)
    94: da, = backprop.gradients_function(forward, ['a'])(aa, bb)
    108: da, = backprop.gradients_function(forward, [0])(aa, bb)
    122: val, (da,) = backprop.val_and_grad_function(forward, ['a'])(aa, bb)
    137: da, db = backprop.gradients_function(fn, [0, 1])(a, b)
    156: grad, = backprop.gradients_function(fn, [0])(logits, labels)
    165: g, = backprop.gradients_function(fn, [0])(t)
BUILD
    53: ":backprop",
    154: ":backprop",
    195: ":backprop",
    210: ":backprop",
    226: ":backprop",
    244: ":backprop",
    376: name = "backprop",
    377: srcs = ["backprop.py"],
    404: ":backprop",
    427: ":backprop",
    [all …]
memory_test.py
    31: from tensorflow.python.eager import backprop
    103: with backprop.GradientTape():
    116: with backprop.GradientTape() as tape:
pywrap_tfe_test.py
    22: from tensorflow.python.eager import backprop
    79: with backprop.GradientTape(persistent=True) as tape:
    93: with backprop.GradientTape(persistent=True) as tape:
    126: with backprop.GradientTape(persistent=True) as tape:
    159: with backprop.GradientTape(persistent=True) as tape:
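The hits above all exercise the two gradient entry points defined in backprop.py: gradients_function, which wraps a Python function and differentiates it with respect to chosen positional arguments, and GradientTape, which records operations as they run. A minimal sketch of both, with a made-up function and values (the import path is the internal module indexed above):

    import tensorflow as tf
    from tensorflow.python.eager import backprop

    def f(a, b):
        return a * a + b

    # Differentiate f w.r.t. argument 0 only, as in tape_test.py line 108.
    df_da, = backprop.gradients_function(f, [0])(tf.constant(2.0), tf.constant(1.0))
    print(df_da.numpy())  # 4.0

    # GradientTape records ops on watched tensors for later backward replay.
    x = tf.constant(3.0)
    with backprop.GradientTape() as t:
        t.watch(x)  # constants are not tracked automatically
        y = x * x
    print(t.gradient(y, x).numpy())  # 6.0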
/external/tensorflow/tensorflow/core/kernels/
sparse_xent_op.h
    165: typename TTypes<T>::Matrix backprop);
    178: typename TTypes<T>::Matrix backprop) {  (in Compute())
    217: To32Bit(backprop).device(d) =  (in Compute())
    222: To32Bit(scratch).device(d) = To32Bit(backprop).exp().sum(along_class);  (in Compute())
    228: sparse_xent_helpers::To32BitConst<T>(backprop),  (in Compute())
    230: backprop.dimension(1) /* max_depth */);  (in Compute())
    232: To32Bit(backprop).generate(sparse_xent_loss_gen).sum(along_class);  (in Compute())
    236: To32Bit(backprop).device(d) = To32Bit(backprop).exp();  (in Compute())
    238: sparse_xent_helpers::To32BitConst<T>(backprop),  (in Compute())
    240: backprop.dimension(1) /* max_depth */);  (in Compute())
    [all …]
xent_op.h
    45: typename TTypes<T>::Matrix backprop);
    61: typename TTypes<T>::Matrix backprop) {  (in Compute())
    100: backprop.device(d) =  (in Compute())
    104: scratch.reshape(batch_only).device(d) = backprop.exp().sum(along_class);  (in Compute())
    114: (scratch.log().eval().broadcast(one_by_class) - backprop))  (in Compute())
    120: backprop.device(d) = (backprop.exp() / scratch.broadcast(one_by_class)) -  (in Compute())
relu_op_gpu.cu.cc
    39: Eigen::half* backprop, int32 count) {  (in ReluGradHalfKernel(), argument)
    49: half2* p_backprop_h2 = reinterpret_cast<half2*>(backprop) + index;  (in ReluGradHalfKernel())
    84: backprop[count - 1] = backprop_h;  (in ReluGradHalfKernel())
    99: typename TTypes<Eigen::half>::Tensor backprop) {  (in operator ()())
    111: backprop.data(), count);  (in operator ()())
xent_op_gpu.cu.cc
    42: typename TTypes<T>::Matrix backprop) {  (in operator ()())
    45: backprop);  (in operator ()())
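Both xent kernels above compute the loss and its gradient in a single fused pass, reusing the backprop matrix as scratch: it holds the max-shifted logits, then their exponentials, and finally softmax(logits) - labels. A NumPy sketch of the math those Eigen expressions implement (the function name and shapes are illustrative, not the kernel's API):

    import numpy as np

    def xent_with_backprop(logits, labels):
        # Subtract the per-row max for numerical stability, as the kernels do.
        shifted = logits - logits.max(axis=1, keepdims=True)
        exp = np.exp(shifted)
        sum_exp = exp.sum(axis=1, keepdims=True)
        # Per-row loss: sum(labels * (log(sum exp) - shifted)).
        loss = (labels * (np.log(sum_exp) - shifted)).sum(axis=1)
        # The "backprop" output: softmax(logits) - labels.
        backprop = exp / sum_exp - labels
        return loss, backprop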
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_BatchNormWithGlobalNormalizationGrad.pbtxt
    34: name: "backprop"
    36: 4D backprop Tensor.
    42: 4D backprop tensor for input.
    48: 1D backprop tensor for mean.
    54: 1D backprop tensor for variance.
    60: 1D backprop tensor for beta.
    66: 1D backprop tensor for gamma.
api_def_CudnnRNNBackprop.pbtxt
    5: Compute the backprop of both data and weights in an RNN.
    36: input_backprop: The backprop to input in the forward pass. Has the same shape
    38: input_h_backprop: The backprop to input_h in the forward pass. Has the same
    40: input_c_backprop: The backprop to input_c in the forward pass. Has the same
    42: params_backprop: The backprop to the params buffer in the forward pass. Has the
api_def_CudnnRNNBackpropV2.pbtxt
    6: Compute the backprop of both data and weights in an RNN. Takes an extra
    40: input_backprop: The backprop to input in the forward pass. Has the same shape
    42: input_h_backprop: The backprop to input_h in the forward pass. Has the same
    44: input_c_backprop: The backprop to input_c in the forward pass. Has the same
    46: params_backprop: The backprop to the params buffer in the forward pass. Has the
api_def_SparseFillEmptyRowsGrad.pbtxt
    12: 1-D. The gradients from backprop.
    18: 1-D. The backprop into values.
    24: 0-D. The backprop into default_value.
api_def_CudnnRNNBackpropV3.pbtxt
    6: Compute the backprop of both data and weights in an RNN. Takes an extra
    45: input_backprop: The backprop to input in the forward pass. Has the same shape
    47: input_h_backprop: The backprop to input_h in the forward pass. Has the same
    49: input_c_backprop: The backprop to input_c in the forward pass. Has the same
    51: params_backprop: The backprop to the params buffer in the forward pass. Has the
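The gradient ops documented here are reachable from Python through tf.raw_ops. A hedged sketch for SparseFillEmptyRowsGrad, assuming the TF2 raw-op surface (the values are illustrative; reverse_index_map would come from the forward SparseFillEmptyRows op):

    import tensorflow as tf

    # grad_values is the "1-D. The gradients from backprop." input above.
    result = tf.raw_ops.SparseFillEmptyRowsGrad(
        reverse_index_map=tf.constant([0, 2], dtype=tf.int64),
        grad_values=tf.constant([1.0, 2.0, 3.0]))
    # result.d_values gathers grad_values at reverse_index_map -> [1.0, 3.0];
    # result.d_default_value sums the remaining entries -> 2.0.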
/external/tensorflow/tensorflow/contrib/eager/python/
tfe.py
    104: from tensorflow.python.eager import backprop
    147: implicit_gradients = backprop.implicit_grad
    148: implicit_value_and_gradients = backprop.implicit_val_and_grad
    149: gradients_function = backprop.gradients_function
    150: value_and_gradients_function = backprop.val_and_grad_function
    151: GradientTape = backprop.GradientTape  # pylint: disable=invalid-name
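tfe.py simply re-exports the backprop symbols under the contrib namespace, so TF 1.x eager code could write, for example (a sketch of the 1.x-era usage; contrib was removed in TF 2):

    import tensorflow as tf
    import tensorflow.contrib.eager as tfe

    tf.enable_eager_execution()  # TF 1.x API

    def square(x):
        return x * x

    grad_fn = tfe.gradients_function(square)  # alias for backprop.gradients_function
    print(grad_fn(3.0))  # [<tf.Tensor: ... numpy=6.0>]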
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
softmax_op.cc
    137: xla::XlaOp backprop =  (in CrossEntropyWithLogits(), local)
    139: return {loss, backprop};  (in CrossEntropyWithLogits())
    165: xla::XlaOp loss, backprop;  (in Compile(), local)
    166: std::tie(loss, backprop) =  (in Compile())
    169: ctx->SetOutput(1, backprop);  (in Compile())
    232: xla::XlaOp loss, backprop;  (in Compile(), local)
    233: std::tie(loss, backprop) = CrossEntropyWithLogits(  (in Compile())
    236: ctx->SetOutput(1, backprop);  (in Compile())
/external/tensorflow/tensorflow/python/kernel_tests/
sparse_xent_op_test.py
    68: loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
    70: tf_loss, tf_backprop = self.evaluate([loss, backprop])
    77: loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
    80: tf_loss, tf_backprop = self.evaluate([loss, backprop])
    92: loss, backprop = (
    95: tf_loss, tf_backprop = self.evaluate([loss, backprop])
    106: loss, backprop = (
    109: self.evaluate([loss, backprop])
    233: backprop = loss.op.inputs[0].op.outputs[1]
    234: tf_loss, tf_backprop = self.evaluate([loss, backprop])
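These tests call the generated op directly so they can check both outputs; user code normally goes through the public wrapper, which exposes only the loss while the backprop output stays internal to the gradient computation. A hedged sketch with illustrative values:

    import tensorflow as tf

    logits = tf.constant([[1.0, 2.0, 3.0],
                          [1.0, 2.0, 3.0]])
    labels = tf.constant([0, 2])  # class indices, not one-hot
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits)  # shape [2]: one loss per row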
depthwise_conv_op_test.py
    533: backprop = nn_ops.depthwise_conv2d_native_backprop_input(
    535: ret = self.evaluate(backprop)
    536: self.assertShapeEqual(ret, backprop)
    553: backprop = nn_ops.depthwise_conv2d_native_backprop_input(
    555: ret = self.evaluate(backprop)
    556: self.assertShapeEqual(ret, backprop)
    585: backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
    587: ret = self.evaluate(backprop)
    588: self.assertShapeEqual(ret, backprop)
    605: backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
    [all …]
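These tests evaluate the two explicit gradient ops of depthwise convolution. A hedged sketch of the input-gradient call through the public tf.nn wrapper, with made-up shapes:

    import tensorflow as tf

    input_sizes = tf.constant([1, 5, 5, 2], dtype=tf.int32)  # forward input shape
    filt = tf.random.normal([3, 3, 2, 1])           # [h, w, in_channels, multiplier]
    out_backprop = tf.random.normal([1, 5, 5, 2])   # grad w.r.t. the forward output
    grad_input = tf.nn.depthwise_conv2d_backprop_input(
        input_sizes, filt, out_backprop, strides=[1, 1, 1, 1], padding="SAME")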
relu_op_test.py
    26: from tensorflow.python.eager import backprop
    126: with backprop.GradientTape() as tape:
    175: with backprop.GradientTape() as tape:
    194: with backprop.GradientTape() as tape:
    348: with backprop.GradientTape() as tape:
    368: with backprop.GradientTape() as tape:
    448: with backprop.GradientTape(persistent=True) as tape:
    465: with backprop.GradientTape() as tape:
    484: with backprop.GradientTape() as tape:
    553: with backprop.GradientTape() as tape:
    [all …]
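Most of these hits pair GradientTape with a relu-family op to check the gradient's masking behavior: relu passes the incoming gradient only where its input was strictly positive. A small sketch:

    import tensorflow as tf

    x = tf.constant([-1.0, 0.0, 2.0])
    with tf.GradientTape() as tape:
        tape.watch(x)
        y = tf.nn.relu(x)
    print(tape.gradient(y, x).numpy())  # [0. 0. 1.]; the gradient at 0 is taken to be 0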
reduce_benchmark_test.py
    28: from tensorflow.python.eager import backprop
    59: backprop.gradients_function(math_ops.reduce_sum, [0])(tensor)
    68: backprop.gradients_function(math_ops.reduce_sum, [0])(tensor)
xent_op_test.py
    58: loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
    60: tf_loss, tf_backprop = self.evaluate([loss, backprop])
    81: loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
    84: tf_loss, tf_backprop = self.evaluate([loss, backprop])
    151: loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
    153: tf_loss, tf_backprop = self.evaluate([loss, backprop])
/external/tensorflow/tensorflow/compiler/tests/
depthwise_conv_op_test.py
    330: backprop = nn_ops.depthwise_conv2d_native_backprop_input(
    333: backprop = nn_ops.depthwise_conv2d_native_backprop_input(
    336: ret = backprop.eval({t1: x1, t2: x2})
    337: self.assertShapeEqual(ret, backprop)
    380: backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
    390: backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
    392: ret = backprop.eval({t0: x0, t2: x2})
    393: self.assertShapeEqual(ret, backprop)
eager_test.py
    25: from tensorflow.python.eager import backprop
    61: with backprop.GradientTape(persistent=True) as tape:
    163: grad_fn = backprop.gradients_function(f)
    174: grads = backprop.implicit_grad(f)()
    267: with backprop.GradientTape() as tape:
    281: with backprop.GradientTape() as tape:
    440: with backprop.GradientTape() as tape:
    454: with backprop.GradientTape() as tape:
    471: with backprop.GradientTape() as tape:
    529: with backprop.GradientTape() as tape:
    [all …]
/external/tensorflow/tensorflow/python/ops/
gradient_checker.py
    114: backprop = sess.run(
    116: jacobian[:, col] = backprop.ravel().view(jacobian.dtype)
    122: backprop = sess.run(
    124: if backprop.shape != x_data.shape:
    126: (x_data.shape, backprop.shape))
    127: if np.any(backprop):
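gradient_checker.py builds the analytic Jacobian one column at a time by backpropagating one-hot vectors, then checks it against a finite-difference estimate. Its functionality is exposed publicly as tf.test.compute_gradient; a hedged sketch, assuming the TF2 signature that takes a callable and a list of inputs:

    import tensorflow as tf

    def f(x):
        return x * x

    x = tf.constant([1.0, 2.0, 3.0])
    # Returns (theoretical, numeric) Jacobians that should agree closely.
    theoretical, numeric = tf.test.compute_gradient(f, [x])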