/external/tensorflow/tensorflow/contrib/losses/python/losses/ |
D | loss_ops_test.py |
  52   loss = loss_ops.absolute_difference(self._predictions, self._predictions)
  54   self.assertAlmostEqual(0.0, loss.eval(), 3)
  57   loss = loss_ops.absolute_difference(self._predictions, self._labels)
  59   self.assertAlmostEqual(5.5, loss.eval(), 3)
  63   loss = loss_ops.absolute_difference(self._predictions, self._labels,
  66   self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
  70   loss = loss_ops.absolute_difference(self._predictions, self._labels,
  73   self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
  77   loss = loss_ops.absolute_difference(self._predictions, self._labels,
  80   self.assertAlmostEqual(5.6, loss.eval(), 3)
  [all …]
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | losses_test.py |
  56   loss = losses.absolute_difference(self._predictions, self._predictions)
  58   self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
  61   loss = losses.absolute_difference(self._labels, self._predictions)
  63   self.assertAlmostEqual(5.5, self.evaluate(loss), 3)
  67   loss = losses.absolute_difference(self._labels, self._predictions, weights)
  69   self.assertAlmostEqual(5.5 * weights, self.evaluate(loss), 3)
  73   loss = losses.absolute_difference(self._labels, self._predictions,
  76   self.assertAlmostEqual(5.5 * weights, self.evaluate(loss), 3)
  80   loss = losses.absolute_difference(self._labels, self._predictions, weights)
  82   self.assertAlmostEqual(5.6, self.evaluate(loss), 3)
  [all …]
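Note: these tests check the mean absolute difference between labels and predictions, optionally scaled by a weight (5.5 unweighted, 5.5 * weights with a scalar weight). A minimal NumPy sketch of that arithmetic, using invented inputs rather than the fixtures from the test file:

    import numpy as np

    def absolute_difference(labels, predictions, weights=1.0):
        # Mean of |predictions - labels|, scaled by a scalar weight (illustrative sketch only).
        diff = np.abs(np.asarray(predictions, dtype=np.float64) -
                      np.asarray(labels, dtype=np.float64))
        return float(np.mean(diff) * weights)

    labels = np.array([1.0, 9.0, 2.0, -5.0])        # hypothetical labels
    predictions = np.array([4.0, 8.0, 12.0, 8.0])   # hypothetical predictions
    print(absolute_difference(labels, predictions))        # unweighted mean absolute error
    print(absolute_difference(labels, predictions, 2.3))   # same value scaled by the weight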
|
/external/tensorflow/tensorflow/python/keras/ |
D | losses_test.py |
  147  loss = keras.backend.eval(keras.losses.categorical_hinge(y_true, y_pred))
  148  self.assertAllClose(expected_loss, np.mean(loss))
  167  loss = _MSEMAELoss(0.3)
  171  model.compile(optimizer='sgd', loss={'model_output': loss})
  194  loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
  200  self.assertAllClose(self.evaluate(loss), 16, 1e-2)
  215  loss = mse_obj(y_true, y_true)
  216  self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
  224  loss = mse_obj(y_true, y_pred)
  225  self.assertAlmostEqual(self.evaluate(loss), 49.5, 3)
  [all …]
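Note: line 147 evaluates keras.losses.categorical_hinge, which compares the prediction for the true class against the best prediction for any other class. A hedged NumPy sketch of that formula, with made-up one-hot labels and predictions:

    import numpy as np

    def categorical_hinge(y_true, y_pred):
        # max(0, 1 + best wrong-class prediction - true-class prediction), per sample.
        y_true = np.asarray(y_true, dtype=np.float64)
        y_pred = np.asarray(y_pred, dtype=np.float64)
        pos = np.sum(y_true * y_pred, axis=-1)
        neg = np.max((1.0 - y_true) * y_pred, axis=-1)
        return np.maximum(0.0, neg - pos + 1.0)

    y_true = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])   # hypothetical one-hot labels
    y_pred = np.array([[0.1, 0.7, 0.2], [0.4, 0.5, 0.1]])   # hypothetical predictions
    print(np.mean(categorical_hinge(y_true, y_pred)))        # mean loss, as compared above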
|
/external/tensorflow/tensorflow/contrib/gan/python/losses/python/ |
D | losses_impl_test.py |
  51   loss = self._g_loss_fn(self._discriminator_gen_outputs)
  52   self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
  53   self.assertEqual(self._generator_loss_name, loss.op.name)
  55   self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
  58   loss = self._d_loss_fn(
  60   self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
  61   self.assertEqual(self._discriminator_loss_name, loss.op.name)
  63   self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
  79   loss = self._g_loss_fn(
  81   self.assertAllEqual([4], loss.shape)
  [all …]
|
D | losses_impl.py |
  108  loss = - discriminator_gen_outputs
  109  loss = losses.compute_weighted_loss(
  110  loss, weights, scope, loss_collection, reduction)
  113  summary.scalar('generator_wass_loss', loss)
  115  return loss
  163  loss = loss_on_generated - loss_on_real
  164  util.add_loss(loss, loss_collection)
  169  summary.scalar('discriminator_wass_loss', loss)
  171  return loss
  236  loss = loss_on_generated + loss_on_real
  [all …]
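Note: losses_impl.py builds the Wasserstein GAN losses: the generator loss is the negated critic output on generated samples (line 108), and the critic loss is the score on generated data minus the score on real data (line 163). A minimal sketch of that arithmetic with made-up critic outputs, ignoring the weighting, scoping, and summary plumbing shown above:

    import numpy as np

    def wasserstein_generator_loss(discriminator_gen_outputs):
        # The generator wants the critic score on its samples to be high, so its loss is -mean(score).
        return float(np.mean(-np.asarray(discriminator_gen_outputs, dtype=np.float64)))

    def wasserstein_discriminator_loss(discriminator_real_outputs, discriminator_gen_outputs):
        # Critic loss: mean score on generated samples minus mean score on real samples.
        return float(np.mean(discriminator_gen_outputs) - np.mean(discriminator_real_outputs))

    d_real = np.array([1.2, 0.8, 1.5])    # hypothetical critic outputs on real data
    d_gen = np.array([-0.3, 0.1, -0.6])   # hypothetical critic outputs on generated data
    print(wasserstein_generator_loss(d_gen))
    print(wasserstein_discriminator_loss(d_real, d_gen))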
|
/external/tensorflow/tensorflow/contrib/kernel_methods/python/ |
D | losses_test.py |
  98   loss = losses.sparse_multiclass_hinge_loss(labels, logits)
  100  loss.eval()
  108  loss = losses.sparse_multiclass_hinge_loss(labels, logits)
  109  self.assertAlmostEqual(loss.eval(), 0.0, 3)
  117  loss = losses.sparse_multiclass_hinge_loss(labels, logits)
  118  self.assertAlmostEqual(loss.eval(), 0.0, 3)
  137  loss = losses.sparse_multiclass_hinge_loss(labels, logits)
  138  result = loss.eval(feed_dict={logits: logits_np, labels: labels_np})
  147  loss = losses.sparse_multiclass_hinge_loss(labels, logits)
  149  self.assertAlmostEqual(loss.eval(), 0.4333, 3)
  [all …]
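Note: sparse_multiclass_hinge_loss takes integer labels and per-class logits, and the tests expect 0.0 when the correct class wins by a sufficient margin. A hedged sketch of the standard Crammer-Singer multiclass hinge (the library's exact reduction and edge-case handling may differ; the inputs are invented):

    import numpy as np

    def sparse_multiclass_hinge(labels, logits):
        # max(0, 1 + best wrong-class logit - true-class logit), averaged over the batch.
        logits = np.asarray(logits, dtype=np.float64)
        labels = np.asarray(labels, dtype=np.int64)
        rows = np.arange(logits.shape[0])
        correct = logits[rows, labels]
        masked = logits.copy()
        masked[rows, labels] = -np.inf          # exclude the true class from the max
        best_wrong = masked.max(axis=1)
        return float(np.mean(np.maximum(0.0, 1.0 + best_wrong - correct)))

    logits = np.array([[4.0, 0.1, -1.0], [0.2, 2.5, 1.9]])   # hypothetical scores
    labels = np.array([0, 1])
    print(sparse_multiclass_hinge(labels, logits))   # (0.0 + 0.4) / 2 = 0.2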
|
/external/tensorflow/tensorflow/contrib/layers/python/layers/ |
D | optimizers_test.py |
  42   loss = math_ops.abs(var * x)
  49   return x, var, loss, global_step
  70   x, var, loss, global_step = _setup_model()
  72   loss, global_step, learning_rate=0.1, optimizer=optimizer)
  86   x, var, loss, global_step = _setup_model()
  88   loss, global_step, learning_rate=None, optimizer=optimizer_fn)
  100  _, _, loss, global_step = _setup_model()
  103  loss, global_step, learning_rate=0.1, optimizer=optimizer)
  107  _, _, loss, global_step = _setup_model()
  110  loss, global_step, learning_rate=0.1, optimizer="SGD",
  [all …]
|
D | regularizers_test.py |
  78   loss = regularizers.l1_l2_regularizer(1.0, 1.0)(tensor)
  79   self.assertEquals(loss.op.name, 'l1_l2_regularizer')
  80   self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)
  86   loss = regularizers.l1_l2_regularizer(0.0, 1.0)(tensor)
  88   self.assertEquals(loss.op.name, 'l1_l2_regularizer')
  89   self.assertAlmostEqual(loss.eval(), num_elem / 2, 5)
  95   loss = regularizers.l1_l2_regularizer(1.0, 0.0)(tensor)
  97   self.assertEquals(loss.op.name, 'l1_l2_regularizer')
  98   self.assertAlmostEqual(loss.eval(), num_elem, 5)
  103  loss = regularizers.l1_l2_regularizer(0.0, 0.0)(tensor)
  [all …]
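Note: with a tensor of ones and scales (1.0, 1.0) the test expects num_elem + num_elem / 2, i.e. l1 * sum(|w|) plus l2 * sum(w**2) / 2. A small NumPy sketch of that penalty (the helper name and the all-ones test tensor are assumptions for illustration):

    import numpy as np

    def l1_l2_penalty(weights, scale_l1, scale_l2):
        # L1 term plus halved L2 term, matching the expected values in the test above.
        w = np.asarray(weights, dtype=np.float64)
        return scale_l1 * np.sum(np.abs(w)) + scale_l2 * np.sum(np.square(w)) / 2.0

    tensor = np.ones((5, 5, 5))              # num_elem == 125, an all-ones example tensor
    print(l1_l2_penalty(tensor, 1.0, 1.0))   # 125 + 125 / 2 = 187.5
    print(l1_l2_penalty(tensor, 0.0, 1.0))   # 62.5
    print(l1_l2_penalty(tensor, 1.0, 0.0))   # 125.0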
|
D | target_column_test.py |
  38   5. / 3, sess.run(target_column.loss(prediction, labels, {})))
  49   sess.run(target_column.loss(prediction, labels, features)),
  68   sess.run(target_column.loss(logits, labels, {})),
  82   sess.run(target_column.loss(logits, labels, features)),
  106  sess.run(target_column.loss(logits, labels, {})))
  118  1.5514446, sess.run(target_column.loss(logits, labels, features)))
  146  loss = target_column.loss(predictions, labels, {})
  152  self.assertAlmostEqual(0.25, sess.run(loss))
  160  loss = target_column.loss(predictions, labels, features)
  166  self.assertAlmostEqual(8.6 / 12, sess.run(loss), places=3)
|
/external/tensorflow/tensorflow/contrib/compiler/ |
D | xla_test.py |
  274  loss = constant_op.constant(_EXPECTED_LOSS)
  276  mode=mode, loss=loss, train_op=array_ops.identity(loss))
  311  loss = constant_op.constant(_EXPECTED_LOSS)
  312  mock_xla_compile.return_value = [loss]
  322  self.assertEqual(sess.run(estimator_spec.loss), sess.run(loss))
  323  self.assertEqual(sess.run(estimator_spec.train_op), sess.run(loss))
  335  loss = constant_op.constant(_EXPECTED_LOSS)
  336  mock_xla_compile.return_value = [loss]
  346  self.assertEqual(sess.run(estimator_spec.loss), sess.run(loss))
  347  self.assertEqual(sess.run(estimator_spec.train_op), sess.run(loss))
  [all …]
|
/external/tensorflow/tensorflow/python/keras/mixed_precision/experimental/ |
D | loss_scale_optimizer.py |
  72   def _compute_gradients(self, loss, var_list, grad_loss=None): argument
  73   loss = self._scale_loss(loss)
  74   …grads_and_vars = self._optimizer._compute_gradients(loss, var_list, # pylint: disable=protected-a…
  81   def get_gradients(self, loss, params): argument
  82   loss = self._scale_loss(loss)
  83   grads = self._optimizer.get_gradients(loss, params)
  86   def _scale_loss(self, loss): argument
  88   if callable(loss):
  89   return lambda: loss() * self._loss_scale
  91   return loss * self._loss_scale
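Note: the wrapper multiplies the loss by the loss scale before gradients are computed, and handles both tensor losses and zero-argument callables (lines 88-91). A pure-Python sketch of that step; the gradient-unscaling helper is an assumption added for illustration, since the corresponding lines are not part of this excerpt:

    def scale_loss(loss, loss_scale):
        # Scale a loss value, or wrap a zero-argument loss callable so it scales lazily.
        if callable(loss):
            return lambda: loss() * loss_scale
        return loss * loss_scale

    def unscale_gradients(grads, loss_scale):
        # Assumed counterpart: divide the computed gradients by the same scale.
        return [g / loss_scale for g in grads]

    loss_scale = 128.0
    scaled = scale_loss(lambda: 0.25, loss_scale)
    print(scaled())                                       # 32.0
    print(unscale_gradients([64.0, 512.0], loss_scale))   # [0.5, 4.0]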
|
/external/tensorflow/tensorflow/contrib/training/python/training/ |
D | training_test.py |
  99   loss = losses.log_loss(tf_labels, tf_predictions)
  101  train_op = training.create_train_op(loss, optimizer)
  116  loss = losses.log_loss(tf_labels, tf_predictions)
  119  train_op = training.create_train_op(loss, optimizer)
  150  loss = losses.log_loss(tf_labels, tf_predictions)
  152  train_op = training.create_train_op(loss, optimizer, update_ops=[])
  183  loss = losses.log_loss(tf_labels, tf_predictions)
  185  train_op = training.create_train_op(loss, optimizer)
  206  loss = losses.log_loss(tf_labels, tf_predictions)
  208  train_op = training.create_train_op(loss, optimizer, global_step=None)
  [all …]
|
/external/tensorflow/tensorflow/contrib/slim/python/slim/ |
D | learning_test.py |
  256  loss = learning.train(
  258  self.assertLess(loss, .1)
  443  loss = learning.train(
  445  self.assertIsNotNone(loss)
  446  self.assertLess(loss, .015)
  462  loss = learning.train(
  464  self.assertIsNotNone(loss)
  465  self.assertLess(loss, .015)
  482  loss = learning.train(
  488  self.assertIsNotNone(loss)
  [all …]
|
/external/tensorflow/tensorflow/python/ops/ |
D | nn_xent_test.py |
  61   loss = nn_impl.sigmoid_cross_entropy_with_logits(
  63   self.assertEqual("mylogistic", loss.op.name)
  70   loss = nn_impl.sigmoid_cross_entropy_with_logits(
  73   tf_loss = self.evaluate(loss)
  81   loss = nn_impl.sigmoid_cross_entropy_with_logits(
  84   tf_loss = self.evaluate(loss)
  92   loss = nn_impl.sigmoid_cross_entropy_with_logits(
  94   err = gradient_checker.compute_gradient_error(logits, sizes, loss, sizes)
  103  loss = nn_impl.sigmoid_cross_entropy_with_logits(
  105  grads = gradients_impl.gradients(loss, logits)[0].eval()
  [all …]
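Note: sigmoid cross-entropy with logits is conventionally computed in the numerically stable form max(x, 0) - x*z + log(1 + exp(-|x|)) for logits x and labels z, which avoids overflow for large |x|. A NumPy reference sketch of that formula (a reimplementation for illustration, not the TensorFlow kernel itself):

    import numpy as np

    def sigmoid_cross_entropy_with_logits(labels, logits):
        # Stable elementwise form: max(x, 0) - x * z + log(1 + exp(-|x|)).
        x = np.asarray(logits, dtype=np.float64)
        z = np.asarray(labels, dtype=np.float64)
        return np.maximum(x, 0.0) - x * z + np.log1p(np.exp(-np.abs(x)))

    logits = np.array([-100.0, -2.0, 0.0, 2.0, 100.0])   # hypothetical logits
    labels = np.array([0.0, 0.0, 1.0, 1.0, 1.0])         # hypothetical targets
    print(sigmoid_cross_entropy_with_logits(labels, logits))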
|
/external/tensorflow/tensorflow/contrib/seq2seq/python/kernel_tests/ |
D | loss_test.py |
  23   from tensorflow.contrib.seq2seq.python.ops import loss
  61   average_loss_per_example = loss.sequence_loss(
  68   average_loss_per_sequence = loss.sequence_loss(
  76   average_loss_per_batch = loss.sequence_loss(
  84   total_loss = loss.sequence_loss(
  96   seq_loss = loss.SequenceLoss(average_across_timesteps=True,
  105  seq_loss = loss.SequenceLoss(average_across_timesteps=False,
  115  seq_loss = loss.SequenceLoss(average_across_timesteps=True,
  125  seq_loss = loss.SequenceLoss(average_across_timesteps=False,
  139  seq_loss = loss.SequenceLoss(average_across_timesteps=False,
  [all …]
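Note: the test exercises sequence_loss with every combination of average_across_timesteps and average_across_batch. A rough NumPy sketch of how those flags change the reduction over per-token losses and padding weights (the library's exact semantics may differ; the inputs here are invented):

    import numpy as np

    def sequence_loss_sketch(crossent, weights,
                             average_across_timesteps=True,
                             average_across_batch=True):
        # crossent and weights both have shape [batch, time].
        weighted = np.asarray(crossent, dtype=np.float64) * np.asarray(weights, dtype=np.float64)
        if average_across_timesteps and average_across_batch:
            return np.sum(weighted) / np.sum(weights)                   # single scalar
        if average_across_timesteps:
            return np.sum(weighted, axis=1) / np.sum(weights, axis=1)   # one value per sequence
        if average_across_batch:
            return np.sum(weighted, axis=0) / np.sum(weights, axis=0)   # one value per timestep
        return weighted                                                  # no reduction

    crossent = np.array([[2.3, 1.1, 0.7], [0.9, 1.5, 2.0]])   # hypothetical per-token losses
    weights = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 0.0]])    # hypothetical padding mask
    print(sequence_loss_sketch(crossent, weights))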
|
/external/tensorflow/tensorflow/contrib/opt/python/training/ |
D | external_optimizer_test.py |
  78   loss = math_ops.reduce_sum(
  80   loss += math_ops.reduce_sum(
  82   loss += math_ops.reduce_sum(
  86   optimizer = MockOptimizerInterface(loss)
  105  loss = math_ops.reduce_sum(math_ops.square(vector - minimum_location)) / 2.
  108  optimizer = MockOptimizerInterface(loss)
  115  extra_fetches = [loss]
  236  loss = math_ops.reduce_sum(math_ops.square(vector))
  243  loss, equalities=equalities, inequalities=inequalities, method='SLSQP')
  255  loss = math_ops.reduce_sum(math_ops.square(vector))
  [all …]
|
/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/ |
D | model_fn.py |
  89   loss=None, argument
  146  get_graph_from_inputs((predictions, loss, train_op))
  157  if loss is None:
  161  loss = ops.convert_to_tensor(loss)
  162  loss_shape = loss.get_shape()
  164  raise ValueError('Loss must be scalar: %s.' % loss)
  166  loss = array_ops.reshape(loss, [])
  203  loss=loss,
  301  loss=self.loss,
|
/external/deqp-deps/glslang/Test/ |
D | hlsl.promotions.frag |
  29   float3 Fn_R_F3D(out float3 p) { p = d3; return d3; } // valid, but loss of precision on downconve…
  34   int3 Fn_R_I3D(out int3 p) { p = d3; return d3; } // valid, but loss of precision on downconvers…
  39   uint3 Fn_R_U3D(out uint3 p) { p = d3; return d3; } // valid, but loss of precision on downconver…
  57   float3 r03 = d3; // valid, but loss of precision on downconversion.
  62   int3 r13 = d3; // valid, but loss of precision on downconversion.
  67   uint3 r23 = d3; // valid, but loss of precision on downconversion.
  83   r03 *= d3; // valid, but loss of precision on downconversion.
  88   r13 *= d3; // valid, but loss of precision on downconversion.
  93   r23 *= d3; // valid, but loss of precision on downconversion.
  106  r03 *= ds; // valid, but loss of precision on downconversion.
  [all …]
|
/external/tensorflow/tensorflow/python/keras/engine/ |
D | training_test.py |
  65   def _do_test_compile_with_model_and_single_loss(self, model, loss): argument
  66   model.compile(optimizer='adam', loss=loss)
  67   self.assertEqual(model.loss, loss)
  69   loss = losses.get(loss)
  70   if not isinstance(loss, list):
  71   loss_list = [loss] * len(model.outputs)
  84   def test_compile_with_single_output(self, loss): argument
  87   self._do_test_compile_with_model_and_single_loss(model, loss)
  93   def test_compile_with_multi_output(self, loss): argument
  95   self._do_test_compile_with_model_and_single_loss(model, loss)
  [all …]
|
/external/tensorflow/tensorflow/contrib/distribute/python/ |
D | keras_test.py |
  116  loss='categorical_crossentropy',
  350  loss='categorical_crossentropy',
  380  loss='categorical_crossentropy',
  459  loss='categorical_crossentropy',
  614  loss = 'mse'
  616  model.compile(optimizer, loss, metrics=metrics)
  646  loss = 'mse'
  647  model.compile(optimizer, loss)
  681  loss = 'mse'
  682  model.compile(optimizer, loss)
  [all …]
|
/external/tensorflow/tensorflow/python/keras/optimizer_v2/ |
D | optimizer_v2_test.py |
  66   loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop function
  74   opt_op = sgd.minimize(loss, var_list=[var0, var1])
  87   def loss(): function
  97   opt_op = sgd.minimize(loss, [var0, var1])
  108  sgd.minimize(loss, [var0, var1])
  120  sgd.minimize(loss, [var0, var1])
  130  loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop function
  139  opt_op = sgd.minimize(loss, var_list=[var0, var1], grad_loss=grad_loss)
  154  loss = lambda: 5 * var0 # pylint: disable=cell-var-from-loop function
  158  sgd_op.minimize(loss, var_list=[var1])
  [all …]
|
/external/tensorflow/tensorflow/python/saved_model/model_utils/ |
D | export_output_test.py |
  245  loss = {'my_loss': constant_op.constant([0])}
  254  outputter = MockSupervisedOutput(loss, predictions, metrics)
  255  self.assertEqual(outputter.loss['loss/my_loss'], loss['my_loss'])
  265  loss['my_loss'], predictions['output1'], metrics['metrics'])
  266  self.assertEqual(outputter.loss, {'loss': loss['my_loss']})
  275  self.assertEqual(len(outputter.loss), 1)
  292  loss = {('my', 'loss'): constant_op.constant([0])}
  303  outputter = MockSupervisedOutput(loss, predictions, metrics)
  304  self.assertEqual(set(outputter.loss.keys()), set(['loss/my/loss']))
  317  loss = {'loss': constant_op.constant([0])}
  [all …]
|
/external/tensorflow/tensorflow/examples/udacity/ |
D | 6_lstm.ipynb |
  577  " loss = tf.reduce_mean(\n",
  586  " gradients, v = zip(*optimizer.compute_gradients(loss))\n",
  668  " [optimizer, loss, train_prediction, learning_rate], feed_dict=feed_dict)\n",
  673  " # The mean loss is an estimate of the loss over the last few batches.\n",
  675  " 'Average loss at step %d: %f learning rate: %f' % (step, mean_loss, lr))\n",
  708  "Average loss at step 0 : 3.29904174805 learning rate: 10.0\n",
  718  "Average loss at step 100 : 2.59553678274 learning rate: 10.0\n",
  721  "Average loss at step 200 : 2.24747137785 learning rate: 10.0\n",
  724  "Average loss at step 300 : 2.09438110709 learning rate: 10.0\n",
  727  "Average loss at step 400 : 1.99440989017 learning rate: 10.0\n",
  [all …]
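Note: the LSTM notebook separates gradients from variables with zip(*optimizer.compute_gradients(loss)) so they can be clipped before being applied. A NumPy sketch of global-norm gradient clipping, the step that pattern typically feeds into (the values and the clip threshold are invented):

    import numpy as np

    def clip_by_global_norm(grads, clip_norm):
        # Scale every gradient by the same factor if their combined L2 norm exceeds clip_norm.
        global_norm = np.sqrt(sum(np.sum(np.square(g)) for g in grads))
        scale = clip_norm / max(global_norm, clip_norm)
        return [g * scale for g in grads], global_norm

    grads = [np.array([3.0, 4.0]), np.array([12.0])]   # hypothetical gradients, global norm 13
    clipped, norm = clip_by_global_norm(grads, 1.25)
    print(norm, clipped)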
|
D | 4_convolutions.ipynb |
  288  " loss = tf.reduce_mean(\n",
  292  " optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)\n",
  349  " [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
  351  " print('Minibatch loss at step %d: %f' % (step, l))\n",
  362  "Minibatch loss at step 0 : 3.51275\n",
  365  "Minibatch loss at step 50 : 1.48703\n",
  368  "Minibatch loss at step 100 : 1.04377\n",
  371  "Minibatch loss at step 150 : 0.601682\n",
  374  "Minibatch loss at step 200 : 0.898649\n",
  377  "Minibatch loss at step 250 : 1.3637\n",
  [all …]
|
/external/autotest/client/common_lib/cros/network/ |
D | ping_runner.py |
  122  loss = _regex_float_from_string('([0-9]+\.[0-9]+)% packet loss',
  124  if None in (sent, received, loss):
  129  return PingResult(sent, received, loss,
  137  return PingResult(sent, received, loss)
  215  loss = _regex_float_from_string('([0-9]+(\.[0-9]+)?)% packet loss',
  217  if None in (sent, received, loss):
  223  return PingResult(sent, received, loss,
  231  return PingResult(sent, received, loss)
  261  def __init__(self, sent, received, loss, argument
  278  self.loss = loss
  [all …]
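Note: ping_runner.py recovers the sent/received counts and the packet-loss percentage from ping output with regular expressions such as ([0-9]+(\.[0-9]+)?)% packet loss. A small sketch of that parsing step with an invented output line (the helper name here is hypothetical):

    import re

    def parse_packet_loss(ping_output):
        # Return the packet-loss percentage from ping output, or None if it is absent.
        match = re.search(r'([0-9]+(\.[0-9]+)?)% packet loss', ping_output)
        return float(match.group(1)) if match else None

    sample = '10 packets transmitted, 9 received, 10.0% packet loss, time 9012ms'
    print(parse_packet_loss(sample))   # 10.0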
|