
Searched refs:logits (Results 1 – 25 of 248) sorted by relevance


/external/tensorflow/tensorflow/contrib/kernel_methods/python/
losses_test.py
36 logits = constant_op.constant([-1.0, 2.1], shape=(2,))
39 _ = losses.sparse_multiclass_hinge_loss(labels, logits)
44 logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
47 _ = losses.sparse_multiclass_hinge_loss(labels, logits)
52 logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
56 _ = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
61 logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
64 _ = losses.sparse_multiclass_hinge_loss(labels, logits)
69 logits = constant_op.constant([-1.0, 2.1], shape=(2, 1))
72 _ = losses.sparse_multiclass_hinge_loss(labels, logits, weights=None)
[all …]
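
The matches above exercise the contrib kernel-methods multiclass hinge loss. As an illustrative aside (a sketch, not part of the indexed files, and assuming a TF 1.x build where tf.contrib.kernel_methods exposes sparse_multiclass_hinge_loss), a minimal graph-mode call with made-up values might look like:

    import tensorflow as tf

    # Hypothetical inputs: 2 examples, 3 classes; values are illustrative only.
    labels = tf.constant([0, 2], dtype=tf.int64)      # integer class ids, shape [batch]
    logits = tf.constant([[1.0, -0.5, 0.2],
                          [0.3, 0.8, -1.2]])          # shape [batch, num_classes]

    # Assumes this contrib export is available in the build being indexed.
    loss = tf.contrib.kernel_methods.sparse_multiclass_hinge_loss(labels, logits)

    with tf.Session() as sess:
        print(sess.run(loss))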
/external/tensorflow/tensorflow/python/ops/
nn_xent_test.py
40 def _SigmoidCrossEntropyWithLogits(self, logits, targets): argument
41 assert len(logits) == len(targets)
42 pred = [1 / (1 + exp(-x)) for x in logits]
52 logits = constant_op.constant(x, shape=sizes, dtype=dtype, name="logits")
55 return logits, targets, losses
60 logits, targets, _ = self._Inputs()
62 labels=targets, logits=logits, name="mylogistic")
69 logits, targets, losses = self._Inputs(dtype=dtype)
71 labels=targets, logits=logits)
80 logits, targets, losses = self._Inputs(dtype=dtype, sizes=[2, 2, 2])
[all …]
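
nn_xent_test.py above tests tf.nn.sigmoid_cross_entropy_with_logits. For orientation only (a sketch, not the test code), a minimal TF 1.x call with made-up targets and logits of identical shape:

    import tensorflow as tf

    # Binary targets and logits must share the same shape; values are illustrative.
    logits = tf.constant([[-1.0, 0.0, 2.5]])
    targets = tf.constant([[0.0, 1.0, 1.0]])

    # The op returns an element-wise loss; reduce explicitly if a scalar is wanted.
    losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits)
    mean_loss = tf.reduce_mean(losses)

    with tf.Session() as sess:
        print(sess.run(mean_loss))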
/external/tensorflow/tensorflow/contrib/distributions/python/ops/
onehot_categorical.py
97 logits=None, argument
128 with ops.name_scope(name, values=[logits, probs]) as name:
130 name=name, logits=logits, probs=probs, validate_args=validate_args,
162 def logits(self): member in OneHotCategorical
172 return array_ops.shape(self.logits)[:-1]
175 return self.logits.get_shape()[:-1]
178 return array_ops.shape(self.logits)[-1:]
181 return self.logits.get_shape().with_rank_at_least(1)[-1:]
184 sample_shape = array_ops.concat([[n], array_ops.shape(self.logits)], 0)
185 logits = self.logits
[all …]
relaxed_onehot_categorical.py
140 logits=None, argument
175 with ops.name_scope(name, values=[logits, probs, temperature]) as name:
178 name=name, logits=logits, probs=probs, validate_args=validate_args,
227 def logits(self): member in ExpRelaxedOneHotCategorical
240 return self.logits.get_shape()[:-1]
243 return array_ops.shape(self.logits)[-1:]
246 return self.logits.get_shape().with_rank_at_least(1)[-1:]
249 sample_shape = array_ops.concat([[n], array_ops.shape(self.logits)], 0)
250 logits = self.logits * array_ops.ones(sample_shape, dtype=self.dtype)
251 logits_2d = array_ops.reshape(logits, [-1, self.event_size])
[all …]
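
Both contrib distributions above (OneHotCategorical and the relaxed variants) are parameterized by logits. A hedged usage sketch, assuming a TF 1.x build where tf.contrib.distributions is present; the logits and temperature are arbitrary example values:

    import tensorflow as tf

    tfd = tf.contrib.distributions   # assumption: contrib distributions ship with this build

    logits = tf.constant([2.0, 3.0, -4.0])   # unnormalized log-probabilities over 3 classes
    dist = tfd.RelaxedOneHotCategorical(temperature=0.5, logits=logits)

    # Samples lie on the probability simplex; low temperature pushes them toward one-hot vectors.
    sample = dist.sample()

    with tf.Session() as sess:
        print(sess.run(sample))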
/external/tensorflow/tensorflow/contrib/slim/python/slim/nets/
vgg_test.py
39 logits, _ = vgg.vgg_a(inputs, num_classes)
40 self.assertEquals(logits.op.name, 'vgg_a/fc8/squeezed')
41 self.assertListEqual(logits.get_shape().as_list(),
50 logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False)
51 self.assertEquals(logits.op.name, 'vgg_a/fc8/BiasAdd')
52 self.assertListEqual(logits.get_shape().as_list(),
112 logits, _ = vgg.vgg_a(eval_inputs, is_training=False)
113 self.assertListEqual(logits.get_shape().as_list(),
115 predictions = math_ops.argmax(logits, 1)
127 logits, _ = vgg.vgg_a(train_inputs)
[all …]
overfeat_test.py
38 logits, _ = overfeat.overfeat(inputs, num_classes)
39 self.assertEquals(logits.op.name, 'overfeat/fc8/squeezed')
40 self.assertListEqual(logits.get_shape().as_list(),
49 logits, _ = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False)
50 self.assertEquals(logits.op.name, 'overfeat/fc8/BiasAdd')
51 self.assertListEqual(logits.get_shape().as_list(),
103 logits, _ = overfeat.overfeat(eval_inputs, is_training=False)
104 self.assertListEqual(logits.get_shape().as_list(),
106 predictions = math_ops.argmax(logits, 1)
118 logits, _ = overfeat.overfeat(train_inputs)
[all …]
alexnet_test.py
38 logits, _ = alexnet.alexnet_v2(inputs, num_classes)
39 self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
40 self.assertListEqual(logits.get_shape().as_list(),
49 logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
50 self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
51 self.assertListEqual(logits.get_shape().as_list(),
103 logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
104 self.assertListEqual(logits.get_shape().as_list(),
106 predictions = math_ops.argmax(logits, 1)
118 logits, _ = alexnet.alexnet_v2(train_inputs)
[all …]
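
The three slim model tests above (vgg, overfeat, alexnet) all treat the first return value of the model function as the logits tensor. As a rough sketch only (not taken from the tests, and assuming a TF 1.x build that bundles the slim reference nets):

    import tensorflow as tf
    from tensorflow.contrib.slim import nets   # assumption: contrib/slim is present

    # Illustrative input batch; vgg_a expects 224x224 RGB images.
    inputs = tf.random_uniform([1, 224, 224, 3])
    logits, end_points = nets.vgg.vgg_a(inputs, num_classes=10)

    # Class predictions are typically an argmax over the logits dimension.
    predictions = tf.argmax(logits, axis=1)

Running this graph would additionally require initializing the model variables; the sketch only shows how the logits tensor is produced and consumed.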
/external/tensorflow/tensorflow/contrib/boosted_trees/python/utils/
losses.py
31 loss = losses.hinge_loss(labels=labels, logits=predictions, weights=weights)
49 labels=labels, logits=predictions)
97 def per_example_maxent_loss(labels, weights, logits, num_classes, eps=1e-15): argument
127 unnormalized_probs = math_ops.exp(logits)
136 zeros = array_ops.zeros_like(probs_for_real_class, dtype=logits.dtype) + eps
138 probs_for_real_class, dtype=logits.dtype) - eps
201 def exp_with_logits(name, eps, labels=None, logits=None): argument
222 with ops.name_scope(name, "exp_loss", [logits, labels]) as name:
223 logits = ops.convert_to_tensor(logits, name="logits")
226 labels.get_shape().merge_with(logits.get_shape())
[all …]
/external/tensorflow/tensorflow/python/ops/distributions/
bernoulli.py
52 logits=None, argument
86 logits=logits,
104 def logits(self): member in Bernoulli
139 event = math_ops.cast(event, self.logits.dtype)
140 logits = self.logits
144 def _broadcast(logits, event): argument
145 return (array_ops.ones_like(event) * logits,
146 array_ops.ones_like(logits) * event)
149 logits.get_shape().is_fully_defined() and
150 event.get_shape() == logits.get_shape()):
[all …]
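
bernoulli.py above defines the logits property of the core Bernoulli distribution. For orientation (not from the indexed file), a minimal TF 1.x sketch with arbitrary logits:

    import tensorflow as tf

    # Bernoulli parameterized by logits; probs is sigmoid(logits).
    dist = tf.distributions.Bernoulli(logits=tf.constant([-1.0, 0.0, 2.0]))

    probs = dist.probs
    log_prob = dist.log_prob([0, 1, 1])   # per-element log-likelihood of the given outcomes

    with tf.Session() as sess:
        print(sess.run([probs, log_prob]))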
categorical.py
163 logits=None, argument
194 with ops.name_scope(name, values=[logits, probs]) as name:
196 logits=logits,
250 def logits(self): member in Categorical
263 return self.logits.get_shape()[:-1]
272 if self.logits.get_shape().ndims == 2:
273 logits_2d = self.logits
275 logits_2d = array_ops.reshape(self.logits, [-1, self.event_size])
311 k, logits = _broadcast_cat_event_and_params(
312 k, self.logits, base_dtype=self.dtype.base_dtype)
[all …]
/external/tensorflow/tensorflow/contrib/distributions/python/kernel_tests/
relaxed_onehot_categorical_test.py
33 logits = random_ops.random_uniform(
38 temperatures, logits, dtype=dtype)
45 logits = [2.0, 3.0, -4.0]
47 logits)
48 expected_p = np.exp(logits)/np.sum(np.exp(logits))
55 logits = [.3, .1, .4]
56 k = len(logits)
57 p = np.exp(logits)/np.sum(np.exp(logits))
59 logits)
74 logits = [2.0, 3.0, -4.0]
[all …]
onehot_categorical_test.py
34 logits = random_ops.random_uniform(
36 return onehot_categorical.OneHotCategorical(logits, dtype=dtype)
49 self.assertAllEqual([2], dist.logits.get_shape())
53 logits = np.log(p) - 50.
54 dist = onehot_categorical.OneHotCategorical(logits=logits)
57 self.assertAllEqual([2], dist.logits.get_shape())
59 self.assertAllClose(dist.logits.eval(), logits)
92 self.assertEqual(dist.logits.dtype, dtypes.float32)
93 self.assertEqual(dist.logits.dtype, dist.entropy().dtype)
94 self.assertEqual(dist.logits.dtype, dist.prob(
[all …]
estimator_test.py
54 def actual_loss(logits, labels): argument
55 mu = actual_mean(logits)
56 sigma = actual_stddev(logits)
62 def actual_mean(logits): argument
63 return logits[..., 0]
65 def actual_stddev(logits): argument
66 return softplus(logits[..., 1] + scale_bias)
68 def make_distribution_fn(logits): argument
70 loc=logits[..., 0],
71 scale=nn_ops.softplus(logits[..., 1] + scale_bias))
[all …]
/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/
head.py
154 logits=None, argument
559 def _mean_squared_loss(labels, logits, weights=None): argument
560 with ops.name_scope(None, "mean_squared_loss", (logits, labels)) as name:
561 logits = ops.convert_to_tensor(logits)
567 if len(logits.get_shape()) == 1:
568 logits = array_ops.expand_dims(logits, axis=1)
569 logits.get_shape().assert_is_compatible_with(labels.get_shape())
571 logits, math_ops.cast(labels, dtypes.float32), name=name)
575 def _poisson_loss(labels, logits, weights=None): argument
577 with ops.name_scope(None, "_poisson_loss", (logits, labels)) as name:
[all …]
head_test.py
107 def _log_poisson_loss(self, logits, labels): argument
108 x = np.array([f[0] for f in logits])
118 logits = ((0.,), (-1.,), (3.,))
125 logits=logits)
129 loss = self._log_poisson_loss(logits, labels)
151 logits=((1.,), (1.,), (3.,)))
169 logits=((1.,), (1.,), (3.,)))
185 logits=((1., 1.), (1., 1.), (3., 1.)))
216 logits=((1.,), (1.,), (3.,)))
226 logits=((0.,), (1.,), (1.,)))
[all …]
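
head_test.py above checks a Poisson regression head whose loss is computed from log-rate logits. A hedged sketch of the underlying primitive, tf.nn.log_poisson_loss; the logits mirror the constants at line 118 above, while the targets and the mean reduction are my own illustrative choices:

    import tensorflow as tf

    targets = tf.constant([[0.0], [1.0], [3.0]])      # observed counts (illustrative)
    log_input = tf.constant([[0.0], [-1.0], [3.0]])   # the "logits", i.e. log of the predicted rate

    loss = tf.reduce_mean(tf.nn.log_poisson_loss(targets, log_input))

    with tf.Session() as sess:
        print(sess.run(loss))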
/external/tensorflow/tensorflow/contrib/layers/python/layers/
target_column.py
164 def logits_to_predictions(self, logits, proba=False): argument
168 def get_eval_ops(self, features, logits, labels, metrics=None): argument
204 def training_loss(self, logits, target, features, name="training_loss"): argument
226 loss_unweighted = self._loss_fn(logits, target)
234 def loss(self, logits, target, features): argument
251 loss_unweighted = self._loss_fn(logits, target)
274 def logits_to_predictions(self, logits, proba=False): argument
276 return array_ops.squeeze(logits, axis=[1])
277 return logits
279 def get_eval_ops(self, features, logits, labels, metrics=None): argument
[all …]
/external/tensorflow/tensorflow/contrib/losses/python/losses/
loss_ops_test.py
114 logits = constant_op.constant([[10.0, 0.0, 0.0],
122 loss_ops.softmax_cross_entropy(logits, labels, weights=None)
126 logits = constant_op.constant([[10.0, 0.0, 0.0],
132 loss = loss_ops.softmax_cross_entropy(logits, labels)
137 logits = constant_op.constant([[10.0, 0.0, 0.0],
145 loss = loss_ops.softmax_cross_entropy(logits, labels)
150 logits = constant_op.constant([[10.0, 0.0, 0.0],
158 loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
162 logits = constant_op.constant([[10.0, 0.0, 0.0],
170 loss = loss_ops.softmax_cross_entropy(logits, labels,
[all …]
/external/tensorflow/tensorflow/contrib/sparsemax/python/ops/
sparsemax.py
30 def sparsemax(logits, name=None): argument
47 with ops.name_scope(name, "sparsemax", [logits]) as name:
48 logits = ops.convert_to_tensor(logits, name="logits")
49 obs = array_ops.shape(logits)[0]
50 dims = array_ops.shape(logits)[1]
59 z = logits
67 1, math_ops.cast(dims, logits.dtype) + 1, dtype=logits.dtype)
82 tau_z = (tau_sum - 1) / math_ops.cast(k_z, logits.dtype)
86 math_ops.cast(0, logits.dtype), z - tau_z[:, array_ops.newaxis])
91 array_ops.fill([obs, dims], math_ops.cast(float("nan"), logits.dtype)),
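
sparsemax.py above implements the sparsemax transformation of a logits matrix. As a usage sketch only, assuming the contrib export tf.contrib.sparsemax.sparsemax in a TF 1.x build (the values are arbitrary):

    import tensorflow as tf

    # sparsemax maps each row of logits to a probability vector, typically with exact zeros.
    logits = tf.constant([[1.0, 1.5, 0.2],
                          [0.0, 0.0, 5.0]])
    probs = tf.contrib.sparsemax.sparsemax(logits)

    with tf.Session() as sess:
        print(sess.run(probs))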
/external/tensorflow/tensorflow/examples/speech_commands/
models_test.py
56 logits, dropout_prob = models.create_model(fingerprint_input,
58 self.assertIsNotNone(logits)
60 self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
68 logits = models.create_model(fingerprint_input, model_settings, "conv",
70 self.assertIsNotNone(logits)
71 self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
78 logits, dropout_prob = models.create_model(
80 self.assertIsNotNone(logits)
82 self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
90 logits, dropout_prob = models.create_model(
[all …]
/external/tensorflow/tensorflow/contrib/nn/python/ops/
cross_entropy.py
28 def deprecated_flipped_softmax_cross_entropy_with_logits(logits, argument
68 labels=labels, logits=logits, dim=dim, name=name)
75 def deprecated_flipped_sparse_softmax_cross_entropy_with_logits(logits, argument
122 labels=labels, logits=logits, name=name)
129 def deprecated_flipped_sigmoid_cross_entropy_with_logits(logits, argument
177 labels=targets, logits=logits, name=name)
/external/tensorflow/tensorflow/python/kernel_tests/
losses_test.py
120 logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
125 losses.softmax_cross_entropy(labels, logits, weights=None)
130 logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
133 loss = losses.softmax_cross_entropy(labels, logits)
139 logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
144 loss = losses.softmax_cross_entropy(labels, logits)
150 logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
155 loss = losses.softmax_cross_entropy(labels, logits, weights)
160 logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
165 loss = losses.softmax_cross_entropy(labels, logits,
[all …]
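
losses_test.py above covers tf.losses.softmax_cross_entropy, which pairs one-hot labels with logits. For orientation only (a sketch patterned on the constants shown above, not the test code):

    import tensorflow as tf

    labels = tf.constant([[1.0, 0.0, 0.0],
                          [0.0, 1.0, 0.0]])           # one-hot labels
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0]])          # shape [batch, num_classes]

    # Note the core tf.losses argument order: one-hot labels first, then logits.
    loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)

    with tf.Session() as sess:
        print(sess.run(loss))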
sparse_xent_op_test.py
149 labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])
155 labels=constant_op.constant(0), logits=constant_op.constant(1.0))
162 labels=labels, logits=[[7.]])
169 labels=constant_op.constant(0), logits=constant_op.constant([1.0]))
203 labels=l, logits=f, name="xent")
215 logits = math_ops.matmul(images_placeholder, weights_with_zeros)
217 labels=labels_placeholder, logits=logits)
232 labels=labels, logits=features)
257 logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
259 labels=array_ops.squeeze(labels), logits=logits)
[all …]
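
sparse_xent_op_test.py above exercises tf.nn.sparse_softmax_cross_entropy_with_logits, which pairs integer class labels with dense logits. A minimal illustrative call (values made up):

    import tensorflow as tf

    labels = tf.constant([0, 2])                      # class ids, shape [batch]
    logits = tf.constant([[2.0, 0.5, -1.0],
                          [0.1, 0.2, 3.0]])           # shape [batch, num_classes]

    # Returns one loss value per example.
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)

    with tf.Session() as sess:
        print(sess.run(losses))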
/external/tensorflow/tensorflow/python/kernel_tests/random/
multinomial_op_test.py
40 def composed_sampler(logits, num_samples): argument
42 unif = random_ops.random_uniform(logits.get_shape().concatenate(
46 logits = array_ops.expand_dims(logits, -1)
49 return math_ops.argmax(logits + noise, axis=1)
63 logits = constant_op.constant([[-10., 10., -10.], [-10., -10., 10.]])
66 logits, num_samples, output_dtype=output_dtype))
102 logits = np.array([[1000.] * 5])
104 logits *= -1
105 samples = self.evaluate(random_ops.multinomial(logits, 10))
121 logits = np.log(probs).astype(np.float32)
[all …]
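
multinomial_op_test.py above samples class indices from logits. A hedged sketch of the TF 1.x op under test, tf.multinomial, with logits echoing the constants at line 63 above:

    import tensorflow as tf

    logits = tf.constant([[-10.0, 10.0, -10.0],
                          [-10.0, -10.0, 10.0]])      # unnormalized log-probabilities
    samples = tf.multinomial(logits, num_samples=5)   # int64 class ids, shape [2, 5]

    with tf.Session() as sess:
        print(sess.run(samples))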
/external/tensorflow/tensorflow/contrib/boosted_trees/estimator_batch/
model.py
131 logits = predictions_dict["predictions"]
133 logits = logits_modifier_function(logits, features, mode)
163 logits=logits)
172 logits=logits)
187 logits=logits)
319 logits = predictions_dict[gbdt_batch.PREDICTIONS]
321 logits = logits_modifier_function(logits, features, mode)
346 logits = predictions_1 - predictions_2
348 logits = logits_modifier_function(logits, features, mode)
351 predictions_dict[gbdt_batch.PREDICTIONS] = logits
[all …]
/external/tensorflow/tensorflow/python/ops/losses/
losses_impl.py
321 def hinge_loss(labels, logits, weights=1.0, scope=None, argument
355 if logits is None:
357 with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope:
358 logits = math_ops.cast(logits, dtype=dtypes.float32)
360 logits.get_shape().assert_is_compatible_with(labels.get_shape())
365 math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
656 multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None, argument
699 if logits is None:
702 (logits, multi_class_labels, weights)) as scope:
703 logits = ops.convert_to_tensor(logits)
[all …]
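
losses_impl.py above defines tf.losses.hinge_loss and tf.losses.sigmoid_cross_entropy, both of which check that logits and labels have compatible shapes. An illustrative hinge-loss call with made-up labels in {0, 1} (hinge_loss maps them to -1/+1 internally):

    import tensorflow as tf

    labels = tf.constant([[1.0], [0.0], [1.0]])
    logits = tf.constant([[0.8], [-0.4], [-1.2]])

    # Returns a scalar; by default per-example losses are averaged over nonzero weights.
    loss = tf.losses.hinge_loss(labels=labels, logits=logits)

    with tf.Session() as sess:
        print(sess.run(loss))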
