
Searched refs:vocab_size (Results 1 – 25 of 38) sorted by relevance


/external/tensorflow/tensorflow/python/kernel_tests/
embedding_ops_test.py
132 vocab_size, argument
142 shard_shape = [vocab_size // num_shards] + shape
143 if i < vocab_size % num_shards: # Excess goes evenly on the first shards
162 vocab_size, argument
167 num_shards, vocab_size, dtype=dtype, shape=shape)
171 shape=[vocab_size] + shape,
182 vocab_size, argument
203 ids_per_partition, extras = divmod(vocab_size, num_shards)
245 vocab_size = 4
246 p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
[all …]
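The split logic in these matches (lines 142-143 and 203) is the standard sharded-embedding partition: divmod(vocab_size, num_shards) yields a base rows-per-shard count, and the remainder is spread one row each over the first shards. A minimal standalone sketch, with a hypothetical helper name:

    def shard_sizes(vocab_size, num_shards):
        # Excess rows go evenly on the first shards, as in the test above.
        ids_per_partition, extras = divmod(vocab_size, num_shards)
        return [ids_per_partition + (1 if i < extras else 0)
                for i in range(num_shards)]

    # shard_sizes(10, 3) -> [4, 3, 3]; the shard sizes always sum to vocab_size.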
lookup_ops_test.py
420 vocab_size=0)
431 vocab_size=0)
437 vocabulary_file=vocabulary_file, vocab_size=2)
449 vocabulary_file=vocabulary_file, vocab_size=4)
460 vocab_size=0)
464 vocabulary_file=vocabulary_file, vocab_size=3)
478 vocab_size=3,
484 vocab_size=3,
683 vocab_size=2,
695 vocabulary_file=vocabulary_file, vocab_size=4)
[all …]
sparse_ops_test.py
134 def _AssertResultsSorted(self, output, vocab_size): argument
138 self.assertAllEqual(output.dense_shape, [3, vocab_size])
140 def _AssertResultsNotSorted(self, output, vocab_size): argument
144 self.assertAllEqual(output.dense_shape, [3, vocab_size])
147 vocab_size = 50
154 sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
157 self._AssertResultsSorted(output, vocab_size)
160 vocab_size = 50
163 sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
166 self._AssertResultsSorted(output, vocab_size)
[all …]
/external/tensorflow/tensorflow/python/ops/
lookup_ops.py
429 vocab_size=None, argument
491 if (vocab_size is not None) and (vocab_size <= 0):
492 raise ValueError("Invalid vocab_size %s." % vocab_size)
497 self._vocab_size = vocab_size
546 vocab_size=None, argument
585 vocab_size=vocab_size,
597 vocab_size=None, argument
638 vocab_size=vocab_size,
889 vocab_size=None, argument
974 if vocab_size is not None and vocab_size < 1:
[all …]
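Lines 491-492 and 974 show the same guard in two places: vocab_size may be None (meaning "infer it"), but an explicit value must be positive. Condensed as a sketch (helper name hypothetical):

    def _validate_vocab_size(vocab_size):
        # None is allowed and means the size is derived elsewhere;
        # any explicit value must be at least 1.
        if vocab_size is not None and vocab_size <= 0:
            raise ValueError("Invalid vocab_size %s." % vocab_size)
        return vocab_size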
sparse_ops.py
1015 def sparse_to_indicator(sp_input, vocab_size, name=None): argument
1069 sp_new = sparse_merge(sp_input, sp_values, vocab_size, name)
1078 def sparse_merge(sp_ids, sp_values, vocab_size, name=None, argument
1174 if not (isinstance(vocab_size, ops.Tensor) or
1175 isinstance(vocab_size, numbers.Integral)):
1177 type(vocab_size))
1178 vocab_size = [vocab_size]
1183 if not isinstance(vocab_size, collections.Iterable):
1185 "Found %s" % type(vocab_size))
1186 for dim in vocab_size:
[all …]
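Lines 1174-1186 normalize vocab_size for sparse_merge: a scalar (Python integer or 0-D tensor) is wrapped into a one-element list, and anything else must be an iterable of per-dimension sizes. A rough reconstruction that drops the TensorFlow-specific Tensor branch:

    import numbers

    def _normalize_vocab_size(vocab_size):
        # Scalar -> one-element list, so later code can treat vocab_size
        # uniformly as one size per merged sparse dimension.
        if isinstance(vocab_size, numbers.Integral):
            return [vocab_size]
        try:
            return [dim for dim in vocab_size]
        except TypeError:
            raise TypeError("vocab_size must be a scalar or an iterable. "
                            "Found %s" % type(vocab_size))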
/external/tensorflow/tensorflow/contrib/layers/python/layers/
encoders.py
33 vocab_size, argument
68 if not vocab_size or not embed_dim:
73 'embeddings', shape=[vocab_size, embed_dim],
92 vocab_size=None, argument
127 if not (reuse or (vocab_size and embed_dim)):
130 vocab_size, embed_dim))
133 shape = [vocab_size, embed_dim]
134 if reuse and vocab_size is None or embed_dim is None:
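The encoders.py matches build the embedding table itself: a [vocab_size, embed_dim] variable, with both sizes required unless an existing variable is being reused. A minimal sketch of that pattern (TF 1.x style, assuming the contrib-era API):

    import tensorflow as tf

    def embedding_table(vocab_size, embed_dim):
        # One row per vocabulary id, one column per embedding dimension.
        if not vocab_size or not embed_dim:
            raise ValueError("Must specify vocab_size and embed_dim.")
        return tf.get_variable("embeddings", shape=[vocab_size, embed_dim])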
embedding_ops_test.py
45 def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1): argument
46 assert vocab_size > 0
49 assert num_shards <= vocab_size
52 shape=[vocab_size, embed_dim],
55 mean=0.0, stddev=1.0 / math.sqrt(vocab_size), dtype=dtypes.float32))
571 vocab_size, argument
581 shard_shape = [vocab_size // num_shards] + shape
582 if i < vocab_size % num_shards: # Excess goes evenly on the first shards
603 vocab_size, argument
624 ids_per_partition, extras = divmod(vocab_size, num_shards)
[all …]
feature_column.py
381 lookup_config.vocab_size is None):
403 return self.lookup_config.vocab_size + self.lookup_config.num_oov_buckets
438 vocab_size=self.length,
694 keys=keys, vocab_size=len(keys), default_value=default_value),
714 vocab_size=self.lookup_config.vocab_size,
723 vocab_size=None, argument
758 if vocab_size is None:
767 vocab_size=vocab_size,
834 vocab_size=self.length,
996 vocab_size=self.length)
[all …]
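Line 403 is the key arithmetic for file-backed columns: the total id space is the file vocabulary plus the out-of-vocabulary buckets, and any downstream embedding or one-hot layer must be sized to that sum. For illustration (values assumed):

    vocab_size = 50000        # entries in the vocabulary file
    num_oov_buckets = 10      # extra buckets for unseen words
    length = vocab_size + num_oov_buckets   # 50010 rows needed downstream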
feature_column_test.py
43 def _sparse_id_tensor(shape, vocab_size, seed=112123): argument
49 values = np.random.randint(0, vocab_size + 1, size=np.prod(shape))
52 keep = values < vocab_size
83 "bbb", vocabulary_file="a_file", vocab_size=454)
85 self.assertEqual(b.lookup_config.vocab_size, 454)
93 "bbb", vocabulary_file="a_file", vocab_size=454, dtype=dtypes.int64)
98 "bbb", vocabulary_file="a_file", vocab_size=454, dtype=dtypes.float32)
107 "ids", "a_file", num_oov_buckets=7, vocab_size=3)
111 self.assertEqual(weighted_ids.lookup_config.vocab_size, 3)
313 vocab_size = len(sparse_column.lookup_config.keys)
[all …]
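The _sparse_id_tensor helper (lines 43-52) deliberately samples ids in [0, vocab_size] inclusive and then keeps only values < vocab_size, so a fraction of positions drop out and exercise the missing-id paths. A NumPy-only reconstruction:

    import numpy as np

    def sparse_id_values(shape, vocab_size, seed=112123):
        # randint's upper bound is exclusive, so vocab_size itself can be
        # drawn; filtering it back out leaves gaps in the sparse tensor.
        rng = np.random.RandomState(seed)
        values = rng.randint(0, vocab_size + 1, size=int(np.prod(shape)))
        keep = values < vocab_size
        return values[keep], keep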
/external/tensorflow/tensorflow/contrib/lookup/
lookup_ops_test.py
1310 vocabulary_file=vocabulary_file, vocab_size=2)
1322 vocabulary_file=vocabulary_file, vocab_size=4)
1333 vocab_size=0)
1337 vocabulary_file=vocabulary_file, vocab_size=3)
1351 vocab_size=3,
1357 vocab_size=3,
1540 vocab_size=2,
1552 vocabulary_file=vocabulary_file, vocab_size=4)
1564 vocabulary_file=vocabulary_file, vocab_size=3)
1835 vocab_size = 3
[all …]
lookup_ops.py
50 vocab_size=None, argument
55 vocabulary_file, num_oov_buckets, vocab_size, default_value, hasher_spec,
/external/tensorflow/tensorflow/contrib/eager/python/examples/rnn_ptb/
rnn_ptb.py
73 def __init__(self, vocab_size, embedding_dim, **kwargs): argument
75 self.vocab_size = vocab_size
81 shape=[self.vocab_size, self.embedding_dim],
102 vocab_size, argument
112 self.embedding = self.track_layer(Embedding(vocab_size, embedding_dim))
123 vocab_size,
228 def vocab_size(self): member in Datasets
262 vocab_size=10000,
273 vocab_size=10000,
284 vocab_size=100,
[all …]
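The rnn_ptb.py Embedding class (lines 73-81) is the canonical eager-mode pattern: store one [vocab_size, embedding_dim] weight and gather rows by word id. A compressed sketch, not the file's exact code:

    import tensorflow as tf

    class Embedding(tf.keras.layers.Layer):
        def __init__(self, vocab_size, embedding_dim, **kwargs):
            super(Embedding, self).__init__(**kwargs)
            self.vocab_size = vocab_size
            self.embedding_dim = embedding_dim

        def build(self, _):
            self.embedding = self.add_weight(
                "embedding_kernel",
                shape=[self.vocab_size, self.embedding_dim])

        def call(self, x):
            # Each integer id indexes one row of the table.
            return tf.nn.embedding_lookup(self.embedding, x)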
/external/tensorflow/tensorflow/contrib/seq2seq/python/kernel_tests/
beam_search_decoder_test.py
113 self.vocab_size = 5
128 logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
183 logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
237 self.vocab_size = 5
281 logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
330 vocab_size = 20
331 end_token = vocab_size - 1
335 output_layer = layers_core.Dense(vocab_size, use_bias=True, activation=None)
340 embedding = np.random.randn(vocab_size, embedding_dim).astype(np.float32)
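Lines 330-341 show the shapes this beam search test wires together: the last vocabulary id doubles as the end token, the embedding matrix is [vocab_size, embedding_dim], and the output layer projects decoder states to vocab_size logits so every step scores the whole vocabulary. Restated with one value assumed:

    import numpy as np

    vocab_size = 20
    embedding_dim = 50                 # assumed; not visible in the match
    end_token = vocab_size - 1         # last id is reserved for EOS
    embedding = np.random.randn(vocab_size, embedding_dim).astype(np.float32)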
/external/tensorflow/tensorflow/contrib/eager/python/examples/spinn/
spinn_test.py
45 vocab_size): argument
50 (sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64)
56 (sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64)
268 vocab_size = 40
277 embed = tf.random_normal((vocab_size, d_embed))
285 vocab_size)
440 vocab_size = 1000
446 embed = tf.random_normal((vocab_size, d_embed))
455 vocab_size)
/external/tensorflow/tensorflow/core/kernels/
lookup_util.cc
75 Status Init(const string& filename, int64 vocab_size, char delimiter, in Init() argument
78 if (vocab_size == -1) { in Init()
79 TF_RETURN_IF_ERROR(GetNumLinesInTextFile(env, filename, &vocab_size)); in Init()
82 vocab_size_ = vocab_size; in Init()
325 Status InitializeTableFromTextFile(const string& filename, int64 vocab_size, in InitializeTableFromTextFile() argument
354 TF_RETURN_IF_ERROR(iter.Init(filename, vocab_size, delimiter, key_dtype, in InitializeTableFromTextFile()
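Lines 78-79 define the sentinel: a vocab_size of -1 tells the initializer to count the lines of the vocabulary file itself. The same logic in Python, for clarity (a rendering of the C++ above, not an API):

    def resolve_vocab_size(filename, vocab_size):
        if vocab_size == -1:
            # -1 means "use every line in the file".
            with open(filename) as f:
                vocab_size = sum(1 for _ in f)
        return vocab_size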
word2vec_kernels.cc
284 const int64 vocab_size = w_in.dim_size(0); in Compute() local
287 OP_REQUIRES(ctx, vocab_size == sampler_->num(), in Compute()
288 errors::InvalidArgument("vocab_size mismatches: ", vocab_size, in Compute()
307 DCHECK(0 <= example && example < vocab_size) << example; in Compute()
309 DCHECK(0 <= label && label < vocab_size) << label; in Compute()
lookup_table_init_op.h
25 Status InitializeTableFromTextFile(const string& filename, int64 vocab_size,
lookup_util.h
44 Status InitializeTableFromTextFile(const string& filename, int64 vocab_size,
/external/tensorflow/tensorflow/contrib/seq2seq/python/ops/
beam_search_decoder.py
553 vocab_size = logits.shape[-1].value or array_ops.shape(logits)[-1]
556 depth=vocab_size,
589 range_size=beam_width * vocab_size,
598 word_indices, vocab_size, name="next_beam_word_ids")
601 word_indices / vocab_size, name="next_beam_parent_ids")
720 vocab_size = array_ops.shape(probs)[2]
725 vocab_size,
733 array_ops.expand_dims(finished, 2), [1, 1, vocab_size])
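Lines 589-601 show how beam search recovers beams from a flat top-k: scores are flattened to beam_width * vocab_size entries, so each winning index encodes (parent_beam, word_id) as parent * vocab_size + word. Decoding it is plain integer arithmetic (helper name hypothetical):

    def unflatten_beam_indices(word_indices, vocab_size):
        # Inverse of the flattening used for the joint top-k.
        next_word_ids = [idx % vocab_size for idx in word_indices]
        next_parent_ids = [idx // vocab_size for idx in word_indices]
        return next_word_ids, next_parent_ids

    # With vocab_size=5, a flat index of 12 means parent beam 2, word id 2.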
/external/tensorflow/tensorflow/contrib/text/python/ops/
skip_gram_ops.py
333 vocab_size = 0
342 vocab_size += 1
367 vocab_size=vocab_size,
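Lines 333-342 derive vocab_size by counting the vocabulary rows that survive filtering while the file is read, and line 367 passes that count onward. Roughly (the filter condition is an assumption; the real op filters on a minimum count):

    def count_vocab(rows, min_count=0):
        vocab_size = 0
        for token, freq in rows:
            if freq >= min_count:
                vocab_size += 1
        return vocab_size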
/external/tensorflow/tensorflow/contrib/kfac/python/kernel_tests/
fisher_factors_test.py
425 vocab_size = 5
426 factor = ff.EmbeddingInputKroneckerFactor((input_ids,), vocab_size)
428 self.assertEqual(cov.shape.as_list(), [vocab_size])
433 vocab_size = 5
434 factor = ff.EmbeddingInputKroneckerFactor((input_ids,), vocab_size)
fisher_blocks_test.py
386 vocab_size = 5
387 block = fb.EmbeddingKFACFB(lc.LayerCollection(), vocab_size)
404 vocab_size = 5
405 block = fb.EmbeddingKFACFB(lc.LayerCollection(), vocab_size)
421 values, indices, dense_shape=[vocab_size, 1])
422 dense_vector = array_ops.reshape([0., 1., 0., 1., 1.], [vocab_size, 1])
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_InitializeTableFromTextFile.pbtxt
30 name: "vocab_size"
api_def_InitializeTableFromTextFileV2.pbtxt
32 name: "vocab_size"
/external/tensorflow/tensorflow/examples/learn/
text_classification_cnn.py
49 features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
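This last match is the consumer's view: embed_sequence turns a [batch, sequence] tensor of word ids into [batch, sequence, embed_dim] vectors, with vocab_size bounding the id space. A sketch of the surrounding call (constants assumed for illustration):

    import tensorflow as tf

    EMBEDDING_SIZE = 20       # assumed
    n_words = 10000           # assumed; normally derived from the data
    word_ids = tf.placeholder(tf.int64, shape=[None, None])
    word_vectors = tf.contrib.layers.embed_sequence(
        word_ids, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)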
