
Searched full:parallelism (Results 1 – 25 of 220) sorted by relevance


/external/tensorflow/tensorflow/python/kernel_tests/
decode_jpeg_op_test.py
43 parallelism,
54 parallelism: the number of concurrent decode_jpeg ops to be run.
85 for _ in xrange(parallelism):
120 for parallelism in [1, 100]:
121 duration_decode = self._evalDecodeJpeg('small.jpg', parallelism,
123 duration_decode_crop = self._evalDecodeJpeg('small.jpg', parallelism,
126 'small.jpg', parallelism, num_iters, True, crop_window)
128 name='decode_jpeg_small_p%d' % (parallelism),
132 name='decode_crop_jpeg_small_p%d' % (parallelism),
136 name='decode_after_crop_jpeg_small_p%d' % (parallelism),
[all …]
record_input_test.py
53 parallelism=1,
70 parallelism=1,
89 parallelism=1,
108 parallelism=2,
137 parallelism=1,
165 parallelism=2,
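These test cases sweep the parallelism argument of RecordInput, which sets how many TFRecord reader threads pull from the matching files concurrently. A minimal sketch of the API under test (internal TF module; the file glob and sizes are hypothetical):

    from tensorflow.python.ops import data_flow_ops

    # Two reader threads iterate over the matching TFRecord files in
    # parallel and feed a shared buffer of 512 records.
    yield_op = data_flow_ops.RecordInput(
        file_pattern="/tmp/records/part-*",  # hypothetical glob
        batch_size=32,
        buffer_size=512,
        parallelism=2,
        name="record_input").get_yield_op()

Running yield_op in a session then yields one batch of serialized records per call.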
/external/tensorflow/tensorflow/core/util/
work_sharder.h
28 // manually cap parallelism.
41 // parallelism.
47 // Too much parallelism can also cause excessive thread switches,
48 // therefore, Shard() often limits the maximum parallelism. Each
51 // thread parallelism.
61 // parallelism. Its default is a very large quantity.
63 // Within TF runtime, per-thread max parallelism affects Shard() and
64 // intra-op parallelism. E.g., if SetPerThreadMaxParallelism(1) is
71 // Helper to set and unset per-thread max parallelism.
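These comments describe Shard(), the runtime helper that splits a unit of work across the intra-op thread pool, and the per-thread cap that bounds it. From Python, the size of the pool that Shard() draws from is set by the session's intra-op thread count; a minimal sketch (TF 1.x-style session API, illustrative values):

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()

    # One intra-op thread caps what Shard() can fan out to inside a kernel;
    # two inter-op threads still let independent ops run concurrently.
    config = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1,
                                      inter_op_parallelism_threads=2)
    with tf.compat.v1.Session(config=config) as sess:
        print(sess.run(tf.reduce_sum(tf.ones([1000, 1000]))))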
/external/tensorflow/tensorflow/core/framework/
model.cc
139 double parallelism = inputs_.size() - 1;  // default to cycle length
140 if (auto* parameter = gtl::FindOrNull(parameters_, "parallelism")) {
141 parallelism = std::min(static_cast<int>(parallelism),
147 static_cast<double>(inputs_.size() - 1) / parallelism;
149 old_input_time, parallelism);
224 double parallelism = 1.0;
225 if (auto* parameter = gtl::FindOrNull(parameters_, "parallelism")) {
226 parallelism = (*parameter)->value;
230 static_cast<double>(NanosPerElementLocked()) / parallelism;
231 return ComputeWaitTime(output_time, input_times->back(), parallelism);
[all …]
model_test.cc
31 const int64 parallelism = std::get<0>(GetParam());
37 "parallelism",
38 std::make_shared<SharedState>(parallelism, nullptr, nullptr), 1,
39 parallelism)});
77 100 + 250 / parallelism);
82 50 + 250 / parallelism);
95 const int64 parallelism = std::get<0>(GetParam());
101 "parallelism",
102 std::make_shared<SharedState>(parallelism, nullptr, nullptr), 1,
103 parallelism)});
[all …]
device_base.cc
55 const int parallelism = std::max<int>(
58 return eigen_cpu_devices_[parallelism - 1];
/external/tensorflow/tensorflow/python/tpu/
tpu_context.py
77 and non-model-parallelism, total invocation count is equal to
108 For non-model-parallelism, num_replicas should be the total num of TPU
145 This should be used for full replicate for non-model-parallelism.
153 # Note that: For the non-model parallelism, the mapping could be
368 'The num of cores required by the model parallelism, specified by '
377 'of cores ({}) required by the model parallelism, specified by '
613 'be ({}), got ({}). For non-model-parallelism, num_replicas should '
615 'model-parallelism, the total number of TPU cores should be '
628 'The num of cores required by the model parallelism, specified by '
677 This should be used for full replicate for non-model-parallelism.
tpu_config.py
66 The number of model replicas in the system. For non-model-parallelism
68 model-parallelism, the total number of TPU cores equals
70 num_cores_per_replica: Defaults to `None`, which disables model parallelism.
72 is required by model-parallelism which enables partitioning
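The tpu_config docstrings pin down the accounting: with model parallelism enabled, the total number of TPU cores equals num_replicas times num_cores_per_replica. A minimal sketch of turning it on via the estimator-era API (import path and values illustrative):

    import tensorflow as tf

    # Partition each replica across 2 cores; on an 8-core slice this
    # yields 4 model-parallel replicas (8 = 4 * 2).
    tpu_config = tf.compat.v1.estimator.tpu.TPUConfig(
        iterations_per_loop=100,
        num_cores_per_replica=2)  # None (the default) disables model parallelism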
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_ExperimentalMaxIntraOpParallelismDataset.pbtxt
6 Identifies the maximum intra-op parallelism to use.
10 Creates a dataset that overrides the maximum intra-op parallelism.
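This op backs the tf.data threading option that caps per-op parallelism inside an input pipeline; a minimal sketch (option attribute named experimental_threading in TF releases of this era):

    import tensorflow as tf

    ds = tf.data.Dataset.range(1000).map(lambda x: x * x)

    # Cap intra-op parallelism for ops in this pipeline to one thread.
    options = tf.data.Options()
    options.experimental_threading.max_intra_op_parallelism = 1
    ds = ds.with_options(options)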
api_def_MapDefun.pbtxt
49 limit the intra op parallelism. To limit inter-op parallelism, a user can
api_def_FixedUnigramCandidateSampler.pbtxt
92 in order to speed up the whole computation through parallelism. This parameter
101 in order to speed up the whole computation through parallelism. This parameter
/external/tensorflow/tensorflow/python/framework/
config.py
73 """Get number of threads used within an individual op for parallelism.
87 """Set number of threads used within an individual op for parallelism.
101 """Get number of threads used for parallelism between independent operations.
114 """Set number of threads used for parallelism between independent operations.
/external/skqp/third_party/libjpeg-turbo/
jsimdcfg.inc
70 ; To maximize parallelism, Type DCTELEM is changed to short (originally, int).
76 ; To maximize parallelism, Type short is changed to short.
/external/libjpeg-turbo/simd/nasm/
jsimdcfg.inc
70 ; To maximize parallelism, Type DCTELEM is changed to short (originally, int).
76 ; To maximize parallelism, Type short is changed to short.
jsimdcfg.inc.h
102 ; To maximize parallelism, Type DCTELEM is changed to short (originally, int).
110 ; To maximize parallelism, Type MULTIPLIER is changed to short.
/external/skia/third_party/libjpeg-turbo/
jsimdcfg.inc
70 ; To maximize parallelism, Type DCTELEM is changed to short (originally, int).
76 ; To maximize parallelism, Type short is changed to short.
/external/llvm/tools/gold/
gold-plugin.cpp
167 // Default parallelism of 0 used to indicate that user did not specify.
168 // Actual parallelism default value depends on implementation.
169 // Currently, code generation defaults to no parallelism, whereas
171 static unsigned Parallelism = 0;
247 if (StringRef(opt_ + 5).getAsInteger(10, Parallelism))
248 message(LDPL_FATAL, "Invalid parallelism level: %s", opt_ + 5);
1073 // Note that the default parallelism is 1 instead of the
1075 // parallelism levels (e.g. symbol ordering will be different, and some uses
1076 // of inline asm currently have issues with parallelism >1).
1077 unsigned int MaxThreads = options::Parallelism ? options::Parallelism : 1;
[all …]
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
parallel_task_assignment.cc
74 // Limit max parallelism for I/O bound instructions by assuming a
83 // Use max parallelism for compute bound instructions.
167 // TODO(b/27458679) Support inter-op parallelism.
/external/tensorflow/tensorflow/core/kernels/
record_yielder.h
48 // opts.parallelism = 8;  // Uses 8 tfrecord iterators to iterate
80 int32 parallelism = 1;
/external/jemalloc_new/
TUNING.md
53 since arenas manage memory independently. When high degree of parallelism
57 Suggested: if low parallelism is expected, try lower arena count while
/external/swiftshader/third_party/llvm-7.0/llvm/tools/gold/
gold-plugin.cpp
135 // Default parallelism of 0 used to indicate that user did not specify.
136 // Actual parallelism default value depends on implementation.
139 static unsigned Parallelism = 0;
140 // Default regular LTO codegen parallelism (number of partitions).
260 if (StringRef(opt_ + 5).getAsInteger(10, Parallelism))
261 message(LDPL_FATAL, "Invalid parallelism level: %s", opt_ + 5);
849 if (options::Parallelism)
850 Backend = createInProcessThinBackend(options::Parallelism);
/external/tensorflow/tensorflow/core/lib/core/
threadpool.h
75 // parallelism, function calls will be automatically queued.
93 // parallelism.
/external/tensorflow/tensorflow/compiler/xla/g3doc/
jit.md
22 > Note: The XLA CPU backend supports intra-op parallelism (i.e. it can shard a
24 > parallelism (i.e. it cannot execute independent operations concurrently across
/external/libevent/test/
bench_httpclient.c
60 const int PARALLELISM = 200;
196 for (i=0; i < PARALLELISM; ++i) {
/external/tensorflow/tensorflow/core/kernels/data/
map_defun_op.cc
38 // limit the intra op parallelism. To limit inter-op parallelism, a user
319 // If this value is positive, limit the max intra op parallelism when the
