
Searched full:parallelism (Results 1 – 25 of 391) sorted by relevance


/external/tensorflow/tensorflow/python/kernel_tests/
decode_jpeg_op_test.py
42 parallelism, argument
53 parallelism: the number of concurrent decode_jpeg ops to be run.
92 for _ in xrange(parallelism):
127 for parallelism in [1, 100]:
128 duration_decode = self._evalDecodeJpeg('small.jpg', parallelism,
130 duration_decode_crop = self._evalDecodeJpeg('small.jpg', parallelism,
133 'small.jpg', parallelism, num_iters, True, crop_window)
135 name='decode_jpeg_small_p%d' % (parallelism),
139 name='decode_crop_jpeg_small_p%d' % (parallelism),
143 name='decode_after_crop_jpeg_small_p%d' % (parallelism),
[all …]
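The benchmark above times concurrent decode_jpeg ops at parallelism 1 and 100. A minimal tf.data sketch of the same idea, decoding JPEGs with a configurable number of parallel calls; the glob pattern is a placeholder:

```python
import tensorflow as tf

# Decode JPEGs with a configurable degree of parallelism, in the spirit of
# the benchmark above ("/tmp/images/*.jpg" is a placeholder path).
parallelism = 100
dataset = (
    tf.data.Dataset.list_files("/tmp/images/*.jpg")
    .map(tf.io.read_file, num_parallel_calls=parallelism)
    .map(tf.io.decode_jpeg, num_parallel_calls=parallelism)
)
for image in dataset.take(1):
    print(image.shape)
```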
record_input_test.py
53 parallelism=1,
70 parallelism=1,
89 parallelism=1,
108 parallelism=2,
137 parallelism=1,
165 parallelism=2,
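The test sweeps RecordInput's parallelism between 1 and 2. A hedged sketch of how that internal API is commonly constructed in TF benchmark code; treat the exact argument list as an assumption, not a stable contract:

```python
from tensorflow.python.ops import data_flow_ops

# Internal TF API; argument set assumed from benchmark usage:
# read TFRecords with two parallel readers.
record_input = data_flow_ops.RecordInput(
    file_pattern="/tmp/records/*",  # placeholder pattern
    batch_size=32,
    buffer_size=1000,
    parallelism=2,
)
records = record_input.get_yield_op()
```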
/external/kotlinx.coroutines/kotlinx-coroutines-core/jvm/src/scheduling/
Dispatcher.kt
89 …* Creates a coroutine execution context with limited parallelism to execute tasks which may potent…
93 …* @param parallelism parallelism level, indicating how many threads can execute tasks in the resul…
95 public fun blocking(parallelism: Int = BLOCKING_DEFAULT_PARALLELISM): CoroutineDispatcher {
96 require(parallelism > 0) { "Expected positive parallelism level, but have $parallelism" }
97 return LimitingDispatcher(this, parallelism, null, TASK_PROBABLY_BLOCKING)
101 … * Creates a coroutine execution context with limited parallelism to execute CPU-intensive tasks.
105 …* @param parallelism parallelism level, indicating how many threads can execute tasks in the resul…
107 public fun limited(parallelism: Int): CoroutineDispatcher {
108 require(parallelism > 0) { "Expected positive parallelism level, but have $parallelism" }
109 …require(parallelism <= corePoolSize) { "Expected parallelism level lesser than core pool size ($co…
[all …]
/external/kotlinx.coroutines/kotlinx-coroutines-core/jvm/src/
CommonPool.kt
26 * Name of the property that controls default parallelism level of [CommonPool].
31 public const val DEFAULT_PARALLELISM_PROPERTY_NAME = "kotlinx.coroutines.default.parallelism"
39 val parallelism = property.toIntOrNull()
40 if (parallelism == null || parallelism < 1) {
43 parallelism
46 private val parallelism: Int
63 … // Try to use commonPool unless parallelism was explicitly specified or in debug privatePool mode
70 … Try { fjpClass.getConstructor(Int::class.java).newInstance(parallelism) as? ExecutorService }
77 * Checks that this ForkJoinPool's parallelism is at least one to avoid pathological bugs.
82 // A broken FJP (that is configured for 0 parallelism) would not execute the task and
[all …]
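CommonPool rejects a property value that is missing or below 1 (lines 39-40 above). A rough Python analog of that validation; the exact failure behavior on an invalid value is an assumption:

```python
# Rough analog of the CommonPool property check quoted above; raising on an
# invalid value is an assumed behavior, not a fact about the Kotlin code.
def parse_default_parallelism(prop):
    try:
        parallelism = int(prop) if prop is not None else None
    except ValueError:
        parallelism = None
    if parallelism is None or parallelism < 1:
        raise ValueError("Expected positive parallelism level, got %r" % prop)
    return parallelism

print(parse_default_parallelism("4"))  # -> 4
```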
Dispatchers.kt
17 public const val IO_PARALLELISM_PROPERTY_NAME: String = "kotlinx.coroutines.io.parallelism"
28 … It is backed by a shared pool of threads on JVM. By default, the maximal level of parallelism used
30 …* Level of parallelism X guarantees that no more than X tasks can be executed in this dispatcher i…
101 * "`kotlinx.coroutines.io.parallelism`" ([IO_PARALLELISM_PROPERTY_NAME]) system property.
114 …* As a result of thread sharing, more than 64 (default parallelism) threads can be created (but no…
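Line 30 above states the core guarantee: a parallelism level of X means no more than X tasks run in the dispatcher at once. A conceptual Python analog of that contract (not the Kotlin API), using a semaphore as the admission gate:

```python
import asyncio

# Conceptual analog of the limited-parallelism guarantee quoted above:
# at most `parallelism` tasks are in flight at any moment.
async def run_with_parallelism(coros, parallelism):
    sem = asyncio.Semaphore(parallelism)

    async def gated(coro):
        async with sem:  # blocks when `parallelism` tasks are already running
            return await coro

    return await asyncio.gather(*(gated(c) for c in coros))

async def main():
    async def work(i):
        await asyncio.sleep(0.1)
        return i

    print(await run_with_parallelism([work(i) for i in range(8)], parallelism=2))

asyncio.run(main())
```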
/external/tensorflow/tensorflow/core/util/
work_sharder.h
29 // parallelism.
43 // parallelism.
49 // Too much parallelism can also cause excessive thread switches,
50 // therefore, Shard() often limits the maximum parallelism. Each
53 // thread parallelism.
63 // parallelism. Its default is a very large quantity.
65 // Within TF runtime, per-thread max parallelism affects Shard() and
66 // intra-op parallelism. E.g., if SetPerThreadMaxParallelism(1) is
73 // Helper to set and unset per-thread max parallelism.
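work_sharder.h caps parallelism because too many threads cause excessive switches. A simplified Python analog of the described Shard() contract; shard() and its argument names are hypothetical, not the TF API:

```python
from concurrent.futures import ThreadPoolExecutor

# Simplified analog of the Shard() behavior described above: split [0, total)
# into contiguous blocks and cap the worker count at max_parallelism.
def shard(total, work_fn, max_parallelism):
    num_blocks = min(max_parallelism, total)
    block = (total + num_blocks - 1) // num_blocks  # ceiling division
    with ThreadPoolExecutor(max_workers=num_blocks) as pool:
        futures = [
            pool.submit(work_fn, start, min(start + block, total))
            for start in range(0, total, block)
        ]
        for f in futures:
            f.result()  # propagate worker exceptions

shard(1000, lambda lo, hi: print("work on [%d, %d)" % (lo, hi)), max_parallelism=4)
```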
/external/tensorflow/tensorflow/python/data/experimental/benchmarks/
autotune_benchmark.py
54 print("autotune parallelism vs no autotuning speedup: {}".format(a / b))
55 print("autotune parallelism and buffer sizes vs no autotuning speedup: {}"
77 print("autotune parallelism vs no autotuning speedup: {}".format(a / b))
78 print("autotune parallelism and buffer sizes vs no autotuning speedup: {}"
98 print("autotune parallelism vs no autotuning speedup: {}".format(a / b))
99 print("autotune parallelism and buffer sizes vs no autotuning speedup: {}"
121 print("autotune parallelism vs no autotuning speedup: {}".format(a / b))
122 print("autotune parallelism and buffer sizes vs no autotuning speedup: {}"
146 print("autotune parallelism vs no autotuning speedup: {}".format(a / b))
147 print("autotune parallelism and buffer sizes vs no autotuning speedup: {}"
[all …]
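The benchmark compares autotuned parallelism and buffer sizes against fixed settings. A minimal sketch of enabling both through tf.data.AUTOTUNE (spelled tf.data.experimental.AUTOTUNE in older releases):

```python
import tensorflow as tf

# Let the tf.data runtime pick the map parallelism and prefetch buffer size
# instead of hand-tuning them, as in the benchmark above.
dataset = (
    tf.data.Dataset.range(1_000)
    .map(lambda x: x * x, num_parallel_calls=tf.data.AUTOTUNE)
    .prefetch(tf.data.AUTOTUNE)
)
for value in dataset.take(3):
    print(value.numpy())
```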
/external/kotlinx.coroutines/benchmarks/src/jmh/kotlin/benchmarks/
SemaphoreBenchmark.kt
85 enum class SemaphoreBenchDispatcherCreator(val create: (parallelism: Int) -> CoroutineDispatcher) {
86 FORK_JOIN({ parallelism -> ForkJoinPool(parallelism).asCoroutineDispatcher() }),
87 …EXPERIMENTAL({ parallelism -> ExperimentalCoroutineDispatcher(corePoolSize = parallelism, maxPoolS…
ChannelProducerConsumerBenchmark.kt
130 enum class DispatcherCreator(val create: (parallelism: Int) -> CoroutineDispatcher) {
131 FORK_JOIN({ parallelism -> ForkJoinPool(parallelism).asCoroutineDispatcher() })
/external/llvm-project/llvm/utils/lit/tests/
parallelism-groups.py
1 # Check that we do not crash if a parallelism group is set to None. Permits
13 # RUN: %{lit} -j2 %{inputs}/parallelism-groups | FileCheck %s
16 # CHECK-DAG: PASS: parallelism-groups :: test1.txt
17 # CHECK-DAG: PASS: parallelism-groups :: test2.txt
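A sketch of a lit.cfg reproducing the scenario being tested, assuming lit's parallelism_groups / parallelism_group configuration knobs; lit injects the config and lit_config objects when it executes this file:

```python
# lit.cfg sketch (config and lit_config are provided by lit at load time).
import lit.formats

config.name = "parallelism-groups"
config.test_format = lit.formats.ShTest()
config.suffixes = [".txt"]

# Declare a group with no concurrency bound (None), as in the test above,
# and assign this suite's tests to it; the runner must not crash.
lit_config.parallelism_groups["my-group"] = None
config.parallelism_group = "my-group"
```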
/external/tensorflow/tensorflow/core/framework/
model_test.cc
33 const int64 parallelism = std::get<0>(GetParam());
38 {model::MakeParameter("parallelism",
40 /*value=*/parallelism, nullptr, nullptr),
68 110 * parallelism / 10);
95 100 + 250 / parallelism);
102 50 + 250 / parallelism);
115 const int64 parallelism = std::get<0>(GetParam());
120 {model::MakeParameter("parallelism",
121 std::make_shared<SharedState>(/*value=*/parallelism,
139 ? 110.0 * parallelism / 10
[all …]
model.cc
46 // Collects "essential" parallelism parameters and buffer size parameters in the
47 // tree rooted in the given node. Which parallelism parameters are essential is
58 // Parallelism parameter is considered to be essential if the corresponding
303 // `ComputeWaitTime(producer_time, consumer_time, parallelism, ...)`, where
305 // interleave "cycle" divided by `parallelism`, `consumer_time` is the
307 // and if the node has parallelism parameter, then `buffer_size` is derived
308 // from `parallelism`.
330 double parallelism = num_inputs() - 1; // default to cycle length
333 parallelism = std::min(parallelism, (*parameter)->value);
339 static_cast<double>(num_inputs() - 1) / parallelism;
[all …]
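The comment block above defines the async interleave output time through ComputeWaitTime(producer_time, consumer_time, parallelism, ...). A literal Python transcription, with every name assumed from the comment rather than the actual TF implementation:

```python
# Transcription of the model.cc comment above; all names are assumptions.
# The first input drives the interleave; the remaining inputs form the "cycle",
# and parallelism defaults to (and is capped at) the cycle length.
def async_interleave_output_time(input_output_times, parallelism,
                                 consumer_time, compute_wait_time):
    cycle = input_output_times[1:]
    parallelism = min(parallelism, len(cycle))
    producer_time = sum(cycle) / len(cycle) / parallelism
    return compute_wait_time(producer_time, consumer_time, parallelism)
```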
/external/kotlinx.coroutines/kotlinx-coroutines-core/jvm/test/scheduling/
SchedulerTestBase.kt
90 protected fun blockingDispatcher(parallelism: Int): CoroutineDispatcher {
92 return _dispatcher!!.blocking(parallelism)
95 protected fun view(parallelism: Int): CoroutineDispatcher {
97 return _dispatcher!!.limited(parallelism)
/external/tensorflow/tensorflow/core/profiler/protobuf/
op_stats.proto
81 // The number of replicas, corresponds to input parallelism.
82 // If there is no model parallelism, replica_count = device_core_count
84 // The number of cores used for a single replica, e.g. model parallelism.
85 // If there is no model parallelism, then num_cores_per_replica = 1
overview_page.proto
182 // The number of replicas, corresponds to input parallelism.
183 // If there is no model parallelism, replica_count = device_core_count
185 // The number of cores used for a single replica, e.g. model parallelism.
186 // If there is no model parallelism, then num_cores_per_replica = 1
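Both protos state the same relation between replicas and cores. A worked example of the arithmetic on a hypothetical 8-core device:

```python
# Without model parallelism each replica uses one core, so replica_count
# equals the device core count; model parallelism splits cores per replica.
device_core_count = 8

num_cores_per_replica = 1                  # no model parallelism
replica_count = device_core_count // num_cores_per_replica
assert replica_count == device_core_count  # 8 replicas

num_cores_per_replica = 4                  # 4-way model parallelism
replica_count = device_core_count // num_cores_per_replica
assert replica_count == 2                  # data parallelism drops to 2
```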
/external/tensorflow/tensorflow/core/kernels/data/
map_defun_op.h
29 // limit the intra op parallelism. To limit inter-op parallelism, a user
68 // If this value is positive, limit the max intra op parallelism when the
parallel_batch_dataset_op.cc
237 {model::MakeParameter("parallelism", num_parallel_calls_, /*min=*/1,
273 int64 parallelism = -1;
274 // NOTE: We only set the parallelism value if the lock can be acquired
277 parallelism = num_parallel_calls_->value;
282 "parallelism",
283 strings::Printf("%lld", static_cast<long long>(parallelism)));
493 // parallelism and there are slots available in the `invocation_results_`
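Lines 273-277 above record the parallelism in trace metadata only when the lock can be acquired without blocking, so profiling never stalls the pipeline. A Python analog of that non-blocking pattern; the names are illustrative:

```python
import threading

_lock = threading.Lock()
_num_parallel_calls = 4

def trace_metadata():
    parallelism = -1  # sentinel: value unavailable without blocking
    if _lock.acquire(blocking=False):  # never wait for the lock
        try:
            parallelism = _num_parallel_calls
        finally:
            _lock.release()
    return {"parallelism": str(parallelism)}

print(trace_metadata())
```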
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_MaxIntraOpParallelismDataset.pbtxt
6 Identifies the maximum intra-op parallelism to use.
10 Creates a dataset that overrides the maximum intra-op parallelism.
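A sketch of the user-facing way to override a dataset's maximum intra-op parallelism via tf.data options; the threading field assumes a recent TF release (older releases spell it options.experimental_threading):

```python
import tensorflow as tf

# Override the maximum intra-op parallelism for this pipeline.
options = tf.data.Options()
options.threading.max_intra_op_parallelism = 1

dataset = tf.data.Dataset.range(100).with_options(options)
print(next(iter(dataset)).numpy())
```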
api_def_MapDefun.pbtxt
49 limit the intra op parallelism. To limit inter-op parallelism, a user can
/external/tensorflow/tensorflow/core/tpu/graph_rewrite/
distributed_tpu_rewrite_pass.h
22 // Model parallelism and data parallelism:
24 // We support two different kinds of parallelism on TPU:
25 // * data parallelism (replication), or parallelization across batches, and
26 // * model parallelism, or parallelization within a batch.
29 // times across a TPU pod (data parallelism). The `num_replicas` attribute
35 // parallelism). The `num_cores_per_replica` attribute controls how many cores
347 // TODO(b/33943292): at present, for model parallelism with Send/Recv to work
/external/tensorflow/tensorflow/core/kernels/
record_yielder.h
41 // (opts.bufsize + opts.parallelism * 16).
48 // opts.parallelism = 8; // Uses 8 tfrecord iterators to iterate
80 int32 parallelism = 1;
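The header documents opts.parallelism = 8 concurrent TFRecord iterators. A tf.data sketch of the equivalent parallel reading; the file pattern is a placeholder:

```python
import tensorflow as tf

# Read TFRecord files with 8 concurrent readers, mirroring opts.parallelism,
# and shuffle through a buffer, mirroring opts.bufsize.
files = tf.data.Dataset.list_files("/path/to/data-*", shuffle=True)
records = files.interleave(
    tf.data.TFRecordDataset,
    cycle_length=8,          # 8 files open at once
    num_parallel_calls=8,    # read them in parallel
)
records = records.shuffle(buffer_size=10_000)
```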
/external/lz4/.circleci/
config.yml
13 # Parallelism is broken in this file : it just plays the same tests twice.
14 # The script will have to be modified to support parallelism properly
16 parallelism: 1
/external/kotlinx.coroutines/kotlinx-coroutines-core/jvm/test/
CommonPoolTest.kt
44 parallelism: Int,
49 parallelism,
/external/rust/crates/rayon/src/
lib.rs
7 //! Data-parallelism library that makes it easy to convert sequential
10 //! Rayon is lightweight and convenient for introducing parallelism into existing
12 //! parallelism when sensible, based on work-load at runtime.
/external/ms-tpm-20-ref/Samples/Nucleo-TPM/L476RG/
Nucleo-L476RG.elf.launch
15 … Set flash parallelism mode to 32, 16, or 8 bit when using STM32 F2/F4 microcontrollers&#13;&#10;#…
32 … Set flash parallelism mode to 32, 16, or 8 bit when using STM32 F2/F4 microcontrollers&#13;&#10;#…
