
Searched refs:num_gpus (Results 1 – 25 of 60) sorted by relevance


/external/tensorflow/tensorflow/python/keras/mixed_precision/
loss_scale_benchmark.py
36 def _get_strategy(num_gpus): argument
37 if num_gpus > 1:
39 ['/GPU:%d' % i for i in range(num_gpus)])
47 def _benchmark(self, gradient_type, num_gpus, mode, loss_scaling): argument
67 name = '%s_%d_GPU_%s_%s' % (gradient_type, num_gpus, mode, ls_str)
68 with context.eager_mode(), _get_strategy(num_gpus).scope() as strategy:
140 num_gpus = len(config.list_logical_devices('GPU'))
142 if num_gpus >= 1:
144 if num_gpus >= 2:
146 if num_gpus >= 8:
[all …]
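
The hits above show the benchmark choosing a distribution strategy from num_gpus, passing a '/GPU:%d' device list when more than one GPU is available. A minimal sketch of that pattern, assuming tf.distribute.MirroredStrategy and a default-strategy fallback (the exact benchmark code is not shown here):

    import tensorflow as tf

    def _get_strategy(num_gpus):
        # Mirror across the listed GPUs when there is more than one, matching
        # the '/GPU:%d' device list in the hits above.
        if num_gpus > 1:
            return tf.distribute.MirroredStrategy(
                ['/GPU:%d' % i for i in range(num_gpus)])
        # Assumption: fall back to the default single-device strategy otherwise.
        return tf.distribute.get_strategy()
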
/external/tensorflow/tensorflow/python/distribute/v1/
all_reduce_test.py
89 def _buildInput(self, num_workers, num_gpus): argument
96 for d in range(0, num_gpus):
97 dn = "/replica:0/task:%d/device:GPU:%d" % (w, d % num_gpus)
144 def _buildRing(self, num_workers, num_gpus, subdiv): argument
145 gpu_perm = range(0, num_gpus)
149 def _testAllReduce(self, num_workers, num_gpus, shape, build_f): argument
151 num_devices = num_workers * num_gpus
165 def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv): argument
167 build_f = self._buildRing(num_workers, num_gpus, subdiv)
168 self._testAllReduce(num_workers, num_gpus, shape, build_f)
[all …]
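
The test hits construct one input per (worker, GPU) pair using device strings of the form "/replica:0/task:%d/device:GPU:%d". A small self-contained sketch of that naming pattern (the helper name is hypothetical):

    def build_device_names(num_workers, num_gpus):
        # One "/replica:0/task:<worker>/device:GPU:<gpu>" string per (worker, GPU).
        names = []
        for w in range(num_workers):
            for d in range(num_gpus):
                names.append("/replica:0/task:%d/device:GPU:%d" % (w, d % num_gpus))
        return names

    # build_device_names(2, 2) ->
    # ['/replica:0/task:0/device:GPU:0', '/replica:0/task:0/device:GPU:1',
    #  '/replica:0/task:1/device:GPU:0', '/replica:0/task:1/device:GPU:1']
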
cross_device_ops_test.py
163 if context.num_gpus() < sum(1 for d in devices if "GPU" in d.upper()):
340 if context.num_gpus() < 1:
460 num_gpus=0, argument
467 if num_gpus:
468 devices = ["/device:GPU:%d" % i for i in range(num_gpus)]
487 if num_gpus:
490 for i in range(num_gpus)
503 num_accelerators={"GPU": num_gpus})
531 num_gpus, argument
539 num_gpus,
[all …]
all_reduce.py
223 num_gpus = len(gpu_perm)
224 devices = num_workers * num_gpus
227 if num_subchunks > num_gpus:
229 "num_subchunks %d must be <= num_gpus %d" % (num_subchunks, num_gpus))
230 rotation_interval = max(1, int(num_gpus / num_subchunks))
236 default_order = [(w * num_gpus) + i for i in gpu_perm]
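
These hits are the ring-ordering bookkeeping for the all-reduce: num_subchunks may not exceed num_gpus, the rotation interval is num_gpus divided by num_subchunks (at least 1), and the default order enumerates worker * num_gpus + i over the permuted GPU indices. A sketch of just that bookkeeping, with the loop over workers added for completeness:

    def ring_order(num_workers, gpu_perm, num_subchunks):
        # Bookkeeping only; not the full ring all-reduce implementation.
        num_gpus = len(gpu_perm)
        if num_subchunks > num_gpus:
            raise ValueError("num_subchunks %d must be <= num_gpus %d"
                             % (num_subchunks, num_gpus))
        rotation_interval = max(1, int(num_gpus / num_subchunks))
        # One global device index per (worker, permuted GPU).
        default_order = [(w * num_gpus) + i
                         for w in range(num_workers) for i in gpu_perm]
        return rotation_interval, default_order
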
/external/tensorflow/tensorflow/python/distribute/
parameter_server_strategy_test.py
76 num_gpus=None, argument
79 if num_gpus is None:
80 num_gpus = context.num_gpus()
86 num_accelerators={'GPU': num_gpus})
92 central_storage_strategy.CentralStorageStrategy._from_num_gpus(num_gpus)
115 def _get_test_objects(self, task_type, task_id, num_gpus): argument
120 num_gpus=num_gpus,
123 def _test_device_assignment_distributed(self, task_type, task_id, num_gpus): argument
125 d, _, sess_config = self._get_test_objects(task_type, task_id, num_gpus)
136 if num_gpus == 0:
[all …]
collective_all_reduce_strategy_test.py
72 num_gpus=None): argument
74 if num_gpus is None:
75 num_gpus = context.num_gpus()
82 num_accelerators={'GPU': num_gpus})
86 ClusterSpec({}), num_accelerators={'GPU': num_gpus})
105 def _get_test_object(self, task_type, task_id, num_gpus=0): argument
110 num_gpus=num_gpus)
113 def _test_minimize_loss_graph(self, task_type, task_id, num_gpus): argument
115 num_gpus)
169 if context.num_gpus() < d.extended._num_gpus_per_worker:
[all …]
parameter_server_strategy.py
208 num_gpus = context.num_gpus()
210 num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
213 self._num_gpus_per_worker = num_gpus
230 if num_gpus > 0:
233 for i in range(num_gpus))
293 num_gpus = context.num_gpus()
295 num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
298 self._num_gpus_per_worker = num_gpus
300 compute_devices = device_util.local_devices_from_num_gpus(num_gpus)
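
Both constructors in this file resolve num_gpus the same way: from the eager context when no cluster resolver is supplied, otherwise from the resolver's accelerator count. A hedged sketch of that resolution, with the public tf.config call standing in for the internal context.num_gpus():

    import tensorflow as tf

    def resolve_num_gpus(cluster_resolver=None):
        if cluster_resolver is None:
            # Local case: count the visible GPUs directly.
            return len(tf.config.list_logical_devices("GPU"))
        # Cluster case: ask the resolver how many GPU accelerators a worker has.
        return cluster_resolver.num_accelerators().get("GPU", 0)
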
collective_all_reduce_strategy.py
357 num_gpus = context.num_gpus()
359 num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
364 if num_gpus:
365 local_devices = tuple("/device:GPU:%d" % i for i in range(num_gpus))
398 self._num_gpus_per_worker = num_gpus
477 num_gpus = context.num_gpus()
479 num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
481 if num_gpus:
483 for i in range(num_gpus))
506 self._num_gpus_per_worker = num_gpus
mirrored_strategy.py
156 num_gpus = None
159 if num_gpus is None:
160 num_gpus = sum(1 for d in device_in_task if _is_gpu_device(d))
163 elif num_gpus != sum(1 for d in device_in_task if _is_gpu_device(d)):
169 d_spec.device_index >= num_gpus):
172 return num_gpus
175 def all_local_devices(num_gpus=None): argument
177 if num_gpus is not None:
178 devices = devices[:num_gpus]
187 context.num_gpus())
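
The mirrored_strategy.py hits infer the per-task GPU count from a multi-worker device list and insist that every task exposes the same number of GPUs. A simplified sketch of that check (the real code uses a device-spec parser rather than a substring test):

    def infer_num_gpus_per_task(devices_by_task):
        num_gpus = None
        for devices_in_task in devices_by_task:
            gpus_in_task = sum(1 for d in devices_in_task if "GPU" in d.upper())
            if num_gpus is None:
                num_gpus = gpus_in_task
            elif num_gpus != gpus_in_task:
                raise ValueError("All tasks must list the same number of GPUs.")
        return num_gpus
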
central_storage_strategy.py
74 def _from_num_gpus(cls, num_gpus): argument
75 return cls(device_util.local_devices_from_num_gpus(num_gpus))
device_util.py
130 def local_devices_from_num_gpus(num_gpus): argument
132 return (tuple("/device:GPU:%d" % i for i in range(num_gpus)) or
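
local_devices_from_num_gpus expands a count into a device tuple; the trailing `or` in the hit implies a fallback when num_gpus is 0, which I assume to be the local CPU:

    def local_devices_from_num_gpus(num_gpus):
        # Empty tuple (num_gpus == 0) falls through to the CPU device (assumed).
        return (tuple("/device:GPU:%d" % i for i in range(num_gpus))
                or ("/device:CPU:0",))
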
mirrored_strategy_test.py
1172 self.assertEqual(context.num_gpus() * 2, distribution.num_replicas_in_sync)
1191 num_gpus = context.num_gpus()
1194 expected_values = [[i+j for j in range(num_gpus)] * num_workers
1195 for i in range(0, 100, num_gpus)]
1202 expected_num_replicas_in_sync=num_workers*num_gpus,
1215 num_gpus = context.num_gpus()
1219 for i in range(0, 100, num_gpus):
1220 expected_values.append([i+j for j in range(num_gpus)] * num_workers)
1227 expected_num_replicas_in_sync=num_workers*num_gpus,
1253 i) for i in range(context.num_gpus())]),
[all …]
/external/tensorflow/tensorflow/python/keras/benchmarks/
distribution_util.py
87 num_gpus=0, argument
105 if num_gpus < 0:
111 if num_gpus > 1:
113 "flag cannot be set to `off`.".format(num_gpus))
121 if num_gpus == 0:
123 if num_gpus > 1:
129 if num_gpus == 0:
132 devices = ["device:GPU:%d" % i for i in range(num_gpus)]
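
The distribution_util.py hits validate num_gpus before building a strategy: negative counts are rejected, and a run with more than one GPU cannot have the distribution_strategy flag set to `off`. A hedged sketch of those checks; the strategy returned in each branch is an assumption, not the exact helper:

    import tensorflow as tf

    def get_distribution_strategy(distribution_strategy="mirrored", num_gpus=0):
        if num_gpus < 0:
            raise ValueError("`num_gpus` can not be negative.")
        if distribution_strategy == "off":
            if num_gpus > 1:
                raise ValueError(
                    "When %d GPUs are specified, distribution_strategy "
                    "flag cannot be set to `off`." % num_gpus)
            return None
        # Assumption: mirror over the requested GPUs, or the CPU when there are none.
        if num_gpus == 0:
            devices = ["device:CPU:0"]
        else:
            devices = ["device:GPU:%d" % i for i in range(num_gpus)]
        return tf.distribute.MirroredStrategy(devices=devices)
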
benchmark_util.py
108 num_gpus=0, argument
151 if num_gpus < 0:
163 distribution_strategy=distribution_strategy, num_gpus=num_gpus)
216 'num_gpus': num_gpus
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/
grpc_testlib_server.cc
39 int num_cpus, int num_gpus, int task_index, in FillServerDef() argument
71 (*config->mutable_device_count())["GPU"] = num_gpus; in FillServerDef()
83 int num_gpus = 0; in main() local
90 tensorflow::Flag("num_gpus", &num_gpus, "number of GPUs"), in main()
101 num_gpus, task_index, &def); in main()
grpc_testlib.cc
52 int num_gpus = 0; in MakeTestCluster() local
59 num_gpus = iter->second; in MakeTestCluster()
70 strings::StrCat("--num_gpus=", num_gpus)}); in MakeTestCluster()
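
The grpc_testlib hits cap the number of GPU devices a test server creates through the ConfigProto device_count map, driven by a --num_gpus flag. The same knob is reachable from Python with TF1-style sessions; a rough equivalent:

    import tensorflow as tf

    num_gpus = 0  # e.g. parsed from a --num_gpus flag
    config = tf.compat.v1.ConfigProto(device_count={"GPU": num_gpus})
    sess = tf.compat.v1.Session(config=config)  # creates at most num_gpus GPU devices
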
/external/tensorflow/tensorflow/core/grappler/optimizers/
generic_layout_optimizer.cc
51 int num_gpus = 0; in GetNumGPUs() local
57 num_gpus++; in GetNumGPUs()
69 return {num_gpus, num_volta}; in GetNumGPUs()
109 const TransposeContext& context, int num_gpus, int num_voltas) { in GetSrcAndDstDataFormats() argument
112 if (((static_cast<float>(num_voltas) / static_cast<float>(num_gpus)) >= in GetSrcAndDstDataFormats()
412 const int num_gpus = num_gpus_and_num_volta.first; in Optimize() local
417 if (num_gpus > 0) { in Optimize()
422 context, num_gpus, num_gpus_and_num_volta.second); in Optimize()
/external/tensorflow/tensorflow/python/keras/utils/
multi_gpu_utils.py
157 num_gpus = len(gpus)
164 num_gpus = gpus
165 target_gpu_ids = range(num_gpus)
226 'parts': num_gpus
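
The multi_gpu_utils.py hits accept `gpus` either as an explicit list of GPU ids or as a bare count; a short sketch of that normalization (the helper name is hypothetical):

    def normalize_gpus(gpus):
        # Mirrors the branching visible in the hits above.
        if isinstance(gpus, (list, tuple)):
            num_gpus = len(gpus)
            target_gpu_ids = gpus
        else:
            num_gpus = gpus
            target_gpu_ids = range(num_gpus)
        return num_gpus, target_gpu_ids
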
/external/tensorflow/tensorflow/python/eager/
benchmarks_test.py
184 with ops.device("GPU:0" if context.num_gpus() else "CPU:0"):
196 if context.num_gpus():
202 if context.num_gpus():
211 with ops.device("GPU:0" if context.num_gpus() else "CPU:0"):
220 with ops.device("GPU:0" if context.num_gpus() else "CPU:0"):
272 if not context.num_gpus():
277 if not context.num_gpus():
285 if not context.num_gpus():
291 if not context.num_gpus():
334 if not context.num_gpus():
[all …]
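
The eager benchmark hits guard device placement on GPU availability, running on GPU:0 when one exists and on the CPU otherwise. A minimal standalone version using the public API:

    import tensorflow as tf

    num_gpus = len(tf.config.list_logical_devices("GPU"))
    with tf.device("GPU:0" if num_gpus else "CPU:0"):
        x = tf.random.uniform([1000, 1000])
        y = tf.matmul(x, x)
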
/external/tensorflow/tensorflow/core/profiler/internal/gpu/
cupti_collector.h
191 uint32 num_gpus; member
201 explicit AnnotationMap(uint64 max_size, uint32 num_gpus) in AnnotationMap() argument
202 : max_size_(max_size), per_device_map_(num_gpus) {} in AnnotationMap()
229 annotation_map_(options.max_annotation_strings, options.num_gpus) {} in CuptiTraceCollector()
/external/tensorflow/tensorflow/python/ops/
collective_ops_gpu_test.py
50 def _setup_context(self, num_gpus=2): argument
53 if len(gpus) < num_gpus:
55 num_gpus, len(gpus)))
278 self._setup_context(num_gpus=1)
289 self._setup_context(num_gpus=2)
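
collective_ops_gpu_test.py skips its cases when the machine has fewer GPUs than requested; a hedged sketch of that guard using the public device-listing API (the helper name is hypothetical):

    import tensorflow as tf

    def require_gpus(test_case, num_gpus=2):
        gpus = tf.config.list_physical_devices("GPU")
        if len(gpus) < num_gpus:
            test_case.skipTest("Expected %d GPUs but found %d"
                               % (num_gpus, len(gpus)))
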
/external/mesa3d/src/amd/addrlib/src/chip/r800/
si_gb_reg.h
64 unsigned int num_gpus : 3; member
82 unsigned int num_gpus : 3; member
/external/tensorflow/tensorflow/core/distributed_runtime/
rpcbench_test.cc
57 int num_gpus = 0; in MakeGRPCCluster() local
64 num_gpus = iter->second; in MakeGRPCCluster()
69 worker_threads->Schedule([worker_idx, n, num_cpus, num_gpus, &port] { in MakeGRPCCluster()
84 (*config->mutable_device_count())["GPU"] = num_gpus; in MakeGRPCCluster()
/external/tensorflow/tensorflow/core/grappler/
devices.cc
46 int num_gpus = gpu_manager->VisibleDeviceCount(); in GetNumAvailableGPUs() local
47 for (int i = 0; i < num_gpus; i++) { in GetNumAvailableGPUs()
/external/tensorflow/tensorflow/core/grappler/clusters/
single_machine.cc
41 SingleMachine::SingleMachine(int timeout_s, int num_cpu_cores, int num_gpus) in SingleMachine() argument
44 << " Number of GPUs: " << num_gpus; in SingleMachine()
49 if (num_gpus > 0) { in SingleMachine()
50 (*options_.config.mutable_device_count())["GPU"] = num_gpus; in SingleMachine()
