Searched refs:cross_device_ops_lib (Results 1 – 9 of 9) sorted by relevance
/external/tensorflow/tensorflow/contrib/distribute/python/
cross_device_ops_test.py
   31  from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
  209  cross_device_ops_lib.ReductionToOneDevice()),
  212  cross_device_ops_lib.ReductionToOneDevice(
  216  cross_device_ops_lib.ReductionToOneDevice(
  231  cross_device_ops_lib.AllReduceCrossDeviceOps("nccl", 1, 0, 0)),
  234  cross_device_ops_lib.AllReduceCrossDeviceOps("nccl", 0, 0, 0)),
  236  cross_device_ops_lib.NcclAllReduce()),
  239  cross_device_ops_lib.HierarchicalCopyAllReduce(8)),
  242  cross_device_ops_lib.AllReduceCrossDeviceOps(
  259  result = cross_device_ops_lib._choose_all_reduce_algorithm(device_links)
  [all …]
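(Note: the classes exercised in this test correspond to the public tf.distribute cross-device reduction ops. A minimal sketch of how one of them ends up on a strategy, assuming the TF 2.x public aliases of the classes above; the device list is a placeholder:)

    import tensorflow as tf

    # Public counterparts of the classes exercised above; each decides how
    # per-replica values are reduced across local devices.
    nccl_ops = tf.distribute.NcclAllReduce()
    hierarchical_ops = tf.distribute.HierarchicalCopyAllReduce(num_packs=1)
    to_one_device = tf.distribute.ReductionToOneDevice(reduce_to_device="/cpu:0")

    # Any of them can be passed to MirroredStrategy explicitly; if
    # cross_device_ops is omitted, the strategy infers one itself (see
    # choose_the_best in mirrored_strategy.py further down).
    strategy = tf.distribute.MirroredStrategy(
        devices=["/gpu:0", "/gpu:1"], cross_device_ops=nccl_ops)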
collective_all_reduce_strategy.py
   22  from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
   47  communication=cross_device_ops_lib.CollectiveCommunication.AUTO):
estimator_training_test.py
   37  from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
  306  communication=cross_device_ops_lib.CollectiveCommunication.NCCL))
collective_all_reduce_strategy_test.py
   33  from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
   67  communication=cross_device_ops_lib.CollectiveCommunication.AUTO,
mirrored_strategy_multigpu_test.py
   33  from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
 1558  cross_device_ops_lib.NcclAllReduce)
/external/tensorflow/tensorflow/python/distribute/
collective_all_reduce_strategy.py
   26  from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
   75  communication=cross_device_ops_lib.CollectiveCommunication.AUTO):
   94  cross_device_ops_lib.CollectiveCommunication)
   98  cross_device_ops_lib.CollectiveAllReduce)
  128  self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
  194  self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
  381  cross_device_ops_lib.CollectiveAllReduce)
  401  cross_device_ops_lib.CollectiveCommunication.NCCL) and
  448  return cross_device_ops_lib.reduce_non_distributed_value(
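(Note: the communication argument above is the same knob that the public TF 2.x API exposes through MultiWorkerMirroredStrategy. A hedged sketch, assuming that experimental alias is available in the build in question:)

    import tensorflow as tf

    # CollectiveCommunication selects the collective implementation used by
    # CollectiveAllReduce: AUTO, RING, or NCCL.
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
        communication=tf.distribute.experimental.CollectiveCommunication.NCCL)

    # Variables created and reductions performed under this scope go through
    # the chosen collective communication across workers.
    with strategy.scope():
        model = tf.keras.Sequential([tf.keras.layers.Dense(1)])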
parameter_server_strategy.py
   24  from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
  107  cross_device_ops_lib.ReductionToOneDevice(reduce_to_device=_LOCAL_CPU))
  291  if not cross_device_ops_lib.check_destinations(destinations):
  373  for d in cross_device_ops_lib.get_devices_from(destinations):
  384  return cross_device_ops_lib.reduce_non_distributed_value(
mirrored_strategy.py
   26  from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
  473  self._inferred_cross_device_ops = cross_device_ops_lib.choose_the_best(
  512  cross_device_ops_lib.MultiWorkerAllReduce(
  517  self._inferred_cross_device_ops = cross_device_ops_lib.NcclAllReduce()
  704  return cross_device_ops_lib.reduce_non_distributed_value(
tpu_strategy.py
   24  from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
  456  return cross_device_ops_lib.reduce_non_distributed_value(
  459  devices = cross_device_ops_lib.get_devices_from(destinations)