Searched refs:on_host (Results 1 – 25 of 31) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
cuda_solvers.h
167 const string& debug_info, bool on_host);
170 bool on_host);
336 ScratchSpace(OpKernelContext* context, int64 size, bool on_host)
337 : ScratchSpace(context, TensorShape({size}), "", on_host) {}
340 bool on_host)
341 : ScratchSpace(context, TensorShape({size}), debug_info, on_host) {}
344 const string& debug_info, bool on_host)
345 : context_(context), debug_info_(debug_info), on_host_(on_host) {
347 if (on_host) {
379 bool on_host() const { return on_host_; }
[all …]
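
The ScratchSpace hits above show that the trailing bool selects host versus device scratch memory: the constructor checks on_host (line 347) when setting up the allocator attributes for the backing tensor. A minimal sketch of requesting host-side scratch from inside a kernel, assuming ScratchSpace is the templated helper declared in cuda_solvers.h (the AllocateHostScratch name and kNumElems size are illustrative only, not from these hits):

    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/kernels/cuda_solvers.h"
    #include "tensorflow/core/platform/logging.h"

    // Illustrative helper: allocate a small host-resident scratch buffer.
    void AllocateHostScratch(tensorflow::OpKernelContext* context) {
      constexpr tensorflow::int64 kNumElems = 8;  // illustrative size
      // on_host=true routes the allocation through allocator attributes with
      // set_on_host(true), so the scratch tensor lives in host memory.
      tensorflow::ScratchSpace<int> scratch(context, kNumElems, /*on_host=*/true);
      DCHECK(scratch.on_host());  // accessor shown at cuda_solvers.h:379
    }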
ops_testutil.h
57 const bool on_host = in SetOutputAttrs() local
59 attr.set_on_host(on_host); in SetOutputAttrs()
debug_ops.h
76 !context->input_alloc_attr(0).on_host(); in Compute()
94 !context->input_alloc_attr(0).on_host(); in Compute()
/external/tensorflow/tensorflow/contrib/gdr/
gdr_worker.cc
91 const bool on_host = send_args.alloc_attrs.on_host(); in GrpcRecvTensorAsync() local
104 on_host, [proto, done, response](const Status& s) { in GrpcRecvTensorAsync()
116 if (src_dev->tensorflow_gpu_device_info() && (!on_host)) { in GrpcRecvTensorAsync()
177 hook->prod_dev, hook->prod_ctx, hook->prod_attr.on_host(), in RecvBufAsync()
gdr_memory_manager.h
44 Device* device, DeviceContext* device_context, bool on_host,
51 Device* device, DeviceContext* device_context, bool on_host,
gdr_rendezvous_mgr.cc
63 const bool on_host = recv_args_.alloc_attrs.on_host(); in Start() local
66 recv_args_.device_context, on_host, in Start()
gdr_memory_manager.cc
123 Device* device, DeviceContext* device_context, bool on_host,
128 Device* device, DeviceContext* device_context, bool on_host,
367 Device* device, DeviceContext* device_context, bool on_host, in TransportOptionsFromTensor() argument
406 if (copy && device->tensorflow_gpu_device_info() && !on_host) { in TransportOptionsFromTensor()
425 Device* device, DeviceContext* device_context, bool on_host, in TensorFromTransportOptions() argument
478 StatusCallback callback = [done, copy, device, device_context, on_host, in TensorFromTransportOptions()
492 if (copy && device->tensorflow_gpu_device_info() && !on_host) { in TensorFromTransportOptions()
gdr_collective_executor_mgr.cc
107 to_device_ctx, to_alloc_attr.on_host(), done); in RecvFromPeer()
/external/tensorflow/tensorflow/core/framework/
allocator_test.cc
49 for (bool on_host : {false, true}) { in TEST()
53 aa.set_on_host(on_host); in TEST()
56 EXPECT_EQ(on_host, aa.on_host()); in TEST()
allocator.h
361 bool on_host() const { return value & 0x1; } in on_host() function
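
The two framework hits document the AllocatorAttributes round-trip: set_on_host() stores the flag in bit 0 of value, on_host() reads it back (allocator.h:361), and the allocator_test.cc loop verifies the pair. A minimal self-contained sketch of the same round-trip; the RequestHostAttrs name is made up here, and set_gpu_compatible() belongs to the same AllocatorAttributes API but is not among these hits:

    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/platform/logging.h"

    // Hypothetical helper: build attributes requesting a host-resident buffer.
    tensorflow::AllocatorAttributes RequestHostAttrs() {
      tensorflow::AllocatorAttributes attr;
      attr.set_on_host(true);         // sets bit 0 of attr.value
      attr.set_gpu_compatible(true);  // pinned memory usable for GPU DMA
      DCHECK(attr.on_host());         // reads bit 0 back, as allocator_test.cc does
      return attr;
    }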
/external/tensorflow/tensorflow/core/grappler/optimizers/
gpu_swapping_kernels.cc
32 ctx, !ctx->input_alloc_attr(0).on_host(), in ComputeAsync()
65 ctx, ctx->input_alloc_attr(0).on_host(), in ComputeAsync()
/external/tensorflow/tensorflow/core/common_runtime/sycl/
sycl_device.cc
39 if (attr.on_host()) in GetAllocator()
58 if (alloc_attrs.on_host()) { in MakeTensorFromProto()
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/
grpc_worker_service.cc
491 const bool on_host = send_args.alloc_attrs.on_host(); in GrpcRecvTensorAsync() local
494 if (src_dev->tensorflow_gpu_device_info() && (!on_host)) { in GrpcRecvTensorAsync()
583 const bool on_host = in RecvBufAsync() local
585 hook->prod_attr.on_host(); in RecvBufAsync()
586 if ((!on_host) && (num_bytes > 0)) { in RecvBufAsync()
/external/tensorflow/tensorflow/core/common_runtime/
collective_rma_local.cc
95 src_attr.on_host() ? DEVICE_CPU : src_dev->attributes().device_type()); in MemCpyAsync()
97 dst_attr.on_host() ? DEVICE_CPU : dst_dev->attributes().device_type()); in MemCpyAsync()
rendezvous_mgr.cc
69 (send_args.alloc_attrs.on_host() || parsed.src.type == "CPU"); in SameWorkerRecvDone()
71 (recv_args.alloc_attrs.on_host() || parsed.dst.type == "CPU"); in SameWorkerRecvDone()
copy_tensor.cc
259 src_alloc_attr.on_host() ? DEVICE_CPU : src->attributes().device_type()); in ViaDMA()
261 dst_alloc_attr.on_host() ? DEVICE_CPU : dst->attributes().device_type()); in ViaDMA()
partitioning_utils_test.cc
166 ASSERT_EQ(expected[i], actual[i].on_host()) << " at index " << i; in CheckAlloc()
/external/tensorflow/tensorflow/core/kernels/data/
single_threaded_executor.cc
129 bool on_host = op_kernel->output_memory_types()[out] == HOST_MEMORY; in Initialize() local
130 if (on_host) { in Initialize()
132 h.set_on_host(on_host); in Initialize()
dataset_test_base.cc
201 const bool on_host = in CreateOpKernelContext() local
203 attr.set_on_host(on_host); in CreateOpKernelContext()
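
Both hits in this directory derive on_host from the kernel's declared output memory types: an output marked HOST_MEMORY gets allocator attributes with set_on_host(true). A minimal sketch of that pattern, assuming only OpKernel::output_memory_types() and the AllocatorAttributes API seen above (the OutputAttrs helper name is illustrative):

    #include <vector>
    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/types.h"

    // Build one AllocatorAttributes per output, mirroring the
    // single_threaded_executor.cc and dataset_test_base.cc hits above.
    std::vector<tensorflow::AllocatorAttributes> OutputAttrs(
        const tensorflow::OpKernel& op_kernel) {
      std::vector<tensorflow::AllocatorAttributes> attrs;
      for (const tensorflow::MemoryType mt : op_kernel.output_memory_types()) {
        tensorflow::AllocatorAttributes h;
        h.set_on_host(mt == tensorflow::HOST_MEMORY);  // HOST_MEMORY outputs live on CPU
        attrs.push_back(h);
      }
      return attrs;
    }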
/external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_device_factory.cc
45 if (attr.on_host()) { in GetAllocator()
/external/tensorflow/tensorflow/core/distributed_runtime/
base_rendezvous_mgr.cc
236 (send_args.alloc_attrs.on_host() || parsed.src.type == "CPU"); in SameWorkerRecvDone()
238 (recv_args.alloc_attrs.on_host() || parsed.dst.type == "CPU"); in SameWorkerRecvDone()
tensor_coding.cc
47 if (alloc_attrs_.on_host() || da.device_type() == "CPU") { in InitAlloc()
/external/tensorflow/tensorflow/compiler/jit/
xla_device.cc
246 if (attr.on_host()) { in GetAllocatorLocked()
452 if (alloc_attrs.on_host()) { in MakeTensorFromProto()
/external/tensorflow/tensorflow/contrib/verbs/
rdma.cc
1060 const bool on_host = send_args.alloc_attrs.on_host(); in RecvHandler() local
1061 if (src_dev_->tensorflow_gpu_device_info() && !on_host) { in RecvHandler()
1566 bool on_host = recv_args_.alloc_attrs.on_host(); in AllocateTensorsAsync() local
1567 if (dst_dev_->tensorflow_gpu_device_info() && !on_host && in AllocateTensorsAsync()
/external/tensorflow/tensorflow/contrib/mpi/
mpi_rendezvous_mgr.cc
242 (!send_args.alloc_attrs.on_host())) { in AddRequest()