
Searched refs:cpu_tensor (Results 1 – 22 of 22) sorted by relevance

/external/tensorflow/tensorflow/compiler/jit/
xla_device_context.cc
106 void XlaDeviceContext::CopyCPUTensorToDevice(const Tensor* cpu_tensor, in CopyCPUTensorToDevice() argument
110 if (cpu_tensor->NumElements() == 0) { in CopyCPUTensorToDevice()
117 << reinterpret_cast<const void*>(cpu_tensor->tensor_data().data()) in CopyCPUTensorToDevice()
120 << " " << cpu_tensor->NumElements() << " " in CopyCPUTensorToDevice()
121 << cpu_tensor->shape().DebugString() << " " in CopyCPUTensorToDevice()
136 xla_tensor->set_host_tensor(*cpu_tensor); in CopyCPUTensorToDevice()
147 static_cast<const char*>(DMAHelper::base(cpu_tensor)), in CopyCPUTensorToDevice()
181 TensorReference ref(*cpu_tensor); in CopyCPUTensorToDevice()
201 Device* device, Tensor* cpu_tensor, in CopyDeviceTensorToCPU() argument
211 << reinterpret_cast<const void*>(cpu_tensor->tensor_data().data()) in CopyDeviceTensorToCPU()
[all …]
xla_device_context.h
63 void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
68 Tensor* cpu_tensor, StatusCallback done) override;
/external/tensorflow/tensorflow/core/distributed_runtime/
collective_rma_distributed.cc
65 Tensor* cpu_tensor) { in PopulateTensorFromExtra() argument
66 char* head = reinterpret_cast<char*>(DMAHelper::base(cpu_tensor)); in PopulateTensorFromExtra()
131 Tensor* cpu_tensor = new Tensor(cpu_dev->GetAllocator(cpu_attr), in RecvFromPeer() local
133 PopulateTensorFromExtra(extra, cpu_tensor); in RecvFromPeer()
137 to_device, cpu_attr, to_alloc_attr, cpu_tensor, in RecvFromPeer()
139 [this, cpu_tensor, done](const Status& s) { in RecvFromPeer()
140 delete cpu_tensor; in RecvFromPeer()
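PopulateTensorFromExtra above reassembles the staged host tensor from the chunked RPC payload before RecvFromPeer copies it to the device. A minimal sketch of that helper, assuming the RecvBufRespExtra proto with a repeated bytes tensor_content field (as in tensorflow/core/protobuf/worker.proto) and that chunks arrive in send order; namespace qualifiers omitted:

#include <cstring>
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/protobuf/worker.pb.h"

// Sketch: advance a head pointer through the tensor's flat buffer,
// memcpy'ing each received chunk in turn. Assumes the chunk sizes sum
// to cpu_tensor->TotalBytes().
static void PopulateTensorFromExtra(const RecvBufRespExtra& extra,
                                    Tensor* cpu_tensor) {
  char* head = reinterpret_cast<char*>(DMAHelper::base(cpu_tensor));
  for (const auto& chunk : extra.tensor_content()) {
    memcpy(head, chunk.data(), chunk.size());
    head += chunk.size();
  }
}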
/external/tensorflow/tensorflow/core/kernels/
stack.cc
256 Tensor* cpu_tensor = in ComputeAsync() local
259 &tensor, "StackPush", device, cpu_tensor, in ComputeAsync()
260 [cpu_tensor, stack, ctx, done](const Status& s) { in ComputeAsync()
264 ctx->SetStatus(stack->Push({*cpu_tensor, alloc_attrs, true})); in ComputeAsync()
267 ctx->set_output(0, *cpu_tensor); in ComputeAsync()
270 delete cpu_tensor; in ComputeAsync()
303 Tensor* cpu_tensor = &value.tensor; in ComputeAsync() local
306 new Tensor(gpu_allocator, cpu_tensor->dtype(), cpu_tensor->shape()); in ComputeAsync()
308 cpu_tensor, device, device_tensor, in ComputeAsync()
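Both StackPush branches above stage through a heap-allocated host tensor and delete it from the completion callback, once the DMA can no longer touch it. A minimal sketch of that ownership pattern (the helper name and the plain-allocator Tensor are illustrative; the real kernel allocates with host-memory AllocatorAttributes):

#include <functional>
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/tensor.h"

// Sketch: the staging tensor must outlive the async copy, so it lives on
// the heap and is deleted only inside the StatusCallback.
void CopyToHostThen(DeviceContext* dc, Device* device, const Tensor& src,
                    std::function<void(const Status&, const Tensor&)> then) {
  Tensor* cpu_tensor = new Tensor(src.dtype(), src.shape());
  dc->CopyDeviceTensorToCPU(&src, "StackPush", device, cpu_tensor,
                            [cpu_tensor, then](const Status& s) {
                              then(s, *cpu_tensor);
                              delete cpu_tensor;  // DMA finished; safe now
                            });
}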
collective_nccl_reducer_test.cc
234 Tensor cpu_tensor(dtype, shape); in InitTensor() local
235 init_f(&cpu_tensor); in InitTensor()
236 VLOG(2) << "cpu_tensor " << cpu_tensor.DebugString(); in InitTensor()
240 &cpu_tensor, device_, &tensor_, in InitTensor()
dynamic_partition_op_gpu.cu.cc
280 Tensor cpu_tensor; in ComputeAsync() local
287 &cpu_tensor, alloc_attr), in ComputeAsync()
293 ->ThenMemcpy(cpu_tensor.flat<int32>().data(), wrapped, in ComputeAsync()
305 partition_ref, cpu_tensor, done]() { in ComputeAsync()
307 this->AllocateOutputs(c, &data, &partitions, &cpu_tensor, &outputs, done); in ComputeAsync()
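The kernel above copies the partition counts to the host over the compute stream before it can size the outputs. A minimal sketch of that device-to-host hop (function name is hypothetical; se is the stream_executor namespace alias, and namespace qualifiers are omitted):

#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/stream_executor/stream.h"

// Sketch: wrap the raw GPU pointer in a DeviceMemoryBase and enqueue an
// async memcpy to host memory. The caller must not read cpu_tensor until
// the stream has synchronized (the real kernel defers via the EventMgr).
void CopyIndicesToHost(se::Stream* stream, const Tensor& gpu_tensor,
                       Tensor* cpu_tensor) {
  se::DeviceMemoryBase wrapped(
      const_cast<int32*>(gpu_tensor.flat<int32>().data()),
      gpu_tensor.TotalBytes());
  stream->ThenMemcpy(cpu_tensor->flat<int32>().data(), wrapped,
                     gpu_tensor.TotalBytes());
}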
/external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_util_platform_specific.cc
26 void GPUDeviceContext::CopyCPUTensorToDevice(const Tensor* cpu_tensor, in CopyCPUTensorToDevice() argument
30 GPUUtil::CopyCPUTensorToGPU(cpu_tensor, this, device, device_tensor, done); in CopyCPUTensorToDevice()
35 Device* device, Tensor* cpu_tensor, in CopyDeviceTensorToCPU() argument
37 GPUUtil::CopyGPUTensorToCPU(device, this, device_tensor, cpu_tensor, done); in CopyDeviceTensorToCPU()
gpu_util.cc
257 const Tensor* gpu_tensor, Tensor* cpu_tensor, in CopyGPUTensorToCPU() argument
262 Status s = PrepareCopy(gpu_device, device_context, *gpu_tensor, cpu_tensor, in CopyGPUTensorToCPU()
283 void* dst_ptr = GetBase(cpu_tensor); in CopyGPUTensorToCPU()
300 void GPUUtil::CopyCPUTensorToGPU(const Tensor* cpu_tensor, in CopyCPUTensorToGPU() argument
307 Status s = PrepareCopy(gpu_device, device_context, *cpu_tensor, gpu_tensor, in CopyCPUTensorToGPU()
324 const int64 total_bytes = cpu_tensor->TotalBytes(); in CopyCPUTensorToGPU()
327 void* src_ptr = GetBase(cpu_tensor); in CopyCPUTensorToGPU()
333 TensorReference input_ref(*cpu_tensor); in CopyCPUTensorToGPU()
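CopyCPUTensorToGPU above pins the source buffer with a TensorReference so the host allocation survives until the DMA drains. A minimal sketch of that idiom, assuming a stream_executor Stream plus the GPU EventMgr (the wrapper name is hypothetical, and the EventMgr hookup is simplified relative to gpu_util.cc):

#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h"
#include "tensorflow/core/framework/tensor_reference.h"

// Sketch: take an extra reference on the host tensor before enqueueing the
// host-to-device memcpy, and drop it from an EventMgr callback that runs
// once the stream has passed the copy.
void MemcpyH2DWithRef(se::Stream* stream, EventMgr* event_mgr,
                      const Tensor& cpu_tensor, se::DeviceMemoryBase* gpu_dst,
                      StatusCallback done) {
  TensorReference input_ref(cpu_tensor);  // pins the host buffer
  stream->ThenMemcpy(gpu_dst, DMAHelper::base(&cpu_tensor),
                     cpu_tensor.TotalBytes());
  event_mgr->ThenExecute(stream, [input_ref, done]() {
    input_ref.Unref();  // DMA complete; buffer may be reclaimed
    done(Status::OK());
  });
}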
gpu_util.h
49 const Tensor* gpu_tensor, Tensor* cpu_tensor,
88 static void CopyCPUTensorToGPU(const Tensor* cpu_tensor,
/external/tensorflow/tensorflow/core/common_runtime/sycl/
sycl_device_context.cc
25 void SYCLDeviceContext::CopyCPUTensorToDevice(const Tensor *cpu_tensor, in CopyCPUTensorToDevice() argument
29 const int64 total_bytes = cpu_tensor->TotalBytes(); in CopyCPUTensorToDevice()
31 const void *src_ptr = DMAHelper::base(cpu_tensor); in CopyCPUTensorToDevice()
33 switch (cpu_tensor->dtype()) { in CopyCPUTensorToDevice()
105 Tensor *cpu_tensor, in CopyDeviceTensorToCPU() argument
110 void *dst_ptr = DMAHelper::base(cpu_tensor); in CopyDeviceTensorToCPU()
sycl_device_context.h
34 void CopyCPUTensorToDevice(const Tensor *cpu_tensor, Device *device,
39 Device *device, Tensor *cpu_tensor,
/external/tensorflow/tensorflow/core/common_runtime/
copy_tensor.cc
295 Tensor* cpu_tensor = in ViaDMA() local
298 [cpu_tensor](StatusCallback done_, in ViaDMA()
301 delete cpu_tensor; in ViaDMA()
306 [delete_and_done, recv_dev_context, cpu_tensor, cpu_allocator, in ViaDMA()
314 CopyHostToDevice(cpu_tensor, cpu_allocator, out_allocator, edge_name, in ViaDMA()
320 cpu_tensor, send_dev_context, in ViaDMA()
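ViaDMA above handles the device-to-device case with no direct DMA path by staging through host memory: one async hop down, one async hop up, and the staging tensor freed when the chain ends. A minimal sketch of the chaining (names and the plain-allocator staging tensor are illustrative; the real code allocates from the provided cpu_allocator):

// Sketch: two-hop copy through a heap-allocated host tensor. The second
// hop starts only if the first succeeded; delete_and_done runs exactly
// once, on whichever path terminates the chain.
void CopyViaHost(DeviceContext* send_ctx, Device* send_dev,
                 DeviceContext* recv_ctx, Device* recv_dev,
                 const Tensor* input, Tensor* output, StatusCallback done) {
  Tensor* cpu_tensor = new Tensor(input->dtype(), input->shape());
  auto delete_and_done = [cpu_tensor, done](const Status& s) {
    delete cpu_tensor;
    done(s);
  };
  send_ctx->CopyDeviceTensorToCPU(
      input, "edge_name", send_dev, cpu_tensor,
      [recv_ctx, recv_dev, cpu_tensor, output,
       delete_and_done](const Status& s) {
        if (!s.ok()) {
          delete_and_done(s);
          return;
        }
        recv_ctx->CopyCPUTensorToDevice(cpu_tensor, recv_dev, output,
                                        delete_and_done);
      });
}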
gpu_device_context.h
52 void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
57 Device* device, Tensor* cpu_tensor,
process_function_library_runtime_test.cc
114 Tensor cpu_tensor(device_tensor.dtype(), device_tensor.shape()); in GPUToCPU() local
116 &cpu_tensor, in GPUToCPU()
123 return cpu_tensor; in GPUToCPU()
129 Tensor CPUToGPU(const Tensor& cpu_tensor) { in CPUToGPU() argument
138 Tensor device_tensor(gpu_device_->GetAllocator({}), cpu_tensor.dtype(), in CPUToGPU()
139 cpu_tensor.shape(), {}); in CPUToGPU()
140 device_context->CopyCPUTensorToDevice(&cpu_tensor, gpu_device_, in CPUToGPU()
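The GPUToCPU and CPUToGPU test helpers above turn the asynchronous copy API into blocking calls by parking on a Notification until the StatusCallback fires. A minimal sketch of the device-to-host direction, assuming the caller holds the device's DeviceContext (namespace qualifiers omitted):

#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"

// Sketch: issue the async copy, then block until its callback publishes
// the Status and notifies.
Tensor GPUToCPU(DeviceContext* dc, Device* gpu_device,
                const Tensor& device_tensor) {
  Tensor cpu_tensor(device_tensor.dtype(), device_tensor.shape());
  Status status;
  Notification n;
  dc->CopyDeviceTensorToCPU(&device_tensor, /*tensor_name=*/"", gpu_device,
                            &cpu_tensor, [&status, &n](const Status& s) {
                              status = s;
                              n.Notify();
                            });
  n.WaitForNotification();  // callback has run; status is valid
  TF_CHECK_OK(status);
  return cpu_tensor;
}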
ring_alg.cc
256 Tensor cpu_tensor(tensor.dtype(), tensor.shape()); in TensorDebugString() local
259 &tensor, "" /*tensor_name*/, col_ctx_->device, &cpu_tensor, in TensorDebugString()
265 return cpu_tensor.SummarizeValue(64); in TensorDebugString()
ring_gatherer_test.cc
425 Tensor cpu_tensor(dtype, shape); in InitTensor() local
426 init_f(&cpu_tensor); in InitTensor()
431 &cpu_tensor, device_, &input_tensor_, [&note](const Status& s) { in InitTensor()
ring_reducer_test.cc
449 Tensor cpu_tensor(dtype, shape); in InitTensor() local
450 init_f(&cpu_tensor); in InitTensor()
455 &cpu_tensor, device_, &tensor_, [&note](const Status& s) { in InitTensor()
hierarchical_tree_broadcaster_test.cc
613 Tensor cpu_tensor(dtype, shape); in InitTensor() local
614 f(&cpu_tensor); in InitTensor()
619 &cpu_tensor, device_, &tensor_, [&notification](Status s) { in InitTensor()
/external/tensorflow/tensorflow/python/eager/
ops_test.py
280 cpu_tensor = constant_op.constant(1.0)
281 gpu_tensor = cpu_tensor.gpu()
282 self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
298 cpu_tensor = constant_op.constant(1.0)
299 result = cpu_tensor + cpu_tensor
/external/tensorflow/tensorflow/python/framework/
config_test.py
63 cpu_tensor = constant_op.constant(1, dtype=dtype)
64 gpu_tensor = cpu_tensor.gpu()
65 self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/
grpc_worker_service.cc
593 Tensor* cpu_tensor = new Tensor(cpu_dev->GetAllocator(cpu_attr), in RecvBufAsync() local
597 hook->prod_value, "empty_name", hook->prod_dev, cpu_tensor, in RecvBufAsync()
599 cpu_tensor](const Status& s) { in RecvBufAsync()
601 SetTensorInRecvBufResp(recv_buf_max_chunk_, cpu_tensor, in RecvBufAsync()
607 delete cpu_tensor; in RecvBufAsync()
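RecvBufAsync above stages the producer's tensor into host memory, then SetTensorInRecvBufResp streams it back in chunks capped at recv_buf_max_chunk_; PopulateTensorFromExtra in collective_rma_distributed.cc (above) performs the inverse reassembly. A hypothetical sketch of the chunking side, assuming the repeated bytes tensor_content field of RecvBufRespExtra (helper name is illustrative):

#include <algorithm>
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/protobuf/worker.pb.h"

// Hypothetical sketch: walk the staged tensor's flat buffer and append
// at most max_chunk_bytes of it per tensor_content entry.
void AddTensorChunks(int64 max_chunk_bytes, const Tensor& cpu_tensor,
                     RecvBufRespExtra* extra) {
  const char* head =
      reinterpret_cast<const char*>(DMAHelper::base(&cpu_tensor));
  int64 remaining = cpu_tensor.TotalBytes();
  while (remaining > 0) {
    const int64 chunk = std::min<int64>(remaining, max_chunk_bytes);
    extra->add_tensor_content(head, chunk);
    head += chunk;
    remaining -= chunk;
  }
}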
/external/tensorflow/tensorflow/core/framework/
device_base.h
79 virtual void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device, in CopyCPUTensorToDevice() argument
97 Tensor* cpu_tensor, StatusCallback done) { in CopyDeviceTensorToCPU() argument
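device_base.h declares the two copy hooks that every result above either overrides or calls; the base implementations just fail with an Internal error for unrecognized device types. A minimal sketch of a subclass for a hypothetical device whose memory is host-addressable, so both hooks reduce to memcpy plus the completion callback (class name is illustrative; qualifiers assumed to follow device_base.h at this revision):

#include <cstring>
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/device_base.h"

// Hypothetical sketch: a DeviceContext for host-addressable device memory.
// Because the copies complete inline, done() is invoked before returning;
// real accelerator contexts invoke it from a DMA-completion callback.
class HostMirrorDeviceContext : public DeviceContext {
 public:
  void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
                             Tensor* device_tensor,
                             StatusCallback done) const override {
    memcpy(DMAHelper::base(device_tensor), DMAHelper::base(cpu_tensor),
           cpu_tensor->TotalBytes());
    done(Status::OK());
  }
  void CopyDeviceTensorToCPU(const Tensor* device_tensor,
                             StringPiece tensor_name, Device* device,
                             Tensor* cpu_tensor, StatusCallback done) override {
    memcpy(DMAHelper::base(cpu_tensor), DMAHelper::base(device_tensor),
           device_tensor->TotalBytes());
    done(Status::OK());
  }
};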