/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | transfer_manager_test.cc |
     74  auto device_buffer = AllocateDeviceBuffer(shape);  in XLA_TEST_F() local
     78  device_buffer));  in XLA_TEST_F()
     81  transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));  in XLA_TEST_F()
     90  auto device_buffer = AllocateDeviceBuffer(shape);  in XLA_TEST_F() local
     94  device_buffer));  in XLA_TEST_F()
     97  transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));  in XLA_TEST_F()
    112  auto device_buffer = AllocateDeviceBuffer(shape);  in XLA_TEST_F() local
    116  device_buffer));  in XLA_TEST_F()
    119  transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));  in XLA_TEST_F()
    130  auto device_buffer = AllocateDeviceBuffer(shape);  in XLA_TEST_F() local
    [all …]
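The hits above repeat one round-trip pattern: allocate a device buffer for a shape, transfer a literal in, transfer a literal back out, and compare. A minimal sketch of that pattern, assuming the fixture members visible in the matches (transfer_manager_, stream_, AllocateDeviceBuffer); the test name and input value are illustrative, not copied from the file:

    XLA_TEST_F(TransferManagerTest, RoundTripsR1F32) {
      // Illustrative input; the real tests cover many shapes and element types.
      Literal literal = LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f});
      auto device_buffer = AllocateDeviceBuffer(literal.shape());

      // Host -> device, then device -> host on the same stream.
      ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                              device_buffer));
      TF_ASSERT_OK_AND_ASSIGN(
          Literal result,
          transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));

      EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
    }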
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | transfer_manager.h |
     78  se::Stream* stream, const ShapedBuffer& device_buffer,
     81  se::Stream* stream, const ShapedBuffer& device_buffer) {  in TransferLiteralFromDevice() argument
     82  return TransferLiteralFromDevice(stream, device_buffer, nullptr);  in TransferLiteralFromDevice()
     85  se::Stream* stream, const ShapedBuffer& device_buffer,
     89  const ShapedBuffer& device_buffer,  in TransferLiteralFromDevice() argument
     91  return TransferLiteralFromDevice(stream, device_buffer, literal, nullptr);  in TransferLiteralFromDevice()
    107  se::Stream* stream, const ShapedBuffer& device_buffer,
    111  const ShapedBuffer& device_buffer,  in TransferLiteralFromDevice() argument
    114  return TransferLiteralFromDevice(stream, device_buffer, literal, done,  in TransferLiteralFromDevice()
    131  const ShapedBuffer& device_buffer,
    [all …]
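The header hits trace an overload ladder: each convenience form of TransferLiteralFromDevice forwards to a fuller one, bottoming out in the asynchronous overload that takes a completion callback. The three call shapes, as a sketch (return types and the literal parameter follow the XLA API of this vintage, where the destination is a MutableBorrowingLiteral; treat the details as assumptions):

    // 1) Synchronous; allocates and returns the result literal.
    StatusOr<Literal> result =
        transfer_manager->TransferLiteralFromDevice(stream, device_buffer);

    // 2) Synchronous; writes into a caller-owned literal.
    Literal literal(device_buffer.on_host_shape());
    Status s = transfer_manager->TransferLiteralFromDevice(stream, device_buffer,
                                                           &literal);

    // 3) Asynchronous; `done` fires once the copy has completed.
    transfer_manager->TransferLiteralFromDevice(
        stream, device_buffer, &literal,
        [](Status status) { TF_CHECK_OK(status); });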
|
D | generic_transfer_manager.cc |
     62  se::Stream* stream, const ShapedBuffer& device_buffer,  in TransferLiteralFromDevice() argument
     67  << "; device buffer: " << device_buffer;  in TransferLiteralFromDevice()
     70  device_buffer.device_ordinal());  in TransferLiteralFromDevice()
     73  device_buffer.on_device_shape(),  in TransferLiteralFromDevice()
     78  /*gpu_src=*/device_buffer.buffer(index),  in TransferLiteralFromDevice()
     94  const ShapedBuffer& device_buffer,  in TransferLiteralToDeviceAsync() argument
     99  << "; device buffer: " << device_buffer;  in TransferLiteralToDeviceAsync()
    102  ShapeUtil::Compatible(literal.shape(), device_buffer.on_device_shape()));  in TransferLiteralToDeviceAsync()
    104  device_buffer.device_ordinal());  in TransferLiteralToDeviceAsync()
    106  TF_RETURN_IF_ERROR(WriteTupleIndexTablesAsync(stream, device_buffer));  in TransferLiteralToDeviceAsync()
    [all …]
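The /*gpu_src=*/device_buffer.buffer(index) hit is the heart of the generic manager: it walks every array leaf of the (possibly tuple-shaped) device buffer and enqueues one device-to-host memcpy per leaf. A sketch of that loop's shape, assuming ShapeUtil::ForEachSubshapeWithStatus and se::Stream::ThenMemcpy, written as if inside the member function (GetByteSizeRequirement is a TransferManager method); not the verbatim body:

    TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
        device_buffer.on_device_shape(),
        [&](const Shape& subshape, const ShapeIndex& index) -> Status {
          if (subshape.IsArray()) {
            // One D2H copy per array leaf; tuple index tables are handled
            // separately (see WriteTupleIndexTablesAsync above).
            stream->ThenMemcpy(
                /*host_dst=*/literal.untyped_data(index),
                /*gpu_src=*/device_buffer.buffer(index),
                /*size=*/GetByteSizeRequirement(subshape));
          }
          return Status::OK();
        }));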
|
D | transfer_manager.cc |
     51  se::Stream* stream, const ShapedBuffer& device_buffer,  in TransferLiteralFromDevice() argument
     62  Literal literal(device_buffer.on_host_shape());  in TransferLiteralFromDevice()
     64  substream, device_buffer, &literal,  in TransferLiteralFromDevice()
     78  se::Stream* stream, const ShapedBuffer& device_buffer,  in TransferLiteralFromDevice() argument
     88  substream, device_buffer, literal,  in TransferLiteralFromDevice()
    100  const ShapedBuffer& device_buffer,  in TransferLiteralToDevice() argument
    110  substream, literal, device_buffer, transfer_metadata));  in TransferLiteralToDevice()
    203  ShapedBuffer* device_buffer,  in ReadDynamicShapes() argument
    211  TF_RETURN_IF_ERROR(device_buffer->buffers().ForEachMutableElementWithStatus(  in ReadDynamicShapes()
    288  se::Stream* stream, const ShapedBuffer& device_buffer) {  in WriteTupleIndexTables() argument
    [all …]
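The substream hits show how the synchronous entry points are built on the asynchronous one: borrow a substream, run the async transfer there, and block until the completion callback fires. A sketch of that pattern, assuming the stream_executor substream API (GetOrCreateSubStream/ReturnSubStream) plus absl::Notification and absl::MakeCleanup; the real implementation differs in details:

    StatusOr<Literal> TransferSyncViaSubstream(
        xla::TransferManager* tm, se::Stream* stream,
        const xla::ShapedBuffer& device_buffer) {
      // Borrow a substream so the caller's stream stays usable, and order it
      // after any work already enqueued on the parent stream.
      se::Stream* substream = stream->GetOrCreateSubStream();
      substream->ThenWaitFor(stream);
      auto cleanup =
          absl::MakeCleanup([&] { stream->ReturnSubStream(substream); });

      Literal literal(device_buffer.on_host_shape());
      Status transfer_status;
      absl::Notification done;
      tm->TransferLiteralFromDevice(substream, device_buffer, &literal,
                                    [&](Status s) {
                                      transfer_status = s;
                                      done.Notify();
                                    });
      done.WaitForNotification();  // block until the async copy completes
      TF_RETURN_IF_ERROR(transfer_status);
      return std::move(literal);
    }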
|
D | generic_transfer_manager.h |
     44  se::Stream* stream, const ShapedBuffer& device_buffer,
     50  const ShapedBuffer& device_buffer,
|
/external/tensorflow/tensorflow/compiler/xla/pjrt/ |
D | pjrt_stream_executor_client.cc |
    328  void RecordUsage(PjRtStreamExecutorBuffer::ScopedHold device_buffer,  in RecordUsage() argument
    345  buffer_local_device->ThenRelease(usage_stream, device_buffer.buffer());  in RecordUsage()
    347  device_buffer.ConvertUsageHold(usage_stream, event,  in RecordUsage()
    481  PjRtStreamExecutorBuffer::ScopedHold device_buffer,  in AddDestinationBufferSynchronization() argument
    498  RecordUsage(std::move(device_buffer), local_device, local_device,  in AddDestinationBufferSynchronization()
    710  auto device_buffer = std::make_shared<TrackedDeviceBuffer>(  in BufferFromHostBuffer() local
    716  shape, std::move(device_buffer), this, device));  in BufferFromHostBuffer()
    726  PjRtStreamExecutorBuffer::ScopedHold device_buffer(  in BufferFromHostBuffer() local
    728  CHECK(device_buffer.ok());  in BufferFromHostBuffer()
    764  movable_device_buffer{device_buffer.ToClosure()}, shape,  in BufferFromHostBuffer()
    [all …]
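RecordUsage and BufferFromHostBuffer both revolve around PjRtStreamExecutorBuffer::ScopedHold: a hold pins the TrackedDeviceBuffer while work that reads it is enqueued, and is then converted into a usage event the buffer must wait on before it can be freed or donated. A conceptual sketch of that lifecycle (names follow pjrt_stream_executor_client.*, but the event plumbing is simplified and exact signatures should be treated as assumptions):

    // Pin the underlying TrackedDeviceBuffer against deletion/donation.
    PjRtStreamExecutorBuffer::ScopedHold device_buffer(
        buffer->GetBufferWithUsageHold());
    CHECK(device_buffer.ok());

    // ... enqueue work on usage_stream that reads device_buffer.buffer() ...

    // Release the hold: the buffer now tracks a usage event that must be
    // reached before its device memory may be reclaimed.
    device_buffer.ConvertUsageHold(usage_stream, event,
                                   /*reference_held=*/false);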
|
D | tracked_device_buffer_test.cc |
    105  std::shared_ptr<TrackedDeviceBuffer> device_buffer =  in TEST() local
    108  EXPECT_EQ(device_buffer->device_memory().size(),  in TEST()
|
D | pjrt_stream_executor_client.h |
    460  std::shared_ptr<TrackedDeviceBuffer> device_buffer,
    574  void ConfirmDonation(TrackedDeviceBuffer* device_buffer);
|
/external/tensorflow/tensorflow/stream_executor/tpu/ |
D | tpu_transfer_manager.cc |
     69  const xla::ShapedBuffer& device_buffer,  in TransferLiteralToDeviceAsync() argument
     77  ApiConverter::ToC(device_buffer, &c_device_buffer);  in TransferLiteralToDeviceAsync()
    191  stream_executor::Stream* stream, const xla::ShapedBuffer& device_buffer,  in TransferLiteralFromDevice() argument
    198  ApiConverter::ToC(device_buffer, &c_device_buffer);  in TransferLiteralFromDevice()
    245  const xla::ShapedBuffer& device_buffer) const {  in CanShapedBufferBeAccessedNow()
    248  ApiConverter::ToC(device_buffer, &c_device_buffer);  in CanShapedBufferBeAccessedNow()
    258  const se::DeviceMemoryBase& device_buffer) const {  in CanBufferBeAccessedNow()
    260  SE_DeviceMemoryBase c_device_buffer{const_cast<void*>(device_buffer.opaque()),  in CanBufferBeAccessedNow()
    261  device_buffer.size(),  in CanBufferBeAccessedNow()
    262  device_buffer.payload()};  in CanBufferBeAccessedNow()
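Every TPU hit follows the same C-ABI bridging pattern: mirror the C++ ShapedBuffer (or DeviceMemoryBase) into its C struct, call through a tpu_executor_c_api entry point, then release the C-side copies. A sketch, assuming the ApiConverter helpers named in the matches:

    XLA_ShapedBuffer c_device_buffer;
    ApiConverter::ToC(device_buffer, &c_device_buffer);  // C++ -> C mirror
    // ... call the corresponding TpuTransferManager_* C API entry point,
    //     passing &c_device_buffer (and a TF_Status for error reporting) ...
    ApiConverter::Destroy(&c_device_buffer);  // free C-side allocations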
|
D | tpu_transfer_manager.h |
     46  const xla::ShapedBuffer& device_buffer,
     50  stream_executor::Stream* stream, const xla::ShapedBuffer& device_buffer,
     75  const xla::ShapedBuffer& device_buffer) const override;
     79  const se::DeviceMemoryBase& device_buffer) const override;
|
D | tpu_executor_c_api.h |
    184  XLA_ShapedBuffer* device_buffer, TF_Status* status);
    187  XLA_ShapedBuffer* device_buffer, XLA_Literal* literal,
    196  XLA_ShapedBuffer* device_buffer);
    199  SE_DeviceMemoryBase* device_buffer);
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | cpu_transfer_manager.h |
     49  const ShapedBuffer& device_buffer) const override {  in CanShapedBufferBeAccessedNow() argument
     55  const se::DeviceMemoryBase& device_buffer) const override {  in CanBufferBeAccessedNow() argument
|
/external/tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/ |
D | tpu_client.cc |
    266  Shape on_host_shape, std::shared_ptr<TpuSharedBuffer> device_buffer,  in PyTpuBuffer() argument
    271  device_(device_buffer->device),  in PyTpuBuffer()
    272  device_buffer_(std::move(device_buffer)),  in PyTpuBuffer()
    430  std::shared_ptr<TpuSharedBuffer> device_buffer = DeviceBuffer();  in BlockHostUntilReady() local
    431  if (!device_buffer) {  in BlockHostUntilReady()
    435  return device_buffer->handle->OnReady()->Await();  in BlockHostUntilReady()
    492  auto device_buffer = std::make_shared<TpuSharedBuffer>(  in CreateBuffer() local
    497  non_tuple_shape, std::move(device_buffer),  in CreateBuffer()
|
D | tpu_client.h |
    199  std::shared_ptr<TpuSharedBuffer> device_buffer,
|
/external/deqp-deps/amber/src/dawn/ |
D | engine_dawn.cc |
    369  auto& device_buffer = compute_pipeline.buffers[i];  in MapDeviceBufferToHostBuffer() local
    385  encoder.CopyBufferToBuffer(device_buffer, source_offset, copy_device_buffer,  in MapDeviceBufferToHostBuffer()
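The Amber hit is the standard Dawn readback recipe: a device-local buffer cannot be mapped directly, so MapDeviceBufferToHostBuffer first copies it into a mappable staging buffer. A sketch of that recipe against the Dawn C++ API of this era (descriptor fields and enum spellings are assumptions; newer Dawn spells these types wgpu::):

    // Staging buffer the host is allowed to map for reading.
    ::dawn::BufferDescriptor descriptor;
    descriptor.size = size;
    descriptor.usage =
        ::dawn::BufferUsageBit::MapRead | ::dawn::BufferUsageBit::CopyDst;
    ::dawn::Buffer copy_device_buffer = device.CreateBuffer(&descriptor);

    // Record and submit the device-to-staging copy.
    ::dawn::CommandEncoder encoder = device.CreateCommandEncoder();
    encoder.CopyBufferToBuffer(device_buffer, source_offset, copy_device_buffer,
                               /*destinationOffset=*/0, size);
    ::dawn::CommandBuffer commands = encoder.Finish();
    queue.Submit(1, &commands);
    // copy_device_buffer can now be mapped (MapReadAsync) to read the data back.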
|