/external/tensorflow/tensorflow/compiler/xrt/ |
D | xrt_state.cc |
    149  const xla::Shape& on_host_shape,  in XRTTupleAllocation() argument
    153  on_host_shape_(on_host_shape),  in XRTTupleAllocation()
    189  shaped_buffer.on_host_shape(), shaped_buffer.on_device_shape());  in CreateAndTransfer()
    212  shaped_buffer.on_host_shape(), shaped_buffer.on_device_shape());  in CreateUninitialized()
    221  const xla::ShapedBuffer& shaped_buffer, const xla::Shape& on_host_shape,  in CreateFromBuffer() argument
    226  *allocation = new XRTTupleAllocation(device_ordinal, allocator, on_host_shape,  in CreateFromBuffer()
    237  return CreateFromBuffer(shaped_buffer, shaped_buffer.on_host_shape(),  in CreateFromBuffer()
    260  if (!xla::ShapeUtil::Equal(literal.shape(), on_host_shape())) {  in WriteLiteral()
    264  " device=", xla::ShapeUtil::HumanStringWithLayout(on_host_shape()));  in WriteLiteral()
    283  xla::Literal literal(on_host_shape());  in SwapOut()
    [all …]
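The WriteLiteral hits at xrt_state.cc:260 and :264 show the guard XRT applies before copying a literal into an allocation: the literal's shape must equal the allocation's on_host_shape(). A minimal sketch of that check, written as a hypothetical free function rather than the actual XRTTupleAllocation member:

    #include "tensorflow/compiler/xla/literal.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/core/lib/core/errors.h"
    #include "tensorflow/core/lib/core/status.h"

    // Hypothetical helper mirroring the check in XRTTupleAllocation::WriteLiteral:
    // refuse to write a literal whose shape differs from the allocation's host shape.
    tensorflow::Status CheckLiteralMatchesAllocation(const xla::Literal& literal,
                                                     const xla::Shape& on_host_shape) {
      if (!xla::ShapeUtil::Equal(literal.shape(), on_host_shape)) {
        return tensorflow::errors::InvalidArgument(
            "Shape mismatch: literal=",
            xla::ShapeUtil::HumanStringWithLayout(literal.shape()),
            " device=", xla::ShapeUtil::HumanStringWithLayout(on_host_shape));
      }
      return tensorflow::Status::OK();
    }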
|
D | xrt_state.h |
    106  const xla::Shape& on_host_shape,
    196  const xla::Shape& on_host_shape() const;
    238  const xla::Shape& on_host_shape,
|
D | xrt_util.cc |
    279  if (!InputShapeMatches(input_shape, tuple->on_host_shape())) {  in GetInputTupleAllocations()
    283  "; got ", tuple->on_host_shape().DebugString());  in GetInputTupleAllocations()
    349  if (return_exploded_tuple && output_tuple->on_host_shape().IsTuple()) {  in CreateExecuteOutput()
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | shaped_buffer.h |
    49   ShapedBuffer(Shape on_host_shape, Shape on_device_shape, int device_ordinal);
    65   const Shape& on_host_shape() const { return on_host_shape_; }  in on_host_shape() function
    110  void set_shapes(const Shape& on_host_shape, const Shape& on_device_shape) {  in set_shapes() argument
    156  explicit ScopedShapedBuffer(Shape on_host_shape, Shape on_device_shape,
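shaped_buffer.h is where the accessor this whole listing keys on is declared: a ShapedBuffer carries both a host-side view and a device-side view of its shape. A small illustrative helper (the function name and logging are mine, not from the file) showing how callers typically read the two:

    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/core/platform/logging.h"

    // Illustrative only: on_host_shape() is the shape the client sees;
    // on_device_shape() is the (possibly re-laid-out) shape the backend allocated.
    void LogBufferShapes(const xla::ShapedBuffer& buffer) {
      VLOG(2) << "host:   "
              << xla::ShapeUtil::HumanStringWithLayout(buffer.on_host_shape());
      VLOG(2) << "device: "
              << xla::ShapeUtil::HumanStringWithLayout(buffer.on_device_shape());
    }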
|
D | transfer_manager.cc |
    62   Literal literal(device_buffer.on_host_shape());  in TransferLiteralFromDevice()
    388  const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator,  in AllocateScopedShapedBuffer() argument
    390  if (!LayoutUtil::HasLayout(on_host_shape)) {  in AllocateScopedShapedBuffer()
    392  ShapeUtil::HumanStringWithLayout(on_host_shape));  in AllocateScopedShapedBuffer()
    394  TF_RETURN_IF_ERROR(ShapeUtil::ValidateShape(on_host_shape));  in AllocateScopedShapedBuffer()
    395  Shape on_device_shape = HostShapeToDeviceShape(on_host_shape);  in AllocateScopedShapedBuffer()
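Taken together, the transfer_manager hits outline the usual round trip: allocate a device buffer from a host shape (after validating its layout), then size the readback Literal from on_host_shape(). A hedged sketch of that flow; it assumes the AllocateScopedShapedBuffer overload also takes a device ordinal, which the truncated signature above does not show:

    #include "tensorflow/compiler/xla/literal.h"
    #include "tensorflow/compiler/xla/service/transfer_manager.h"
    #include "tensorflow/compiler/xla/status_macros.h"
    #include "tensorflow/compiler/xla/statusor.h"
    #include "tensorflow/core/platform/errors.h"
    #include "tensorflow/stream_executor/device_memory_allocator.h"
    #include "tensorflow/stream_executor/stream.h"

    namespace se = ::stream_executor;

    // Sketch only: copy a literal to the device and read it straight back.
    xla::StatusOr<xla::Literal> RoundTripThroughDevice(
        xla::TransferManager* transfer_manager,
        se::DeviceMemoryAllocator* allocator, se::Stream* stream,
        const xla::Literal& literal) {
      TF_ASSIGN_OR_RETURN(xla::ScopedShapedBuffer buffer,
                          transfer_manager->AllocateScopedShapedBuffer(
                              literal.shape(), allocator, /*device_ordinal=*/0));
      TF_RETURN_IF_ERROR(
          transfer_manager->TransferLiteralToDevice(stream, literal, buffer));
      // TransferLiteralFromDevice sizes its result from buffer.on_host_shape(),
      // as the hit at transfer_manager.cc:62 shows.
      return transfer_manager->TransferLiteralFromDevice(stream, buffer);
    }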
|
D | shaped_buffer.cc |
    41   ShapedBuffer::ShapedBuffer(Shape on_host_shape, Shape on_device_shape,  in ShapedBuffer() argument
    122  ScopedShapedBuffer::ScopedShapedBuffer(Shape on_host_shape,  in ScopedShapedBuffer() argument
|
D | transfer_manager.h | 250 const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator,
|
D | shaped_buffer_test.cc | 170 EXPECT_EQ(ssb.on_host_shape(), array_shape); in TEST()
|
D | service.cc |
    279   argument_shapes.push_back(&arg->on_host_shape());  in CreateModuleConfig()
    858   const Shape& shape_arg = replicated_arguments.front()[i]->on_host_shape();  in Execute()
    928   return_shape = Shape(shaped_buffer->on_host_shape());  in TransferToClient()
    1099  *result->mutable_shape() = buffer->on_host_shape().ToProto();  in GetShape()
|
D | executable.h | 157 ExecutionOutput(Shape on_host_shape, Shape on_device_shape, in ExecutionOutput() argument
|
/external/tensorflow/tensorflow/compiler/jit/ |
D | xla_tensor.cc |
    43  const xla::Shape& on_host_shape,  in AllocateShapedBuffer() argument
    48  on_host_shape);  in AllocateShapedBuffer()
    50  xla::ScopedShapedBuffer shaped_buffer(on_host_shape, on_device_shape,  in AllocateShapedBuffer()
|
D | xla_tensor.h | 51 Status AllocateShapedBuffer(DataType dtype, const xla::Shape& on_host_shape,
|
D | xla_launch_util.cc |
    432  VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString();  in PopulateOutputs()
    440  if (!output.on_host_shape().IsTuple()) {  in PopulateOutputs()
    443  xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_host_shape()}),  in PopulateOutputs()
    463  if (output.on_host_shape().is_dynamic()) {  in PopulateOutputs()
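The PopulateOutputs hits show outputs being handled uniformly as tuples: a non-tuple result's host shape is wrapped into a one-element tuple shape (xla_launch_util.cc:443). An illustrative reduction of that normalization, isolated from the rest of the output-population logic:

    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Illustrative only: return the output's host shape, wrapping a non-tuple
    // result in a single-element tuple so downstream code sees one shape kind.
    xla::Shape NormalizeToTupleShape(const xla::ShapedBuffer& output) {
      if (output.on_host_shape().IsTuple()) {
        return output.on_host_shape();
      }
      return xla::ShapeUtil::MakeTupleShape({output.on_host_shape()});
    }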
|
D | xla_device_context.cc | 249 xla_tensor->shaped_buffer().on_host_shape()), in CopyDeviceTensorToCPU()
|
/external/tensorflow/tensorflow/core/tpu/kernels/ |
D | tpu_execute_op.cc |
    231  const xla::Shape& xla_shape = xla_tensor->shaped_buffer().on_host_shape();  in BuildComputationInputs()
    405  xla::ShapeUtil::TupleElementCount(scoped_buffers.on_host_shape());  in AllocateOutputTensors()
    422  xla::ShapeUtil::GetSubshape(scoped_buffers.on_host_shape(), {i});  in AllocateOutputTensors()
    433  TF_RET_CHECK(scoped_buffers.on_host_shape().IsTuple());  in AllocateOutputTensors()
    434  TF_RET_CHECK(!xla::ShapeUtil::IsNestedTuple(scoped_buffers.on_host_shape()));  in AllocateOutputTensors()
    709  std::make_shared<xla::Literal>(shaped_buffer.on_host_shape());  in DoWork()
    742  shaped_buffer.on_host_shape()));  in DoWork()
    774  std::make_shared<xla::Literal>(output_buffers->buffers.on_host_shape());  in DoWork()
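The AllocateOutputTensors hits sketch how TPU outputs are walked: the result's on_host_shape() must be a flat (non-nested) tuple, and each element shape is pulled out with GetSubshape. A hedged sketch of that walk, with a hypothetical callback standing in for the real tensor-allocation logic:

    #include <functional>
    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/compiler/xla/status_macros.h"
    #include "tensorflow/core/lib/core/status.h"

    // Sketch only: visit each top-level tuple element's host shape.
    tensorflow::Status ForEachOutputShape(
        const xla::ScopedShapedBuffer& scoped_buffers,
        const std::function<void(int64_t, const xla::Shape&)>& fn) {
      TF_RET_CHECK(scoped_buffers.on_host_shape().IsTuple());
      TF_RET_CHECK(!xla::ShapeUtil::IsNestedTuple(scoped_buffers.on_host_shape()));
      const int64_t n =
          xla::ShapeUtil::TupleElementCount(scoped_buffers.on_host_shape());
      for (int64_t i = 0; i < n; ++i) {
        fn(i, xla::ShapeUtil::GetSubshape(scoped_buffers.on_host_shape(), {i}));
      }
      return tensorflow::Status::OK();
    }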
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | local_client_execute_test.cc |
    211  EXPECT_TRUE(result.on_host_shape().IsTuple());  in XLA_TEST_F()
    212  EXPECT_EQ(3, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
    239  EXPECT_TRUE(result.on_host_shape().IsTuple());  in XLA_TEST_F()
    240  EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
    317  EXPECT_TRUE(result.on_host_shape().IsTuple());  in XLA_TEST_F()
    318  EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
    979  auto executables, client->Compile(computation, {&buffer.on_host_shape()},  in BM_LocalClientOverhead()
|
D | local_client_test_base.cc | 194 argument_layouts[i] = &arguments[i]->on_host_shape(); in ExecuteLocally()
|
D | cpu_gpu_fusion_test.cc |
    887  {&buffer0.on_host_shape(), &buffer1.on_host_shape(),  in BM_ParallelFusion()
    888  &buffer2.on_host_shape()},  in BM_ParallelFusion()
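The test and benchmark hits above (local_client_execute_test.cc:979, local_client_test_base.cc:194, cpu_gpu_fusion_test.cc:887-888) all pass each argument buffer's on_host_shape() as the argument layout when compiling. A small illustrative helper that collects those pointers; the helper name is mine, and the buffers must outlive the returned vector:

    #include <vector>
    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/compiler/xla/shape.h"

    // Illustrative only: build the argument-layout list a Compile call expects
    // from the host shapes of already-allocated argument buffers.
    std::vector<const xla::Shape*> ArgumentLayoutsFromBuffers(
        const std::vector<const xla::ShapedBuffer*>& buffers) {
      std::vector<const xla::Shape*> layouts;
      layouts.reserve(buffers.size());
      for (const xla::ShapedBuffer* buffer : buffers) {
        layouts.push_back(&buffer->on_host_shape());
      }
      return layouts;
    }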
|
/external/tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/ |
D | tpu_client_extension.cc |
    171  .def("shape", &PyTpuBuffer::on_host_shape)  in PYBIND11_MODULE()
    172  .def("xla_shape", &PyTpuBuffer::on_host_shape)  in PYBIND11_MODULE()
|
D | tpu_client.cc |
    239  child_shapes.push_back(child_buffer->on_host_shape());  in MakeTuple()
    266  Shape on_host_shape, std::shared_ptr<TpuSharedBuffer> device_buffer,  in PyTpuBuffer() argument
    270  on_host_shape_(std::move(on_host_shape)),  in PyTpuBuffer()
    657  if (result.buffer->on_host_shape().IsTuple()) {  in Execute()
    744  if (results[i].buffer->on_host_shape().IsTuple()) {  in ExecuteOnLocalDevices()
|
D | tpu_client.h |
    198  PyTpuBuffer(Shape on_host_shape,
    208  const Shape& on_host_shape() const { return on_host_shape_; }  in on_host_shape() function
|
/external/tensorflow/tensorflow/compiler/xla/client/ |
D | local_client.cc |
    206  auto literal = std::make_shared<Literal>(arg->on_host_shape());  in DumpArguments()
    225  auto literal = std::make_shared<Literal>(outputs.on_host_shape());  in DumpOutputsAndSaveSnapshot()
|
/external/tensorflow/tensorflow/compiler/xrt/kernels/ |
D | tpu_execute_op.cc |
    113  input_tuples.back()->on_host_shape())) {  in GetChainedOpInputs()
    118  tuple->on_host_shape().DebugString());  in GetChainedOpInputs()
|
D | xrt_state_ops.h |
    521  xla::Literal literal(allocation->on_host_shape());  in Compute()
    574  xla::Shape shape = allocation->on_host_shape();  in Compute()
|
/external/tensorflow/tensorflow/compiler/xla/pjrt/ |
D | pjrt_stream_executor_client.cc |
    364  const Shape& on_host_shape, PjRtDevice* device,  in AllocateDestinationBuffer() argument
    368  if (on_host_shape.IsTuple() && on_host_shape.tuple_shapes_size() == 0) {  in AllocateDestinationBuffer()
    377  on_host_shape, se_client->allocator(),  in AllocateDestinationBuffer()
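AllocateDestinationBuffer rejects the empty tuple shape up front (pjrt_stream_executor_client.cc:368). A sketch of that guard in isolation; the error text here is illustrative, not the message the client actually emits:

    #include "tensorflow/compiler/xla/shape.h"
    #include "tensorflow/compiler/xla/status.h"
    #include "tensorflow/compiler/xla/util.h"

    // Sketch only: a destination buffer cannot be created for the empty tuple.
    xla::Status CheckDestinationShape(const xla::Shape& on_host_shape) {
      if (on_host_shape.IsTuple() && on_host_shape.tuple_shapes_size() == 0) {
        return xla::InvalidArgument("Cannot allocate a buffer for an empty tuple");
      }
      return xla::Status::OK();
    }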
|