
Searched refs: on_host_shape (Results 1 – 25 of 27) sorted by relevance


/external/tensorflow/tensorflow/compiler/xrt/
xrt_state.cc
149 const xla::Shape& on_host_shape, in XRTTupleAllocation() argument
153 on_host_shape_(on_host_shape), in XRTTupleAllocation()
189 shaped_buffer.on_host_shape(), shaped_buffer.on_device_shape()); in CreateAndTransfer()
212 shaped_buffer.on_host_shape(), shaped_buffer.on_device_shape()); in CreateUninitialized()
221 const xla::ShapedBuffer& shaped_buffer, const xla::Shape& on_host_shape, in CreateFromBuffer() argument
226 *allocation = new XRTTupleAllocation(device_ordinal, allocator, on_host_shape, in CreateFromBuffer()
237 return CreateFromBuffer(shaped_buffer, shaped_buffer.on_host_shape(), in CreateFromBuffer()
260 if (!xla::ShapeUtil::Equal(literal.shape(), on_host_shape())) { in WriteLiteral()
264 " device=", xla::ShapeUtil::HumanStringWithLayout(on_host_shape())); in WriteLiteral()
283 xla::Literal literal(on_host_shape()); in SwapOut()
xrt_state.h
106 const xla::Shape& on_host_shape,
196 const xla::Shape& on_host_shape() const;
238 const xla::Shape& on_host_shape,
xrt_util.cc
279 if (!InputShapeMatches(input_shape, tuple->on_host_shape())) { in GetInputTupleAllocations()
283 "; got ", tuple->on_host_shape().DebugString()); in GetInputTupleAllocations()
349 if (return_exploded_tuple && output_tuple->on_host_shape().IsTuple()) { in CreateExecuteOutput()
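The xrt_state.cc and xrt_util.cc hits above follow one pattern: the host shape cached on an XRTTupleAllocation is the reference that literals and input shapes are validated against before anything touches device memory. Below is a minimal C++ sketch of that check, reusing the xla::ShapeUtil helpers the hits show; the function name and exact error text are hypothetical, not the real XRT code.

    #include "tensorflow/compiler/xla/literal.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/core/lib/core/errors.h"
    #include "tensorflow/core/lib/core/status.h"

    // Hypothetical standalone version of the guard in xrt_state.cc:260-264:
    // refuse to write a literal whose shape differs from the allocation's
    // cached on-host shape.
    tensorflow::Status CheckLiteralMatchesHostShape(
        const xla::Literal& literal, const xla::Shape& on_host_shape) {
      if (!xla::ShapeUtil::Equal(literal.shape(), on_host_shape)) {
        return tensorflow::errors::InvalidArgument(
            "Shape mismatch trying to write literal: literal=",
            xla::ShapeUtil::HumanStringWithLayout(literal.shape()),
            " device=", xla::ShapeUtil::HumanStringWithLayout(on_host_shape));
      }
      return tensorflow::Status::OK();
    }
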
/external/tensorflow/tensorflow/compiler/xla/service/
shaped_buffer.h
49 ShapedBuffer(Shape on_host_shape, Shape on_device_shape, int device_ordinal);
65 const Shape& on_host_shape() const { return on_host_shape_; } in on_host_shape() function
110 void set_shapes(const Shape& on_host_shape, const Shape& on_device_shape) { in set_shapes() argument
156 explicit ScopedShapedBuffer(Shape on_host_shape, Shape on_device_shape,
transfer_manager.cc
62 Literal literal(device_buffer.on_host_shape()); in TransferLiteralFromDevice()
388 const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator, in AllocateScopedShapedBuffer() argument
390 if (!LayoutUtil::HasLayout(on_host_shape)) { in AllocateScopedShapedBuffer()
392 ShapeUtil::HumanStringWithLayout(on_host_shape)); in AllocateScopedShapedBuffer()
394 TF_RETURN_IF_ERROR(ShapeUtil::ValidateShape(on_host_shape)); in AllocateScopedShapedBuffer()
395 Shape on_device_shape = HostShapeToDeviceShape(on_host_shape); in AllocateScopedShapedBuffer()
shaped_buffer.cc
41 ShapedBuffer::ShapedBuffer(Shape on_host_shape, Shape on_device_shape, in ShapedBuffer() argument
122 ScopedShapedBuffer::ScopedShapedBuffer(Shape on_host_shape, in ScopedShapedBuffer() argument
transfer_manager.h
250 const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator,
shaped_buffer_test.cc
170 EXPECT_EQ(ssb.on_host_shape(), array_shape); in TEST()
service.cc
279 argument_shapes.push_back(&arg->on_host_shape()); in CreateModuleConfig()
858 const Shape& shape_arg = replicated_arguments.front()[i]->on_host_shape(); in Execute()
928 return_shape = Shape(shaped_buffer->on_host_shape()); in TransferToClient()
1099 *result->mutable_shape() = buffer->on_host_shape().ToProto(); in GetShape()
executable.h
157 ExecutionOutput(Shape on_host_shape, Shape on_device_shape, in ExecutionOutput() argument
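Taken together, the shaped_buffer and transfer_manager hits show the division of labor: a ShapedBuffer stores both an on-host and an on-device shape, the TransferManager derives the device shape from the host one (transfer_manager.cc:395), and read-backs build a Literal from the host view (transfer_manager.cc:62). A minimal sketch of that flow, assuming HostShapeToDeviceShape is a const member of TransferManager as those hits suggest; the wrapper function is hypothetical.

    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/compiler/xla/service/transfer_manager.h"

    // Sketch only: build a ShapedBuffer description from a host shape. The
    // device shape may differ from the host shape (layouts, padding, tuple
    // index tables), which is why ShapedBuffer keeps both side by side.
    xla::ShapedBuffer MakeBufferDescription(
        const xla::TransferManager& transfer_manager,
        const xla::Shape& on_host_shape, int device_ordinal) {
      xla::Shape on_device_shape =
          transfer_manager.HostShapeToDeviceShape(on_host_shape);
      // Uses the three-argument constructor declared at shaped_buffer.h:49.
      return xla::ShapedBuffer(on_host_shape, on_device_shape, device_ordinal);
    }
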
/external/tensorflow/tensorflow/compiler/jit/
xla_tensor.cc
43 const xla::Shape& on_host_shape, in AllocateShapedBuffer() argument
48 on_host_shape); in AllocateShapedBuffer()
50 xla::ScopedShapedBuffer shaped_buffer(on_host_shape, on_device_shape, in AllocateShapedBuffer()
xla_tensor.h
51 Status AllocateShapedBuffer(DataType dtype, const xla::Shape& on_host_shape,
xla_launch_util.cc
432 VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString(); in PopulateOutputs()
440 if (!output.on_host_shape().IsTuple()) { in PopulateOutputs()
443 xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_host_shape()}), in PopulateOutputs()
463 if (output.on_host_shape().is_dynamic()) { in PopulateOutputs()
xla_device_context.cc
249 xla_tensor->shaped_buffer().on_host_shape()), in CopyDeviceTensorToCPU()
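The xla_launch_util.cc hits (lines 432-463) show how the JIT output path keeps a single code path for tuple and non-tuple results: a non-tuple host shape gets wrapped into a one-element tuple before the per-output loop. A hedged reconstruction of just that normalization step, with a hypothetical helper name:

    #include "tensorflow/compiler/xla/shape.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Hypothetical helper mirroring xla_launch_util.cc:440-443: wrap a
    // non-tuple host shape so downstream code can always index tuple elements.
    xla::Shape NormalizeToTupleShape(const xla::Shape& on_host_shape) {
      if (on_host_shape.IsTuple()) {
        return on_host_shape;
      }
      return xla::ShapeUtil::MakeTupleShape({on_host_shape});
    }
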
/external/tensorflow/tensorflow/core/tpu/kernels/
tpu_execute_op.cc
231 const xla::Shape& xla_shape = xla_tensor->shaped_buffer().on_host_shape(); in BuildComputationInputs()
405 xla::ShapeUtil::TupleElementCount(scoped_buffers.on_host_shape()); in AllocateOutputTensors()
422 xla::ShapeUtil::GetSubshape(scoped_buffers.on_host_shape(), {i}); in AllocateOutputTensors()
433 TF_RET_CHECK(scoped_buffers.on_host_shape().IsTuple()); in AllocateOutputTensors()
434 TF_RET_CHECK(!xla::ShapeUtil::IsNestedTuple(scoped_buffers.on_host_shape())); in AllocateOutputTensors()
709 std::make_shared<xla::Literal>(shaped_buffer.on_host_shape()); in DoWork()
742 shaped_buffer.on_host_shape())); in DoWork()
774 std::make_shared<xla::Literal>(output_buffers->buffers.on_host_shape()); in DoWork()
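tpu_execute_op.cc (lines 405-434) checks that the result buffer's host shape is a flat tuple and then walks its elements with ShapeUtil::TupleElementCount and ShapeUtil::GetSubshape. A small, assumed standalone equivalent of that walk; the helper name is hypothetical.

    #include <cstdint>
    #include <vector>

    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/core/platform/logging.h"

    // Hypothetical helper: collect the host shape of each output tuple
    // element, with the same flat-tuple preconditions asserted at
    // tpu_execute_op.cc:433-434.
    std::vector<xla::Shape> OutputElementShapes(const xla::Shape& on_host_shape) {
      CHECK(on_host_shape.IsTuple());
      CHECK(!xla::ShapeUtil::IsNestedTuple(on_host_shape));
      std::vector<xla::Shape> element_shapes;
      const int64_t element_count =
          xla::ShapeUtil::TupleElementCount(on_host_shape);
      for (int64_t i = 0; i < element_count; ++i) {
        element_shapes.push_back(xla::ShapeUtil::GetSubshape(on_host_shape, {i}));
      }
      return element_shapes;
    }
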
/external/tensorflow/tensorflow/compiler/xla/tests/
local_client_execute_test.cc
211 EXPECT_TRUE(result.on_host_shape().IsTuple()); in XLA_TEST_F()
212 EXPECT_EQ(3, ShapeUtil::TupleElementCount(result.on_host_shape())); in XLA_TEST_F()
239 EXPECT_TRUE(result.on_host_shape().IsTuple()); in XLA_TEST_F()
240 EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape())); in XLA_TEST_F()
317 EXPECT_TRUE(result.on_host_shape().IsTuple()); in XLA_TEST_F()
318 EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape())); in XLA_TEST_F()
979 auto executables, client->Compile(computation, {&buffer.on_host_shape()}, in BM_LocalClientOverhead()
local_client_test_base.cc
194 argument_layouts[i] = &arguments[i]->on_host_shape(); in ExecuteLocally()
cpu_gpu_fusion_test.cc
887 {&buffer0.on_host_shape(), &buffer1.on_host_shape(), in BM_ParallelFusion()
888 &buffer2.on_host_shape()}, in BM_ParallelFusion()
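In the benchmarks above (local_client_execute_test.cc:979, cpu_gpu_fusion_test.cc:887) the host shapes of already-allocated buffers double as the argument layouts handed to LocalClient::Compile. A sketch of that usage, assuming the Compile overload that returns a vector of LocalExecutables, as the first hit suggests; the wrapper name and the default build options are placeholders.

    #include <memory>
    #include <vector>

    #include "tensorflow/compiler/xla/client/executable_build_options.h"
    #include "tensorflow/compiler/xla/client/local_client.h"

    // Sketch: compile a computation using the on-host shapes of existing
    // device buffers as the argument layouts.
    xla::StatusOr<std::vector<std::unique_ptr<xla::LocalExecutable>>>
    CompileWithBufferLayouts(
        xla::LocalClient* client, const xla::XlaComputation& computation,
        const std::vector<const xla::ScopedShapedBuffer*>& arguments) {
      std::vector<const xla::Shape*> argument_layouts;
      argument_layouts.reserve(arguments.size());
      for (const xla::ScopedShapedBuffer* arg : arguments) {
        argument_layouts.push_back(&arg->on_host_shape());
      }
      return client->Compile(computation, argument_layouts,
                             xla::ExecutableBuildOptions());
    }
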
/external/tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/
tpu_client_extension.cc
171 .def("shape", &PyTpuBuffer::on_host_shape) in PYBIND11_MODULE()
172 .def("xla_shape", &PyTpuBuffer::on_host_shape) in PYBIND11_MODULE()
tpu_client.cc
239 child_shapes.push_back(child_buffer->on_host_shape()); in MakeTuple()
266 Shape on_host_shape, std::shared_ptr<TpuSharedBuffer> device_buffer, in PyTpuBuffer() argument
270 on_host_shape_(std::move(on_host_shape)), in PyTpuBuffer()
657 if (result.buffer->on_host_shape().IsTuple()) { in Execute()
744 if (results[i].buffer->on_host_shape().IsTuple()) { in ExecuteOnLocalDevices()
tpu_client.h
198 PyTpuBuffer(Shape on_host_shape,
208 const Shape& on_host_shape() const { return on_host_shape_; } in on_host_shape() function
/external/tensorflow/tensorflow/compiler/xla/client/
local_client.cc
206 auto literal = std::make_shared<Literal>(arg->on_host_shape()); in DumpArguments()
225 auto literal = std::make_shared<Literal>(outputs.on_host_shape()); in DumpOutputsAndSaveSnapshot()
/external/tensorflow/tensorflow/compiler/xrt/kernels/
tpu_execute_op.cc
113 input_tuples.back()->on_host_shape())) { in GetChainedOpInputs()
118 tuple->on_host_shape().DebugString()); in GetChainedOpInputs()
xrt_state_ops.h
521 xla::Literal literal(allocation->on_host_shape()); in Compute()
574 xla::Shape shape = allocation->on_host_shape(); in Compute()
/external/tensorflow/tensorflow/compiler/xla/pjrt/
pjrt_stream_executor_client.cc
364 const Shape& on_host_shape, PjRtDevice* device, in AllocateDestinationBuffer() argument
368 if (on_host_shape.IsTuple() && on_host_shape.tuple_shapes_size() == 0) { in AllocateDestinationBuffer()
377 on_host_shape, se_client->allocator(), in AllocateDestinationBuffer()
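The AllocateDestinationBuffer hit at pjrt_stream_executor_client.cc:368 rejects empty-tuple host shapes before any allocation happens. A minimal, assumed reconstruction of just that guard (the helper name and error text are hypothetical):

    #include "tensorflow/compiler/xla/shape.h"
    #include "tensorflow/compiler/xla/status.h"
    #include "tensorflow/compiler/xla/util.h"

    // Hypothetical guard: an empty tuple has no leaf buffers to allocate, so
    // it is rejected up front, matching the condition at line 368 above.
    xla::Status ValidateDestinationHostShape(const xla::Shape& on_host_shape) {
      if (on_host_shape.IsTuple() && on_host_shape.tuple_shapes_size() == 0) {
        return xla::InvalidArgument("empty tuple shapes are not supported");
      }
      return xla::Status::OK();
    }
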
