/external/tensorflow/tensorflow/compiler/xla/ |
D | literal.cc |
    113   const Shape& subshape = shape.tuple_shapes(i);   in SetPiece() local
    116   child_piece.set_subshape(&subshape);   in SetPiece()
    118   SetPiece(subshape, &child_piece, allocate_arrays);   in SetPiece()
    151   CHECK(&root_piece_->subshape() == shape_.get());   in Literal()
    178   DCHECK(&other.root_piece_->subshape() == other.shape_.get());   in operator =()
    182   DCHECK(&root_piece_->subshape() == shape_.get());   in operator =()
    191   if (piece->subshape().IsArray()) {   in CreateFromShape()
    315   if (piece->subshape().IsTuple()) {   in CreateFromProto()
    317   ShapeUtil::TupleElementCount(piece->subshape())) {   in CreateFromProto()
    320   ShapeUtil::TupleElementCount(piece->subshape()),   in CreateFromProto()
    [all …]
|
D | literal.h |
    68    const Shape& shape() const { return root_piece().subshape(); }   in shape()
    360   const Shape& subshape() const { return *subshape_; }   in subshape() function
    361   void set_subshape(const Shape* subshape) { subshape_ = subshape; }   in set_subshape() argument
    364   int64 size_bytes() const { return ShapeUtil::ByteSizeOf(subshape()); }   in size_bytes()
    370   return LayoutUtil::IsSparseArray(subshape())   in element_count()
    372   : ShapeUtil::ElementsIn(subshape());   in element_count()
    870   DCHECK(subshape().IsArray()) << ShapeUtil::HumanString(subshape());   in data()
    871   DCHECK_EQ(subshape().element_type(),   in data()
    876   << PrimitiveType_Name(subshape().element_type());   in data()
    883   DCHECK(subshape().IsArray()) << ShapeUtil::HumanString(subshape());   in data()
    [all …]
|
D | literal_util.cc |
    51    &result_shape, [](Shape* subshape, const ShapeIndex&) {   in ConvertType() argument
    52    if (subshape->element_type() ==   in ConvertType()
    54    subshape->set_element_type(   in ConvertType()
    64    [&](const Shape& subshape, const ShapeIndex& shape_index) {   in ConvertType() argument
    65    if (subshape.IsArray()) {   in ConvertType()
    66    if (subshape.element_type() ==   in ConvertType()
|
D | shape_util.cc |
    771   const Shape* subshape = &shape;   in IndexIsValid() local
    773   if (!subshape->IsTuple() || i >= subshape->tuple_shapes_size() || i < 0) {   in IndexIsValid()
    776   subshape = &subshape->tuple_shapes(i);   in IndexIsValid()
    827   for (const Shape& subshape : shape.tuple_shapes()) {   in GetLeafCount() local
    828   count += GetLeafCount(subshape);   in GetLeafCount()
    897   [&func](const Shape& subshape, const ShapeIndex& index) {   in ForEachSubshape() argument
    898   func(subshape, index);   in ForEachSubshape()
    910   [&func](Shape* subshape, const ShapeIndex& index) {   in ForEachMutableSubshape() argument
    911   func(subshape, index);   in ForEachMutableSubshape()
    1449  for (const Shape& subshape : shape.tuple_shapes()) {   in Hash() local
    [all …]
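The shape_util.cc hits above define the traversal helper most of the files below lean on: ShapeUtil::ForEachSubshape visits every nested shape of a (possibly tuple) shape together with its ShapeIndex. A minimal sketch of the call side, assuming only the XLA header these hits come from (the helper name is ours, not from the tree):

    #include <cstdint>
    #include "tensorflow/compiler/xla/shape_util.h"

    // Hypothetical helper: counts array leaves the same way GetLeafCount
    // does above, but via the ForEachSubshape visitor instead of manual
    // recursion over tuple_shapes().
    int64_t CountArrayLeaves(const xla::Shape& shape) {
      int64_t leaves = 0;
      xla::ShapeUtil::ForEachSubshape(
          shape,
          [&leaves](const xla::Shape& subshape, const xla::ShapeIndex&) {
            if (subshape.IsArray()) {
              ++leaves;  // tuples are interior nodes; arrays are leaves
            }
          });
      return leaves;
    }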
|
D | shape_util_test.cc |
    479   shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {   in TEST() argument
    480   EXPECT_EQ(&shape, &subshape);   in TEST()
    494   shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {   in TEST() argument
    496   ShapeUtil::Equal(subshape, ShapeUtil::GetSubshape(shape, index)));   in TEST()
    502   EXPECT_EQ(33, ShapeUtil::ElementsIn(subshape));   in TEST()
    516   &shape, [&calls, &shape](const Shape* subshape, const ShapeIndex& index) {   in TEST() argument
    518   EXPECT_EQ(subshape, ShapeUtil::GetMutableSubshape(&shape, index));   in TEST()
    524   EXPECT_EQ(33, ShapeUtil::ElementsIn(*subshape));   in TEST()
|
D | shape.cc |
    88    for (const Shape& subshape : tuple_shapes_) {   in is_static() local
    89    if (!subshape.is_static()) {   in is_static()
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | optimize_input_output_buffer_alias.cc |
    52    input_shape, [&](const Shape& subshape, const ShapeIndex& index) {   in Build() argument
    53    if (subshape.IsTuple()) {   in Build()
    56    int64 bytes = size_func_(subshape);   in Build()
    63    output_shape, [&](const Shape& subshape, const ShapeIndex& index) {   in Build() argument
    64    if (subshape.IsTuple()) {   in Build()
    67    int64 bytes = size_func_(subshape);   in Build()
    83    << ShapeUtil::HumanStringWithLayout(subshape) << " at index "   in Build()
|
D | bfloat16_propagation.cc |
    53    root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {   in DetermineFusionComputationPrecision() argument
    54    if (subshape.element_type() != F32) {   in DetermineFusionComputationPrecision()
    101   inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {   in RevertIfFusionInternalBF16Changes() argument
    108   if (subshape.element_type() != F32) {   in RevertIfFusionInternalBF16Changes()
    178   const Shape& subshape, const ShapeIndex& index) {   in DetermineWhileComputationsPrecision() argument
    179   if (subshape.element_type() != F32) {   in DetermineWhileComputationsPrecision()
    209   const Shape& subshape = ShapeUtil::GetSubshape(hlo.shape(), index);   in AllUsersConsumeBF16() local
    210   if (subshape.element_type() != BF16 && subshape.element_type() != F32) {   in AllUsersConsumeBF16()
    797   auto subshape = entry.first;   in Run() local
    798   CHECK_EQ(subshape->element_type(), F32);   in Run()
    [all …]
|
D | hlo_element_type_converter.cc |
    70    Shape subshape = ShapeUtil::GetTupleElementShape(shape, i);   in GetConvertedTupleShape() local
    71    CHECK(!subshape.IsTuple());   in GetConvertedTupleShape()
    72    if (subshape.element_type() == from_type) {   in GetConvertedTupleShape()
    73    subshape = ShapeUtil::ChangeElementType(subshape, to_type);   in GetConvertedTupleShape()
    75    new_tuple_subshapes.push_back(subshape);   in GetConvertedTupleShape()
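GetConvertedTupleShape retypes the matching elements of a flat tuple shape. A standalone sketch of that rewrite under the same assumptions (a non-nested tuple, this era's ShapeUtil API; the function name is ours):

    // Hypothetical distillation: every non-tuple element whose type is
    // `from` is retyped to `to`; the rest pass through unchanged.
    xla::Shape ConvertTupleElementTypes(const xla::Shape& shape,
                                        xla::PrimitiveType from,
                                        xla::PrimitiveType to) {
      std::vector<xla::Shape> new_tuple_subshapes;
      for (int64_t i = 0; i < xla::ShapeUtil::TupleElementCount(shape); ++i) {
        xla::Shape subshape = xla::ShapeUtil::GetTupleElementShape(shape, i);
        CHECK(!subshape.IsTuple());  // matches the precondition CHECKed above
        if (subshape.element_type() == from) {
          subshape = xla::ShapeUtil::ChangeElementType(subshape, to);
        }
        new_tuple_subshapes.push_back(subshape);
      }
      return xla::ShapeUtil::MakeTupleShape(new_tuple_subshapes);
    }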
|
D | while_loop_invariant_code_motion.cc |
    223   [&input_size](const Shape& subshape, const ShapeIndex& /*index*/) {   in TryHoistingInvariantInstructionsFromWhileBody() argument
    224   if (subshape.IsArray()) {   in TryHoistingInvariantInstructionsFromWhileBody()
    225   input_size += ShapeUtil::ByteSizeOfElements(subshape);   in TryHoistingInvariantInstructionsFromWhileBody()
    231   [&output_size](const Shape& subshape, const ShapeIndex& /*index*/) {   in TryHoistingInvariantInstructionsFromWhileBody()
    232   if (subshape.IsArray()) {   in TryHoistingInvariantInstructionsFromWhileBody()
    233   output_size += ShapeUtil::ByteSizeOfElements(subshape);   in TryHoistingInvariantInstructionsFromWhileBody()
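The LICM pass sums the payload bytes of every array leaf on both the input and output side before deciding whether hoisting pays off. A sketch of that accounting (helper name ours; ByteSizeOfElements assumes dense, laid-out array shapes, as in the pass):

    // Hypothetical helper mirroring the input_size/output_size sums above.
    int64_t SumArrayLeafBytes(const xla::Shape& shape) {
      int64_t bytes = 0;
      xla::ShapeUtil::ForEachSubshape(
          shape,
          [&bytes](const xla::Shape& subshape, const xla::ShapeIndex&) {
            if (subshape.IsArray()) {
              bytes += xla::ShapeUtil::ByteSizeOfElements(subshape);
            }
          });
      return bytes;
    }

The pass runs this once over the operands it would read and once over the values it would hoist out, then compares the two sums as its profitability heuristic.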
|
D | shaped_buffer.cc |
    86    [this, &s](const Shape& subshape, const ShapeIndex& index) {   in ToString() argument
    88    if (subshape.IsTuple()) {   in ToString()
    91    shape_str = ShapeUtil::HumanStringWithLayout(subshape);   in ToString()
|
D | generic_transfer_manager.cc |
    85    [&](const Shape& subshape, const ShapeIndex& index) -> Status {   in TransferLiteralFromDeviceInternal() argument
    86    if (subshape.IsArray()) {   in TransferLiteralFromDeviceInternal()
    89    /*size=*/GetByteSizeRequirement(subshape),   in TransferLiteralFromDeviceInternal()
|
D | instruction_fusion.cc |
    176   [&output_rank](const Shape& subshape, const ShapeIndex& shape_index) {   in EffectivelyAtMostUnary() argument
    177   if (subshape.IsArray()) {   in EffectivelyAtMostUnary()
    178   output_rank = std::max(output_rank, ShapeUtil::TrueRank(subshape));   in EffectivelyAtMostUnary()
    275   [&size](const Shape& subshape, const ShapeIndex& shape_index) {   in ComputeGloballyUnfusible() argument
    276   if (subshape.IsArray()) {   in ComputeGloballyUnfusible()
    277   size += ShapeUtil::ElementsIn(subshape);   in ComputeGloballyUnfusible()
|
D | bfloat16_normalization.cc |
    239   auto subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), {i});   in HandleMultipleOutputs() local
    242   HloInstruction::CreateGetTupleElement(*subshape, hlo, i));   in HandleMultipleOutputs()
    245   subshape->set_element_type(F32);   in HandleMultipleOutputs()
    247   HloInstruction::CreateGetTupleElement(*subshape, hlo, i));   in HandleMultipleOutputs()
    250   ShapeUtil::ChangeElementType(*subshape, BF16), gte));   in HandleMultipleOutputs()
|
D | layout_assignment.cc |
    304   [this, instruction, mandatory](const Shape& subshape,   in SetInstructionLayout()
    313   if (subshape.IsArray()) {   in SetInstructionLayout()
    314   return SetBufferLayout(subshape.layout(), *buffers[0], mandatory);   in SetInstructionLayout()
    1313  [&](const Shape& subshape, const ShapeIndex& shape_index) {   in PropagateOperandConstraint() argument
    1314  if (subshape.IsTuple()) {   in PropagateOperandConstraint()
    1317  if (subshape.rank() <= 1) {   in PropagateOperandConstraint()
    1323  if (subshape.rank() != operand->shape().rank()) {   in PropagateOperandConstraint()
    1350  user->shape(), [&](const Shape& subshape, const ShapeIndex& shape_index) {   in PropagateOperandConstraint() argument
    1351  if (subshape.IsTuple()) {   in PropagateOperandConstraint()
    1354  if (subshape.rank() <= 1) {   in PropagateOperandConstraint()
    [all …]
|
D | hlo_cost_analysis.cc |
    584   [&](const Shape& subshape, const ShapeIndex&) {   in HandleAllReduce() argument
    585   if (subshape.IsArray()) {   in HandleAllReduce()
    586   flops += ShapeUtil::ElementsIn(subshape);   in HandleAllReduce()
    625   [this](const Shape& subshape, const ShapeIndex& /*shape_index*/) {   in HandleFusion() argument
    626   current_properties_[kBytesAccessedKey] += GetShapeSize(subshape);   in HandleFusion()
|
D | pattern_matcher_test.cc |
    138   Shape* subshape;   in TEST() local
    142   {0}, match::Shape(&subshape).WithElementType(F32).WithRank(3))));   in TEST()
    143   ASSERT_NE(subshape, nullptr);   in TEST()
    145   ShapeUtil::Equal(*subshape, ShapeUtil::GetSubshape(tuple_shape, {0})));   in TEST()
    158   {1}, match::Shape(&subshape).WithElementType(S32).WithRank(2))));   in TEST()
    159   ASSERT_NE(subshape, nullptr);   in TEST()
    161   ShapeUtil::Equal(*subshape, ShapeUtil::GetSubshape(tuple_shape, {1})));   in TEST()
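The pattern_matcher test uses match::Shape().WithSubshape both to assert a property of a nested shape and to capture a pointer to it. A self-contained sketch of the same calls, using gtest macros as the test does (the tuple literal is ours, chosen only to satisfy the rank/type constraints visible in the hits):

    xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(
        {xla::ShapeUtil::MakeShape(xla::F32, {1, 2, 3}),
         xla::ShapeUtil::MakeShape(xla::S32, {4, 5})});
    xla::Shape* subshape = nullptr;
    // Match element {0} of the tuple and capture it into `subshape`.
    EXPECT_TRUE(xla::Match(
        &tuple_shape,
        xla::match::Shape().WithSubshape(
            {0}, xla::match::Shape(&subshape)
                     .WithElementType(xla::F32)
                     .WithRank(3))));
    ASSERT_NE(subshape, nullptr);
    EXPECT_TRUE(xla::ShapeUtil::Equal(
        *subshape, xla::ShapeUtil::GetSubshape(tuple_shape, {0})));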
|
/external/tensorflow/tensorflow/compiler/xrt/ |
D | xrt_state.cc |
    112   xla::Shape subshape =   in AllocateScopedShapedBuffer() local
    114   uint64 size = transfer_manager->GetByteSizeRequirement(subshape);   in AllocateScopedShapedBuffer()
    296   XRTTupleAllocation* parent, const xla::ShapeIndex& subshape,   in MakeSubBuffer() argument
    300   xla::ShapeUtil::TryGetSubshape(parent->on_host_shape(), subshape));   in MakeSubBuffer()
    303   xla::ShapeUtil::TryGetSubshape(parent->on_device_shape(), subshape));   in MakeSubBuffer()
    310   (*allocation)->buffers_.CopySubtreeFrom(parent->buffers_, subshape, {});   in MakeSubBuffer()
    324   xla::ShapeIndex parent_index = subshape;   in MakeSubBuffer()
    411   xla::Shape subshape =   in MakeTuple() local
    413   uint64 size = transfer_manager->GetByteSizeRequirement(subshape);   in MakeTuple()
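MakeSubBuffer resolves a caller-supplied ShapeIndex with ShapeUtil::TryGetSubshape, which returns a StatusOr rather than CHECK-failing on a bad index. A sketch of that lookup step (function name and structure are ours):

    // Hypothetical validation in the spirit of MakeSubBuffer: resolve
    // `index` against both shapes, propagating an error on a bad index
    // instead of crashing.
    xla::Status ValidateSubBufferIndex(const xla::Shape& on_host_shape,
                                       const xla::Shape& on_device_shape,
                                       const xla::ShapeIndex& index) {
      TF_ASSIGN_OR_RETURN(
          const xla::Shape* host_subshape,
          xla::ShapeUtil::TryGetSubshape(on_host_shape, index));
      TF_ASSIGN_OR_RETURN(
          const xla::Shape* device_subshape,
          xla::ShapeUtil::TryGetSubshape(on_device_shape, index));
      (void)host_subshape;    // the real code goes on to copy the subtree
      (void)device_subshape;
      return xla::Status::OK();
    }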
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | gpu_hlo_support_checker.cc |
    31    [&instruction](const Shape& subshape, const ShapeIndex&) {   in Run() argument
    32    if (LayoutUtil::IsSparseArray(subshape)) {   in Run()
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | cpu_hlo_support_checker.cc |
    31    [&instruction](const Shape& subshape, const ShapeIndex&) {   in Run() argument
    32    if (LayoutUtil::IsSparseArray(subshape)) {   in Run()
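The CPU and GPU support checkers run the same per-subshape rejection of sparse layouts. A sketch of that check, assuming ShapeUtil::ForEachSubshapeWithStatus (the Status-returning variant, which the fallible Run() context suggests) and xla::Unimplemented from xla/util.h; the wording of the error is ours:

    // Hypothetical distillation of the check both files perform.
    xla::Status CheckNoSparseLayouts(const xla::Shape& shape) {
      return xla::ShapeUtil::ForEachSubshapeWithStatus(
          shape,
          [](const xla::Shape& subshape,
             const xla::ShapeIndex&) -> xla::Status {
            if (xla::LayoutUtil::IsSparseArray(subshape)) {
              return xla::Unimplemented(
                  "sparse array layouts are not supported on this backend");
            }
            return xla::Status::OK();
          });
    }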
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | client_library_test_base.cc |
    332   &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {   in ComputeAndCompareLiteralWithStatus() argument
    333   if (subshape->element_type() == F32) {   in ComputeAndCompareLiteralWithStatus()
    334   subshape->set_element_type(BF16);   in ComputeAndCompareLiteralWithStatus()
    389   &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {   in ComputeAndCompareLiteralWithStatus() argument
    390   if (subshape->element_type() == F32) {   in ComputeAndCompareLiteralWithStatus()
    391   subshape->set_element_type(BF16);   in ComputeAndCompareLiteralWithStatus()
    624   [](Shape* subshape, const ShapeIndex&) {   in MaybeConvertShapeToBfloat16() argument
    625   if (subshape->element_type() == F32) {   in MaybeConvertShapeToBfloat16()
    626   subshape->set_element_type(BF16);   in MaybeConvertShapeToBfloat16()
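The test harness downgrades every F32 leaf to BF16 in place via the mutable traversal. A standalone sketch of exactly the loop body in the hits above (only the free-function wrapper is ours):

    // Hypothetical wrapper around the F32 -> BF16 retyping shown above.
    void ConvertShapeToBfloat16(xla::Shape* shape) {
      xla::ShapeUtil::ForEachMutableSubshape(
          shape, [](xla::Shape* subshape, const xla::ShapeIndex&) {
            if (subshape->element_type() == xla::F32) {
              subshape->set_element_type(xla::BF16);
            }
          });
    }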
|
/external/tensorflow/tensorflow/core/ops/ |
D | math_ops.cc |
    991   ShapeHandle subshape;   in SegmentReductionShapeFn() local
    992   TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));   in SegmentReductionShapeFn()
    996   c->Concatenate(c->Vector(InferenceContext::kUnknownDim), subshape, &out));   in SegmentReductionShapeFn()
    1015  ShapeHandle subshape;   in SparseSegmentReductionShapeFn() local
    1016  TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));   in SparseSegmentReductionShapeFn()
    1020  c->Concatenate(c->Vector(InferenceContext::kUnknownDim), subshape, &out));   in SparseSegmentReductionShapeFn()
    1039  ShapeHandle subshape;   in SparseSegmentReductionGradShapeFn() local
    1040  TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));   in SparseSegmentReductionGradShapeFn()
    1058  TF_RETURN_IF_ERROR(c->Concatenate(dim0_shape, subshape, &out));   in SparseSegmentReductionGradShapeFn()
    1080  ShapeHandle subshape;   in SparseSegmentReductionWithNumSegmentsShapeFn()
    [all …]
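All of these segment-reduction shape functions share one move: strip the data's leading dimension with Subshape, then prepend the (statically unknown) segment count with Concatenate. A sketch of a shape function built from the same InferenceContext calls (the function name is ours, assuming tensorflow/core/framework/shape_inference.h):

    using tensorflow::shape_inference::InferenceContext;
    using tensorflow::shape_inference::ShapeHandle;

    // Hypothetical shape fn: output is [num_segments (unknown)] + data[1:].
    tensorflow::Status SegmentLikeShapeFn(InferenceContext* c) {
      ShapeHandle data_shape;
      TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &data_shape));
      ShapeHandle subshape;
      // Drop dimension 0 of the data shape.
      TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));
      ShapeHandle out;
      // Prepend an unknown dimension for the segment count.
      TF_RETURN_IF_ERROR(c->Concatenate(
          c->Vector(InferenceContext::kUnknownDim), subshape, &out));
      c->set_output(0, out);
      return tensorflow::Status::OK();
    }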
|
/external/tensorflow/tensorflow/compiler/xla/python_api/ |
D | xla_shape.py |
    57    if not all(isinstance(subshape, Shape) for subshape in dimensions):
|
/external/tensorflow/tensorflow/compiler/jit/ |
D | xla_tensor.cc |
    58    xla::Shape subshape =   in AllocateShapedBuffer() local
    61    client->backend().transfer_manager()->GetByteSizeRequirement(subshape);   in AllocateShapedBuffer()
|
/external/tensorflow/tensorflow/compiler/xrt/kernels/ |
D | xrt_state_ops.h |
    553   [&](xla::Shape* subshape, const xla::ShapeIndex& index) -> Status {   in Compute()
    554   if (subshape->IsTuple()) return Status::OK();   in Compute()
    559   if (xla_type != subshape->element_type()) {   in Compute()
    561   "Type mismatch between buffer type (", subshape->ToString(),   in Compute()
    568   TF_RETURN_IF_ERROR(XLAShapeToTensorShape(*subshape, &output_shape));   in Compute()
    581   xla::LayoutUtil::GetWithDefaultLayout(*subshape), output_tensor,   in Compute()
|