Searched refs:sharding (Results 1 – 25 of 34) sorted by relevance

/external/tensorflow/tensorflow/compiler/xla/service/
hlo_sharding_test.cc
42 HloSharding sharding = HloSharding::Replicate(); in TEST_F() local
43 EXPECT_TRUE(sharding.IsReplicated()); in TEST_F()
44 EXPECT_TRUE(sharding.IsTileMaximal()); in TEST_F()
45 EXPECT_TRUE(sharding.UsesDevice(0)); in TEST_F()
46 EXPECT_TRUE(sharding.UsesDevice(65535)); in TEST_F()
49 EXPECT_EQ(other, sharding); in TEST_F()
51 EXPECT_IS_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4}), in TEST_F()
53 EXPECT_FALSE(sharding.HasUniqueDevice()); in TEST_F()
57 HloSharding sharding = HloSharding::AssignDevice(5); in TEST_F() local
58 EXPECT_FALSE(sharding.IsReplicated()); in TEST_F()
[all …]
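
Aside: a minimal, self-contained sketch (not part of the search results) of the HloSharding invariants the hlo_sharding_test.cc lines above exercise; headers and behavior assume a TF 1.x-era XLA checkout.

#include <iostream>

#include "tensorflow/compiler/xla/service/hlo_sharding.h"

int main() {
  // Replicated: every device holds the full data, so the sharding is
  // tile-maximal, "uses" every device id, and has no unique device.
  xla::HloSharding replicated = xla::HloSharding::Replicate();
  std::cout << replicated.IsReplicated() << "\n";     // 1
  std::cout << replicated.IsTileMaximal() << "\n";    // 1
  std::cout << replicated.UsesDevice(65535) << "\n";  // 1
  std::cout << replicated.HasUniqueDevice() << "\n";  // 0

  // Maximal sharding pinned to device 5: not replicated, one unique device.
  xla::HloSharding maximal = xla::HloSharding::AssignDevice(5);
  std::cout << maximal.IsReplicated() << "\n";     // 0
  std::cout << maximal.UsesDevice(5) << "\n";      // 1
  std::cout << maximal.HasUniqueDevice() << "\n";  // 1
}
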
hlo_sharding_metadata.cc
53 const HloSharding& sharding) { in SetSingleSharding() argument
54 VLOG(4) << " " << instruction->name() << " to " << sharding; in SetSingleSharding()
55 instruction->set_single_sharding(sharding); in SetSingleSharding()
120 const HloSharding& sharding) { in FixupPassThroughDomainLinks() argument
127 gte->set_sharding(sharding); in FixupPassThroughDomainLinks()
143 std::shared_ptr<const HloSharding> sharding) { in CloneShardingForDomain() argument
144 auto single_sharding = sharding->ExtractSingleSharding(); in CloneShardingForDomain()
146 return sharding; in CloneShardingForDomain()
152 const HloSharding& sharding) { in ApplyDomainSingleSharding() argument
153 VLOG(4) << "Applying " << sharding << " sharding"; in ApplyDomainSingleSharding()
[all …]
hlo_domain_test.cc
431 new_tuple->sharding(), in TEST_F()
472 EXPECT_EQ(root->sharding(), HloSharding::AssignDevice(1)); in TEST_F()
529 tpl->sharding()); in TEST_F()
540 domain={kind="sharding", entry={maximal device=1}, exit={maximal device=0}} in TEST_F()
543 domain={kind="sharding", entry={maximal device=1}, exit={maximal device=0}} in TEST_F()
546 domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}} in TEST_F()
550 domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}} in TEST_F()
555 domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}} in TEST_F()
668 tuple0->sharding()); in TEST_F()
673 copy0->sharding()); in TEST_F()
[all …]
hlo_sharding.cc
62 for (auto& sharding : shardings) { in Tuple() local
63 CHECK(!sharding.IsTuple()) << sharding.ToString(); in Tuple()
73 const HloSharding& sharding) { in SingleTuple() argument
75 CHECK(!sharding.IsTuple()) << sharding.ToString(); in SingleTuple()
78 flattened_list.resize(leaf_count, sharding); in SingleTuple()
83 const HloSharding& sharding) { in Single() argument
84 return shape.IsTuple() ? SingleTuple(shape, sharding) : sharding; in Single()
366 TF_ASSIGN_OR_RETURN(HloSharding sharding, in FromProto()
368 tuple_shardings.push_back(sharding); in FromProto()
501 std::ostream& operator<<(std::ostream& out, const HloSharding& sharding) { in operator <<() argument
[all …]
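
Aside: the hlo_sharding.cc lines above define the tuple constructors. A hedged usage sketch (function name ours, assuming a TF 1.x-era checkout):

#include "tensorflow/compiler/xla/service/hlo_sharding.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/core/platform/logging.h"

void SingleVsSingleTuple() {
  xla::Shape scalar = xla::ShapeUtil::MakeShape(xla::F32, {});
  xla::Shape tuple = xla::ShapeUtil::MakeTupleShape({scalar, scalar});
  xla::HloSharding dev0 = xla::HloSharding::AssignDevice(0);

  // Non-tuple shape: Single() returns the sharding unchanged.
  xla::HloSharding a = xla::HloSharding::Single(scalar, dev0);
  // Tuple shape: Single() forwards to SingleTuple(), which copies dev0
  // onto every leaf of the tuple.
  xla::HloSharding b = xla::HloSharding::Single(tuple, dev0);
  CHECK(!a.IsTuple());
  CHECK(b.IsTuple());
}
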
hlo_sharding_metadata.h
30 explicit ShardingMetadata(std::shared_ptr<const HloSharding> sharding) in ShardingMetadata() argument
31 : sharding_(std::move(sharding)) {} in ShardingMetadata()
43 const HloSharding* sharding() const { return sharding_.get(); } in sharding() function
77 std::shared_ptr<const HloSharding> sharding; member
hlo_sharding.h
74 const HloSharding& sharding);
78 static HloSharding Single(const Shape& shape, const HloSharding& sharding);
198 size_t operator()(const HloSharding& sharding) const { in operator()
199 return sharding.Hash(); in operator()
283 std::ostream& operator<<(std::ostream& out, const HloSharding& sharding);
batchnorm_expander.cc
305 const HloSharding& sharding = batch_norm->sharding(); in HandleBatchNormTraining() local
307 sharding.GetAsShapeTree(batch_norm->shape()).element({0}); in HandleBatchNormTraining()
320 tuple->set_sharding(sharding); in HandleBatchNormTraining()
411 const HloSharding& sharding = batch_norm->sharding(); in HandleBatchNormInference() local
419 inst->set_sharding(sharding); in HandleBatchNormInference()
424 shifted_normalized->set_sharding(sharding); in HandleBatchNormInference()
584 const HloSharding& sharding = batch_norm->sharding(); in HandleBatchNormGrad() local
589 sharding.GetAsShapeTree(batch_norm->shape()).element({0}); in HandleBatchNormGrad()
602 tuple->set_sharding(sharding); in HandleBatchNormGrad()
hlo_matchers.h
138 explicit HloShardingMatcher(const absl::optional<HloSharding>& sharding) in HloShardingMatcher() argument
139 : sharding_(sharding) {} in HloShardingMatcher()
373 const HloSharding& sharding) { in Sharding() argument
375 new ::xla::testing::HloShardingMatcher(sharding)); in Sharding()
379 absl::string_view sharding) { in Sharding() argument
381 ParseSharding(sharding).ValueOrDie())); in Sharding()
hlo_parser_test.cc
223 %v1 = f32[4]{0} parameter(0), sharding={maximal device=1} in CreateTestCases()
224 %v2 = f32[4]{0} parameter(1), sharding={maximal device=1} in CreateTestCases()
225 …%greater-than = pred[4]{0} compare(f32[4]{0} %v1, f32[4]{0} %v2), direction=GT, sharding={replicat… in CreateTestCases()
226 …ROOT %select = f32[4]{0} select(pred[4]{0} %greater-than, f32[4]{0} %v1, f32[4]{0} %v2), sharding=… in CreateTestCases()
264 …{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3), sharding={{replicated}, {m… in CreateTestCases()
312 %recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15, sharding={maximal device=1} in CreateTestCases()
313 …32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, sharding={maximal device=1} in CreateTestCases()
314 %constant = f32[] constant(2.1), sharding={maximal device=0} in CreateTestCases()
315 …%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, sharding={m… in CreateTestCases()
316 …%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, sharding={maximal de… in CreateTestCases()
[all …]
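
Aside: the textual attributes above (sharding={maximal device=1}, sharding={replicated}) can be parsed back with the ParseSharding() entry point visible in the hlo_matchers.h result further up. A sketch, assuming TF 1.x-era hlo_parser.h:

#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/service/hlo_sharding.h"
#include "tensorflow/core/platform/logging.h"

void ParseTextualSharding() {
  xla::HloSharding maximal =
      xla::ParseSharding("{maximal device=1}").ValueOrDie();
  xla::HloSharding replicated =
      xla::ParseSharding("{replicated}").ValueOrDie();
  CHECK(maximal.HasUniqueDevice());  // Pinned to device 1.
  CHECK(replicated.IsReplicated());
}
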
hlo_cse_test.cc
718 domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}} in TEST_F()
720 domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}} in TEST_F()
722 domain={kind="sharding", entry={maximal device=0}, exit={maximal device=2}} in TEST_F()
727 domain={kind="sharding", entry={maximal device=1}, exit={maximal device=0}} in TEST_F()
729 domain={kind="sharding", entry={maximal device=1}, exit={maximal device=0}} in TEST_F()
731 domain={kind="sharding", entry={maximal device=2}, exit={maximal device=0}} in TEST_F()
hlo_instruction.h
1139 const HloSharding& sharding() const { in sharding() function
1158 void set_sharding(const HloSharding& sharding) { in set_sharding() argument
1159 sharding_ = std::make_shared<const HloSharding>(sharding); in set_sharding()
1161 void set_sharding(std::shared_ptr<const HloSharding> sharding) { in set_sharding() argument
1162 sharding_ = std::move(sharding); in set_sharding()
1164 void set_single_sharding(const HloSharding& sharding);
1179 return other->has_sharding() ? sharding() == other->sharding() : false; in has_compatible_sharding()
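
Aside: a hypothetical helper (name ours) showing the hlo_instruction.h accessors above; per the excerpt, set_sharding() wraps the value in a shared_ptr internally:

#include <cstdint>

#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_sharding.h"

// Pin `instr` to one device unless it already carries a sharding.
void PinToDeviceIfUnsharded(xla::HloInstruction* instr, int64_t device) {
  if (!instr->has_sharding()) {
    instr->set_sharding(xla::HloSharding::AssignDevice(device));
  }
}
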
hlo_parser.cc
268 bool ParseSharding(OpSharding* sharding);
269 bool ParseSingleSharding(OpSharding* sharding, bool lbrace_pre_lexed);
668 optional<OpSharding> sharding; in ParseInstructionRhs() local
669 attrs["sharding"] = {/*required=*/false, AttrTy::kSharding, &sharding}; in ParseInstructionRhs()
1721 if (sharding) { in ParseInstructionRhs()
1723 HloSharding::FromProto(sharding.value()).ValueOrDie()); in ParseInstructionRhs()
1758 bool HloParser::ParseSharding(OpSharding* sharding) { in ParseSharding() argument
1769 return ParseSingleSharding(sharding, /*lbrace_pre_lexed=*/true); in ParseSharding()
1776 if (!ParseSingleSharding(sharding->add_tuple_shardings(), in ParseSharding()
1782 sharding->set_type(OpSharding::Type::OpSharding_Type_TUPLE); in ParseSharding()
[all …]
hlo_instruction.cc
571 HloSharding sharding, in CreateFromProto()
573 entry_hlo_sharding = std::make_shared<const HloSharding>(sharding); in CreateFromProto()
577 HloSharding sharding, in CreateFromProto()
579 exit_hlo_sharding = std::make_shared<const HloSharding>(sharding); in CreateFromProto()
629 TF_ASSIGN_OR_RETURN(const auto& sharding, in CreateFromProto()
630 HloSharding::FromProto(proto.sharding())); in CreateFromProto()
631 instruction->set_sharding(sharding); in CreateFromProto()
1130 broadcast->set_sharding(operand->sharding()); in CreateBroadcastSequence()
1155 reshaped_operand->set_sharding(operand->sharding()); in CreateBroadcastSequence()
1162 broadcast->set_sharding(operand->sharding()); in CreateBroadcastSequence()
[all …]
hlo_matchers_test.cc
159 auto sharding = HloSharding::Tuple( in TEST() local
162 p2->set_sharding(sharding); in TEST()
hlo_matchers.cc
193 if (instruction->sharding() == sharding_.value()) { in MatchAndExplain()
layout_assignment.cc
861 HloSharding sharding = in SetupCopiedInstruction() local
862 !index.empty() && instruction.sharding().IsTuple() in SetupCopiedInstruction()
863 ? instruction.sharding().GetSubSharding(instruction.shape(), index) in SetupCopiedInstruction()
864 : instruction.sharding(); in SetupCopiedInstruction()
869 auto device = sharding.UniqueDevice(); in SetupCopiedInstruction()
871 copy->set_sharding(sharding); in SetupCopiedInstruction()
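
Aside: the layout_assignment.cc lines above project a tuple sharding onto one element; the same pattern as a standalone sketch (helper name ours):

#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_sharding.h"

// For a tuple-sharded instruction, GetSubSharding() extracts the sharding
// of the element at `index`; otherwise the whole sharding applies.
xla::HloSharding ElementSharding(const xla::HloInstruction& instr,
                                 const xla::ShapeIndex& index) {
  return !index.empty() && instr.sharding().IsTuple()
             ? instr.sharding().GetSubSharding(instr.shape(), index)
             : instr.sharding();
}
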
/external/tensorflow/tensorflow/compiler/tf2xla/
sharding_util.cc
35 xla::OpSharding sharding; in GetShardingFromNodeDef() local
37 if (!sharding.ParseFromString(value)) { in GetShardingFromNodeDef()
42 return absl::optional<xla::OpSharding>(sharding); in GetShardingFromNodeDef()
83 TF_ASSIGN_OR_RETURN(absl::optional<xla::OpSharding> sharding, in ParseShardingFromDevice()
85 return ParseShardingFromDevice(device_name, num_cores_per_replica, sharding); in ParseShardingFromDevice()
94 TF_ASSIGN_OR_RETURN(absl::optional<xla::OpSharding> sharding, in ParseShardingFromDevice()
96 return ParseShardingFromDevice(device_name, num_cores_per_replica, sharding); in ParseShardingFromDevice()
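
Aside: GetShardingFromNodeDef() above deserializes an OpSharding proto carried as a string node attribute; the parsing step in isolation (attribute lookup omitted, function name ours):

#include <string>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"

absl::optional<xla::OpSharding> ShardingFromBytes(const std::string& value) {
  xla::OpSharding sharding;
  if (!sharding.ParseFromString(value)) {  // Standard protobuf API.
    return absl::nullopt;                  // Malformed attribute.
  }
  return absl::optional<xla::OpSharding>(sharding);
}
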
sharding_util_test.cc
26 [](absl::optional<xla::OpSharding> sharding) -> int64 { in TEST() argument
27 if (sharding.has_value() && in TEST()
28 sharding.value().type() == in TEST()
30 return sharding.value().tile_assignment_devices(0); in TEST()
tf2xla_util.cc
507 absl::optional<xla::OpSharding> sharding, in SetNodeShardingFromNeighbors()
511 if (sharding.has_value()) { in SetNodeShardingFromNeighbors()
512 TF_RET_CHECK(sharding.value().type() == in SetNodeShardingFromNeighbors()
514 const int core_annotation = sharding.value().tile_assignment_devices(0); in SetNodeShardingFromNeighbors()
xla_compiler.cc
82 auto sharding, in ComputeArgAndRetvalCores()
84 if (sharding.has_value()) { in ComputeArgAndRetvalCores()
85 TF_RET_CHECK(sharding.value().type() == in ComputeArgAndRetvalCores()
87 return sharding.value().tile_assignment_devices(0); in ComputeArgAndRetvalCores()
/external/junit-params/
README.google
20 38419944 - Fix sharding on CTS.
/external/tensorflow/tensorflow/compiler/xla/client/
xla_builder.h
159 void SetSharding(const OpSharding& sharding) { sharding_ = sharding; } in SetSharding() argument
166 const absl::optional<OpSharding>& sharding() const { return sharding_; } in sharding() function
1014 absl::optional<OpSharding> sharding) in XlaScopedShardingAssignment() argument
1015 : builder_(builder), prev_sharding_(builder->sharding()) { in XlaScopedShardingAssignment()
1016 SetSharding(sharding); in XlaScopedShardingAssignment()
1026 void SetSharding(const absl::optional<OpSharding>& sharding) { in SetSharding() argument
1027 if (sharding.has_value()) { in SetSharding()
1028 builder_->SetSharding(sharding.value()); in SetSharding()
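
Aside: XlaScopedShardingAssignment above is an RAII guard; its constructor saves the builder's current sharding and installs the new one, and the destructor (not shown in the excerpt, but implied by the class name) presumably restores the saved value. A usage sketch, function name ours:

#include "tensorflow/compiler/xla/client/xla_builder.h"

void BuildWithSharding(xla::XlaBuilder* builder,
                       const xla::OpSharding& sharding) {
  xla::XlaScopedShardingAssignment scoped(builder, sharding);
  // Every op added through `builder` in this scope picks up `sharding`.
}  // Scope ends: the previous sharding (possibly none) is restored.
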
xla_builder.cc
1292 if (shape.IsArray() && sharding() && in Infeed()
1293 sharding()->type() == OpSharding::Type::OpSharding_Type_OTHER) { in Infeed()
1299 if (sharding() && in Infeed()
1300 sharding()->type() == OpSharding::Type::OpSharding_Type_REPLICATED) { in Infeed()
1313 if (sharding()) { in Infeed()
1315 OpSharding sharding = sharding_builder::AssignDevice(0); in Infeed() local
1316 XlaScopedShardingAssignment scoped_sharding(this, sharding); in Infeed()
1327 if (sharding() && in Infeed()
1328 sharding()->type() == OpSharding::Type::OpSharding_Type_TUPLE) { in Infeed()
1331 OpSharding infeed_instruction_sharding = *sharding(); in Infeed()
[all …]
/external/tensorflow/tensorflow/compiler/xla/
xla_data.proto
577 // This sharding is replicated across all devices (implies maximal,
580 // This sharding is maximal - one device runs the entire operation.
582 // This sharding is a tuple - only the tuple_shardings field is valid.
601 // applied, this is inferred from the instruction this sharding gets attached
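
Aside: the enum comments above correspond to the generated C++ names seen in the xla_builder.cc result earlier (OpSharding_Type_REPLICATED, OpSharding_Type_TUPLE, ...); a sketch of building the two simplest variants, with OpSharding_Type_MAXIMAL assumed to follow the same naming pattern:

#include <cstdint>

#include "tensorflow/compiler/xla/xla_data.pb.h"

xla::OpSharding MakeReplicated() {
  xla::OpSharding sharding;
  sharding.set_type(xla::OpSharding::Type::OpSharding_Type_REPLICATED);
  return sharding;
}

xla::OpSharding MakeMaximalOnDevice(int64_t device) {
  // Maximal: one device runs the entire operation.
  xla::OpSharding sharding;
  sharding.set_type(xla::OpSharding::Type::OpSharding_Type_MAXIMAL);
  sharding.add_tile_assignment_devices(device);
  return sharding;
}
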
/external/autotest/
global_config.ini
42 # This is for sharding: Even when sharding, the results (tko tables) should
