Searched refs: to_apply (Results 1 – 25 of 65) sorted by relevance

/external/tensorflow/tensorflow/compiler/xla/service/
hlo_subcomputation_unification_test.cc
88 EXPECT_NE(x->to_apply(), y->to_apply()); in TEST_F()
91 EXPECT_EQ(x->to_apply(), y->to_apply()); in TEST_F()
117 EXPECT_NE(x->to_apply(), y->to_apply()); in TEST_F()
120 EXPECT_EQ(x->to_apply(), y->to_apply()); in TEST_F()
147 EXPECT_NE(x->to_apply(), y->to_apply()); in TEST_F()
150 EXPECT_NE(x->to_apply(), y->to_apply()); in TEST_F()
all_reduce_combiner.cc
56 HloComputation* reduction = to_combine[0]->to_apply(); in CombineAllReduces()
68 TF_RET_CHECK(hlo->to_apply() == reduction || in CombineAllReduces()
69 (hlo->to_apply()->instruction_count() == 3 && in CombineAllReduces()
70 hlo->to_apply()->num_parameters() == 2 && in CombineAllReduces()
71 hlo->to_apply()->root_instruction()->opcode() == type)); in CombineAllReduces()
109 : opcode(hlo->to_apply()->root_instruction()->opcode()), in GroupKey()
110 accum_type(hlo->to_apply()->root_instruction()->shape().element_type()), in GroupKey()
191 if (instruction->to_apply()->instruction_count() != 3 || in CreateComputationGroups()
192 instruction->to_apply()->num_parameters() != 2) { in CreateComputationGroups()
230 if (instruction->to_apply()->instruction_count() != 3 || in CreateComputationGroups()
[all …]
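The all_reduce_combiner hits above all probe the same property of an all-reduce's to_apply() computation before combining. A minimal sketch of that check, assuming the helper name IsSimpleBinaryReducer (not a function in the pass itself):

#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"

// Sketch only: an all-reduce is considered combinable when its reducer is a
// trivial binary computation -- two parameters, three instructions in total
// (the two parameters plus the root), and a root with the expected opcode.
bool IsSimpleBinaryReducer(const xla::HloInstruction* hlo,
                           xla::HloOpcode expected_opcode) {
  const xla::HloComputation* reducer = hlo->to_apply();
  return reducer->instruction_count() == 3 &&
         reducer->num_parameters() == 2 &&
         reducer->root_instruction()->opcode() == expected_opcode;
}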
stable_sort_expander_test.cc
111 root->operand(0)->to_apply()->root_instruction(), /*iota_parameter=*/1); in TEST_F()
157 root->operand(0)->to_apply()->root_instruction(), /*iota_parameter=*/1); in TEST_F()
191 root->operand(0)->operand(0)->to_apply()->root_instruction(), in TEST_F()
255 root->operand(0)->to_apply()->root_instruction(), in TEST_F()
292 root->operand(0)->to_apply()->root_instruction(), in TEST_F()
323 root->operand(0)->to_apply()->root_instruction(), /*iota_parameter=*/1); in TEST_F()
354 root->operand(0)->operand(0)->to_apply()->root_instruction(), in TEST_F()
all_reduce_simplifier.cc
88 if (all_reduce->to_apply()->instruction_count() != 3 || in Run()
89 all_reduce->to_apply()->num_parameters() != 2) { in Run()
93 switch (all_reduce->to_apply()->root_instruction()->opcode()) { in Run()
hlo_parser_test.cc
387 ROOT %call = f32[] call(f32[] %constant), to_apply=%Identity.v1 in CreateTestCases()
444 …d, f32[] %constant), window={size=1x1x7x1 stride=1x4x1x1 pad=0_0x0_0x3_3x0_0}, to_apply=%add_F32.v3 in CreateTestCases()
463 …ROOT %reduce-window = f32[] reduce-window(f32[] %constant, f32[] %constant.1), to_apply=%add_F32.v3 in CreateTestCases()
486 …indow(f32[] %constant, f32[] %constant, f32[] %constant.1, f32[] %constant.1), to_apply=%add_F32.v3 in CreateTestCases()
945 …_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, to_apply=%add_F32.v3 in CreateTestCases()
964 …dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, indices_are_sorted=true, to_apply=%add_F32.v3 in CreateTestCases()
983 …ter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, unique_indices=true, to_apply=%add_F32.v3 in CreateTestCases()
1167 ROOT map = f32[4]{0} map(param0, param1), dimensions={0}, to_apply=add_F32.v3 in CreateTestCases()
1186 ROOT reduce = f32[8,16]{1,0} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3 in CreateTestCases()
1212 …(f32[], s32[]) reduce(values, indices, init_value, init_index), dimensions={0}, to_apply=max_argmax in CreateTestCases()
[all …]
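The parser tests above exercise to_apply as a plain HLO attribute. A hedged round-trip sketch, assuming ParseAndReturnUnverifiedModule from hlo_parser.h; the module text and the function CheckToApplyRoundTrip are illustrative, only the add_F32.v3 reducer name is taken from the test cases above:

#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/core/platform/logging.h"

constexpr char kReduceHlo[] = R"(
HloModule reduce_example

add_F32.v3 {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT add = f32[] add(lhs, rhs)
}

ENTRY entry {
  input = f32[8,16,256] parameter(0)
  zero = f32[] constant(0)
  ROOT reduce = f32[8,16] reduce(input, zero), dimensions={2}, to_apply=add_F32.v3
}
)";

// The to_apply attribute in the text above is resolved to an HloComputation*
// that the resulting reduce instruction exposes through to_apply().
void CheckToApplyRoundTrip() {
  auto module = xla::ParseAndReturnUnverifiedModule(kReduceHlo).ValueOrDie();
  xla::HloComputation* reducer =
      module->entry_computation()->root_instruction()->to_apply();
  CHECK_EQ(reducer->num_parameters(), 2);
}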
tree_reduction_rewriter.cc
86 hlo->to_apply())); in HandleReduce()
90 hlo->dimensions(), hlo->to_apply()); in HandleReduce()
hlo_module_test.cc
285 HloComputation* reduction = root->to_apply(); in TEST_F()
385 EXPECT_EQ(root->to_apply()->root_instruction()->opcode(), in TEST_F()
387 EXPECT_EQ(root->to_apply()->root_instruction()->comparison_direction(), in TEST_F()
391 replacement[root->to_apply()] = new_comp; in TEST_F()
394 EXPECT_EQ(root->to_apply(), new_comp); in TEST_F()
shape_inference_test.cc
66 ProgramShape to_apply = ShapeUtil::MakeProgramShape({f32_, f32_}, f32_); in ExpectInferredReduceShape() local
68 {&arg, &f32_}, dimensions_to_reduce, to_apply); in ExpectInferredReduceShape()
329 ProgramShape to_apply = ShapeUtil::MakeProgramShape( in TEST_F() local
332 matrix_shape, init_value_shape, window, to_apply); in TEST_F()
944 ProgramShape to_apply = ShapeUtil::MakeProgramShape({f32_}, s32_); in TEST_F() local
945 auto inferred_status = ShapeInference::InferMapShape({&arg}, to_apply, {0}); in TEST_F()
1015 ProgramShape to_apply = ShapeUtil::MakeProgramShape({f32_}, f32_); in TEST_F() local
1016 auto inferred_status = ShapeInference::InferMapShape({&arg}, to_apply, {0}); in TEST_F()
1093 ProgramShape to_apply = ShapeUtil::MakeProgramShape( in TEST_F() local
1096 {&f32_arg_shape, &s32_arg_shape, &f32_, &s32_}, {0, 1}, to_apply); in TEST_F()
[all …]
shape_inference.h
83 absl::Span<const Shape* const> arg_shapes, const ProgramShape& to_apply,
162 const ProgramShape& to_apply);
300 absl::Span<const Shape* const> arg_shapes, const ProgramShape& to_apply);
stable_sort_expander.cc
79 auto comparator = sort->to_apply(); in ExpandInstruction()
130 auto comparator = sort->to_apply(); in ExpandInstruction()
hlo_instructions.cc
710 eq_computations(to_apply(), casted_other.to_apply()); in IdenticalSlowPathIgnoringChannelIdValues()
718 shape, new_operands, to_apply(), replica_groups(), constrain_layout(), in CloneWithNewOperandsImpl()
934 eq_computations(to_apply(), casted_other.to_apply()); in IdenticalSlowPath()
942 dimensions(), to_apply()); in CloneWithNewOperandsImpl()
988 return eq_computations(to_apply(), other.to_apply()); in IdenticalSlowPath()
995 shape, dimensions(0), new_operands, to_apply(), is_stable()); in CloneWithNewOperandsImpl()
1188 return eq_computations(to_apply(), casted_other.to_apply()) && in IdenticalSlowPath()
1195 return absl::make_unique<HloMapInstruction>(shape, new_operands, to_apply()); in CloneWithNewOperandsImpl()
2272 return eq_computations(to_apply(), casted_other.to_apply()) && in IdenticalSlowPath()
2286 window(), to_apply()); in CloneWithNewOperandsImpl()
[all …]
hlo_verifier.cc
547 HloComputation* compare = sort->to_apply(); in HandleSort()
644 const ProgramShape& to_apply = instruction.to_apply()->ComputeProgramShape(); in SameElementTypesForOperandsAndToApplyParameters() local
646 const Shape& parameter_shape = to_apply.parameters(i); in SameElementTypesForOperandsAndToApplyParameters()
673 reduce->to_apply()->ComputeProgramShape()))); in HandleReduce()
784 CheckParameterCount(call, call->to_apply(), call->operand_count())); in HandleCall()
785 for (int64 i = 0; i < call->to_apply()->num_parameters(); ++i) { in HandleCall()
786 TF_RETURN_IF_ERROR(CheckOperandAndParameter(call, i, call->to_apply(), i)); in HandleCall()
789 return CheckShape(call, call->to_apply()->root_instruction()->shape()); in HandleCall()
884 operand_shapes, map->to_apply()->ComputeProgramShape(), map_dims))); in HandleMap()
902 reduce_window->to_apply()->ComputeProgramShape()))); in HandleReduceWindow()
[all …]
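HandleCall above checks a call instruction against its to_apply() computation. A condensed sketch of that contract, assuming the usual xla::ShapeUtil and status helpers; CheckCallArity is an illustrative name, not the verifier's:

#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/util.h"

// Sketch: a kCall is well formed when its operand count equals the callee's
// parameter count, every operand shape matches the corresponding parameter
// shape, and the call's own shape matches the callee root's shape.
xla::Status CheckCallArity(const xla::HloInstruction* call) {
  const xla::HloComputation* callee = call->to_apply();
  if (call->operand_count() != callee->num_parameters()) {
    return xla::InvalidArgument("expected %d operands, got %d",
                                callee->num_parameters(),
                                call->operand_count());
  }
  for (int i = 0; i < callee->num_parameters(); ++i) {
    const xla::Shape& param_shape = callee->parameter_instruction(i)->shape();
    if (!xla::ShapeUtil::Compatible(call->operand(i)->shape(), param_shape)) {
      return xla::InvalidArgument("operand %d does not match parameter %d",
                                  i, i);
    }
  }
  if (!xla::ShapeUtil::Compatible(call->shape(),
                                  callee->root_instruction()->shape())) {
    return xla::InvalidArgument("call shape does not match callee root shape");
  }
  return xla::Status::OK();
}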
shape_inference.cc
1117 absl::Span<const Shape* const> arg_shapes, const ProgramShape& to_apply, in InferMapShape() argument
1171 if (arg_shapes.size() != to_apply.parameters_size()) { in InferMapShape()
1175 to_apply.parameters_size(), arg_shapes.size()); in InferMapShape()
1179 const Shape& output_shape = to_apply.result(); in InferMapShape()
1186 for (int i = 0; i < to_apply.parameters_size(); ++i) { in InferMapShape()
1187 const Shape& parameter_shape = to_apply.parameters(i); in InferMapShape()
2116 const ProgramShape& to_apply) { in InferReduceShape() argument
2154 TF_RETURN_IF_ERROR(VerifyReducerShape(to_apply, init_values, element_types, in InferReduceShape()
2174 if (ShapeUtil::IsScalar(to_apply.result())) { in InferReduceShape()
2175 return ShapeUtil::MakeShape(to_apply.result().element_type(), in InferReduceShape()
[all …]
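InferMapShape above validates the map operands against the to_apply ProgramShape before producing a result shape. A simplified sketch of that validation, under the assumption that each to_apply parameter is a scalar of the corresponding operand's element type; InferSimpleMapShape is an illustrative name, not the real API:

#include "absl/types/span.h"
#include "tensorflow/compiler/xla/shape.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/util.h"

// Sketch: the mapped computation must take one scalar parameter per operand,
// each with that operand's element type; the result keeps the first operand's
// dimensions but takes its element type from to_apply's result.
xla::StatusOr<xla::Shape> InferSimpleMapShape(
    absl::Span<const xla::Shape* const> arg_shapes,
    const xla::ProgramShape& to_apply) {
  if (arg_shapes.size() != static_cast<size_t>(to_apply.parameters_size())) {
    return xla::InvalidArgument(
        "map applies a computation with %d parameters to %d operands",
        to_apply.parameters_size(), arg_shapes.size());
  }
  for (int i = 0; i < to_apply.parameters_size(); ++i) {
    const xla::Shape& param = to_apply.parameters(i);
    if (!xla::ShapeUtil::IsScalar(param) ||
        param.element_type() != arg_shapes[i]->element_type()) {
      return xla::InvalidArgument("map parameter %d is not a matching scalar",
                                  i);
    }
  }
  return xla::ShapeUtil::ChangeElementType(*arg_shapes[0],
                                           to_apply.result().element_type());
}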
map_inliner.cc
67 HloComputation* function = map->to_apply(); in HandleMap()
flatten_call_graph.cc
47 CHECK_EQ(instruction->to_apply(), computation); in ReplaceCalledComputation()
hlo_parser.cc
1227 optional<HloComputation*> to_apply; in ParseInstructionRhs() local
1233 &to_apply}; in ParseInstructionRhs()
1249 shape, operands, *to_apply, replica_groups, in ParseInstructionRhs()
1399 optional<HloComputation*> to_apply; in ParseInstructionRhs() local
1401 &to_apply}; in ParseInstructionRhs()
1420 to_apply.value(), is_stable.value())); in ParseInstructionRhs()
1547 optional<HloComputation*> to_apply; in ParseInstructionRhs() local
1549 &to_apply}; in ParseInstructionRhs()
1561 arg_shapes, to_apply.value()->ComputeProgramShape()); in ParseInstructionRhs()
1567 HloInstruction::CreateCall(shape, operands, *to_apply)); in ParseInstructionRhs()
[all …]
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
tree_reduction_rewriter.cc
177 dims_to_reduce, hlo->to_apply())); in RewriteReduction()
193 {outer_reduced_dimension}, hlo->to_apply()); in RewriteReduction()
215 {reduced_input_dimension}, hlo->to_apply())); in RewriteBatchDimensionLargerThanTile()
219 hlo->to_apply()); in RewriteBatchDimensionLargerThanTile()
reduction_splitter.cc
88 reduce->mutable_operand(1), pre_reduce_dims, reduce->to_apply(); in HandleReduce()
105 reduce->mutable_operand(1), final_reduce_dims, reduce->to_apply()); in HandleReduce()
reduction_dimension_grouper.cc
94 reduced_dims_grouped, reduce->to_apply()); in HandleReduce()
reduction_degenerate_dim_remover.cc
79 updated_reduced_dimensions, instr->to_apply()); in HandleReduce()
reduction_layout_normalizer.cc
109 new_reduce_dimensions, reduce->to_apply()); in HandleReduce()
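The GPU reduction rewrites above (tree reduction, splitting, dimension grouping, degenerate-dim removal, layout normalization) share one pattern: build a replacement reduce over adjusted dimensions while passing the original to_apply() through unchanged. A hedged sketch of that pattern; RebuildReduce and its shape/dimension arguments are illustrative:

#include <memory>

#include "absl/types/span.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"

// Sketch: the reducer computation is reused verbatim; only the output shape
// and the set of reduced dimensions differ between old and new instruction.
std::unique_ptr<xla::HloInstruction> RebuildReduce(
    xla::HloInstruction* reduce, const xla::Shape& new_shape,
    absl::Span<const xla::int64> new_dims) {
  return xla::HloInstruction::CreateReduce(
      new_shape, reduce->mutable_operand(0), reduce->mutable_operand(1),
      new_dims, reduce->to_apply());
}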
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
parallel_task_assignment.cc
209 changed |= AssignParallelTasksHelper(module, instruction->to_apply(), in AssignParallelTasksHelper()
236 auto* new_root = call->to_apply()->root_instruction(); in AssignParallelTasksHelper()
/external/tensorflow/tensorflow/compiler/mlir/xla/
hlo_function_importer.cc
308 ImportAsFunc(*instruction->to_apply())); in ImportInstructionImpl()
444 TF_RETURN_IF_ERROR(ImportAsRegion(*scatter->to_apply(), in ImportInstructionImpl()
499 ImportAsRegion(*sort_instruction->to_apply(), &sort_op.comparator())); in ImportInstructionImpl()
561 TF_RETURN_IF_ERROR(ImportAsRegion(*all_reduce->to_apply(), in ImportInstructionImpl()
574 ImportAsRegion(*instruction->to_apply(), &reduce.body())); in ImportInstructionImpl()
679 ImportAsRegion(*instruction->to_apply(), &reduce.body())); in ImportInstructionImpl()
687 ImportAsRegion(*instruction->to_apply(), &op.computation())); in ImportInstructionImpl()
/external/tensorflow/tensorflow/compiler/mlir/xla/tests/translate/
export.mlir
41 // CHECK-SAME: to_apply=%[[COMPUTATION]]
70 // CHECK-SAME: to_apply=%[[COMPUTATION]]
191 // CHECK-NEXT: [[CALL:%.*]] = () call(), to_apply=[[CALLEE]]
222 // CHECK: [[CALL_OUT:%.*]] = s32[4] call(s32[4] %[[ARG]], s32[4] %[[ARG]]), to_apply=[[CALLEE_1]]
224 // CHECK-SAME: s32[4] call(s32[4] [[CALL_OUT]], s32[4] [[CALL_OUT]]), to_apply=[[CALLEE_2]]
246 // CHECK: [[CALL_OUT:%.*]] = (s32[4], s32[4]) call(s32[4] %[[ARG]], s32[4] %[[ARG]]), to_apply=[[C…
402 // CHECK-SAME: to_apply=%[[SUM_COMPUTATION]]
573 // CHECK-SAME: f32[4] map(f32[4] [[ARG_2]], f32[4] [[ARG_3]]), dimensions={0}, to_apply=[[COMPUTAT…
664 …RG0]], s32[1,10] %[[ARG1]], f32[] %[[ARG2]], s32[] %[[ARG3]]), dimensions={1}, to_apply=%[[REGION]]
696 // CHECK-SAME: to_apply=%[[MAX_COMPUTATION]]
[all …]
/external/tensorflow/tensorflow/compiler/xla/service/gpu/tests/
scatter.hlo
63 to_apply=update_s32,
124 to_apply=update_s32
204 to_apply=mul_s32,
265 to_apply=update_s32,
