/external/tensorflow/tensorflow/compiler/xla/service/ |
D | stable_sort_expander.cc |
      86  sort->operand_count() * 2, scalar_shape,  in ExpandInstruction()
      87  absl::StrCat("p.", sort->operand_count(), ".lhs")));  in ExpandInstruction()
      90  sort->operand_count() * 2 + 1, scalar_shape,  in ExpandInstruction()
      91  absl::StrCat("p.", sort->operand_count(), ".rhs")));  in ExpandInstruction()
     101  std::vector<Shape> new_shapes = sort->operand_count() == 1  in ExpandInstruction()
     114  tuple_elements.reserve(sort->operand_count());  in ExpandInstruction()
     115  for (int64 i = 0; i < sort->operand_count(); ++i) {  in ExpandInstruction()
     126  iota_index = sort->operand_count() - 1;  in ExpandInstruction()
     164  if (inst->operand_count() == 0) {  in ExpandInstruction()
     168  new_operands.reserve(inst->operand_count());  in ExpandInstruction()
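The StableSortExpander hits above all lean on XLA's sort-comparator convention: a sort with N operands has a comparator computation with 2*N scalar parameters, where parameters 2*i and 2*i+1 compare operand i and are conventionally named "p.<i>.lhs" / "p.<i>.rhs"; the operand_count() * 2 and operand_count() * 2 + 1 indices are the pair the expander adds for the iota operand it appends. A minimal, self-contained sketch of that numbering, assuming only the naming convention (ComparatorParameterNames is a hypothetical helper, not part of XLA):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    // For a sort with `operand_count` operands, the comparator takes two scalar
    // parameters per operand: 2*i ("p.<i>.lhs") and 2*i+1 ("p.<i>.rhs").
    std::vector<std::string> ComparatorParameterNames(int64_t operand_count) {
      std::vector<std::string> names;
      names.reserve(2 * operand_count);
      for (int64_t i = 0; i < operand_count; ++i) {
        names.push_back("p." + std::to_string(i) + ".lhs");
        names.push_back("p." + std::to_string(i) + ".rhs");
      }
      return names;
    }

    int main() {
      // A two-operand sort, e.g. the original keys plus the iota added for stability.
      for (const std::string& name : ComparatorParameterNames(2)) {
        std::cout << name << "\n";  // p.0.lhs, p.0.rhs, p.1.lhs, p.1.rhs
      }
      return 0;
    }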
|
D | dfs_hlo_visitor_with_default_test.cc |
      43  TF_RET_CHECK(!(hlo->IsElementwise() && hlo->operand_count() == 2))  in TEST_F()
      45  TF_RET_CHECK(!(hlo->IsElementwise() && hlo->operand_count() == 1))  in TEST_F()
      52  TF_RET_CHECK(hlo->IsElementwise() && hlo->operand_count() == 2)  in TEST_F()
      58  TF_RET_CHECK(hlo->IsElementwise() && hlo->operand_count() == 1)  in TEST_F()
|
D | bfloat16_normalization.cc |
     154  std::vector<PrimitiveType> operand_types(hlo->operand_count());  in HandleMultipleOutputs()
     155  std::vector<PrimitiveType> output_types(hlo->operand_count());  in HandleMultipleOutputs()
     160  for (int64 i = 0; i < hlo->operand_count(); ++i) {  in HandleMultipleOutputs()
     199  for (int64 i = 0; i < hlo->operand_count(); ++i) {  in HandleMultipleOutputs()
     236  std::vector<HloInstruction*> output_elements(hlo->operand_count());  in HandleMultipleOutputs()
     238  for (int64 i = 0; i < hlo->operand_count(); ++i) {  in HandleMultipleOutputs()
     273  for (int64 i = 0; i < hlo->operand_count(); ++i) {  in HandleInstruction()
     310  for (int i = 0; i < hlo->operand_count(); ++i) {  in HandleInstruction()
     339  for (int i = 0; i < hlo->operand_count(); ++i) {  in HandleInstruction()
     353  for (int i = 0; i < hlo->operand_count(); ++i) {  in HandleInstruction()
     [all …]
|
D | sort_simplifier.cc |
      56  for (int64 i = 0; i < sort->operand_count() * 2; ++i) {  in RemoveUnusedOperandFromSort()
      64  if (used_indices.size() == sort->operand_count()) {  in RemoveUnusedOperandFromSort()
      71  for (int64 i = 0; i < sort->operand_count(); ++i) {  in RemoveUnusedOperandFromSort()
      86  for (int64 i = 0; i < sort->operand_count(); ++i) {  in RemoveUnusedOperandFromSort()
     115  for (int64 i = 0; i < sort->operand_count(); ++i) {  in RemoveUnusedOperandFromSort()
|
D | dynamic_dimension_inference.cc |
     171  int64 operand_count = reduce->operand_count();  in HandleReduce() local
     172  CHECK_EQ(operand_count % 2, 0);  in HandleReduce()
     173  if (operand_index >= operand_count / 2) {  in HandleReduce()
     467  int64 operand_count = hlo->shape().tuple_shapes_size();  in HandleWhile() local
     472  for (int64 i = 0; i < tuple_operand->operand_count(); ++i) {  in HandleWhile()
     481  dynamic_size_to_operand_id_index_map[dynamic_size] = operand_count++;  in HandleWhile()
     557  for (int64 operand_index = 0; operand_index < inst->operand_count();  in ForEachOperandDynamicDimension()
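The HandleReduce hits depend on variadic reduce keeping its operands in two halves: indices [0, N/2) are the arrays being reduced and [N/2, N) are the matching init values, which is why the code checks operand_count % 2 == 0 and skips operand_index >= operand_count / 2. A small sketch of that split, assuming only this indexing convention (the enum and helper are illustrative, not XLA code):

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Variadic reduce takes k input arrays followed by k init values,
    // so operand_count is even and index i refers to an init value iff i >= k.
    enum class ReduceOperandKind { kInput, kInitValue };

    ReduceOperandKind ClassifyReduceOperand(int64_t operand_index,
                                            int64_t operand_count) {
      assert(operand_count % 2 == 0);  // mirrors CHECK_EQ(operand_count % 2, 0)
      assert(operand_index >= 0 && operand_index < operand_count);
      return operand_index >= operand_count / 2 ? ReduceOperandKind::kInitValue
                                                : ReduceOperandKind::kInput;
    }

    int main() {
      // A reduce over two arrays: operands 0 and 1 are inputs, 2 and 3 are init values.
      for (int64_t i = 0; i < 4; ++i) {
        std::cout << i
                  << (ClassifyReduceOperand(i, 4) == ReduceOperandKind::kInput
                          ? " -> input\n"
                          : " -> init value\n");
      }
      return 0;
    }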
|
D | stable_sort_expander_test.cc |
      38  if (a->opcode() != b->opcode() || a->operand_count() != b->operand_count()) {  in IsSameComputationExceptParams()
      46  if (a->operand_count() == 0) {  in IsSameComputationExceptParams()
      50  for (int64 i = 0; i < a->operand_count(); ++i) {  in IsSameComputationExceptParams()
|
D | hlo_verifier.cc |
      64  if (hlo->operand_count() != expected) {  in CheckOperandCount()
     338  if (sort->operand_count() < 1) {  in HandleSort()
     356  CheckParameterCount(sort, compare, sort->operand_count() * 2));  in HandleSort()
     378  for (int64 operand = 1; operand < sort->operand_count(); ++operand) {  in HandleSort()
     442  if (reduce->operand_count() % 2 != 0) {  in HandleReduce()
     515  if (fused_parameters.size() != fusion->operand_count()) {  in HandleFusion()
     519  fused_parameters.size(), fusion->operand_count(),  in HandleFusion()
     536  CheckParameterCount(call, call->to_apply(), call->operand_count()));  in HandleCall()
     553  TF_RET_CHECK(custom_call->operand_count() ==  in HandleCustomCall()
     555  for (int64 i = 0; i < custom_call->operand_count(); ++i) {  in HandleCustomCall()
     [all …]
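Most of the hlo_verifier.cc hits are arity checks: fixed-arity ops must match an expected operand count, a sort needs at least one operand and a comparator with twice as many parameters, and a variadic reduce needs an even count. A hedged sketch of the basic check with simplified stand-in types (the real CheckOperandCount operates on HloInstruction and returns a Status):

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>

    // Simplified stand-in for an HLO instruction: just a name and an operand count.
    struct FakeOp {
      std::string name;
      int64_t operand_count;
    };

    // Compare the actual operand count against the expected one and report a
    // readable error; a bool plus out-string stands in for the real Status.
    bool CheckOperandCount(const FakeOp& op, int64_t expected, std::string* error) {
      if (op.operand_count != expected) {
        std::ostringstream msg;
        msg << "Expected " << expected << " operands for " << op.name
            << " instruction, got " << op.operand_count;
        *error = msg.str();
        return false;
      }
      return true;
    }

    int main() {
      std::string error;
      FakeOp add{"add", 2};
      std::cout << CheckOperandCount(add, 2, &error) << "\n";  // 1: arity matches

      // A variadic reduce must pair each input with an init value, so an odd
      // operand count (the condition checked at hlo_verifier.cc:442) is rejected.
      FakeOp bad_reduce{"reduce", 3};
      if (bad_reduce.operand_count % 2 != 0) {
        std::cout << "reduce operand count must be even\n";
      }
      return 0;
    }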
|
D | hlo_evaluator.cc |
    1437  TF_RET_CHECK(sort->operand_count() >= 1)  in HandleSort()
    1439  for (int64 i = 1; i < sort->operand_count(); ++i) {  in HandleSort()
    1446  for (int64 i = 0; i < sort->operand_count(); ++i) {  in HandleSort()
    1454  result_literals.reserve(sort->operand_count());  in HandleSort()
    1455  for (int64 i = 0; i < sort->operand_count(); ++i) {  in HandleSort()
    1474  literals_to_sort.reserve(sort->operand_count());  in HandleSort()
    1475  for (int64 i = 0; i < sort->operand_count(); ++i) {  in HandleSort()
    1488  literals.reserve(2 * sort->operand_count());  in HandleSort()
    1489  for (int64 i = 0; i < sort->operand_count(); ++i) {  in HandleSort()
    1532  for (int64 i = 0; i < sort->operand_count(); ++i) {  in HandleSort()
     [all …]
|
D | bfloat16_conversion_folding.cc |
     206  crs->operand_count());  in HandleAllReduce()
     214  for (int64 i = 0; i < crs->operand_count(); ++i) {  in HandleAllReduce()
|
D | transpose_folding.cc |
      44  for (int64 i = 0; i < dot.operand_count(); ++i) {  in CanFoldOperandsIntoDot()
      65  for (int64 i = 0; i < convolution.operand_count(); ++i) {  in CanFoldOperandsIntoConvolution()
|
D | layout_assignment.cc |
     360  for (int64 i = 0; i < instruction->operand_count(); ++i) {  in ToString()
     472  for (int64 i = 0; i < custom_call->operand_count(); ++i) {  in AddMandatoryConstraints()
     540  TF_RET_CHECK(instruction->operand_count() ==  in AddMandatoryConstraints()
     542  for (int64 i = 0; i < instruction->operand_count(); ++i) {  in AddMandatoryConstraints()
     559  CHECK_EQ(1, instruction->operand_count());  in AddMandatoryConstraints()
     660  TF_RET_CHECK(computation->num_parameters() == call->operand_count());  in CheckCallLayout()
     676  for (int64 i = 0; i < custom_call->operand_count(); ++i) {  in CheckCustomCallLayout()
     735  for (int64 i = 0; i < fusion->operand_count(); ++i) {  in CheckFusionLayout()
    1279  for (int64 operand_no = 0; operand_no < user->operand_count();  in PropagateOperandConstraint()
    1390  for (int64 operand_no = 0; operand_no < instruction->operand_count();  in PropagateBufferConstraintToOperands()
     [all …]
|
/external/v8/src/interpreter/ |
D | bytecode-node.h |
      27  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());  in bytecode_()
      36  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());  in bytecode_()
      47  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());  in bytecode_()
      59  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());  in bytecode_()
      73  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());  in bytecode_()
      88  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());  in bytecode_()
     111  DCHECK_LT(i, operand_count());  in operand()
     118  int operand_count() const { return operand_count_; }  in operand_count() function
     134  V8_INLINE BytecodeNode(Bytecode bytecode, int operand_count,
     140  operand_count_(operand_count),  in bytecode_()
     [all …]
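The bytecode-node.h hits all assert one invariant: the number of operands stored in a BytecodeNode equals the operand count the Bytecodes table defines for its opcode. A minimal sketch of that idea using a hypothetical two-entry opcode table (not V8's generated Bytecode list):

    #include <cassert>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical opcodes; V8's real Bytecode enum is much larger and its
    // operand counts come from the generated Bytecodes::NumberOfOperands table.
    enum class Bytecode { kLdaZero, kAddSmi };

    int NumberOfOperands(Bytecode bytecode) {
      switch (bytecode) {
        case Bytecode::kLdaZero: return 0;  // no operands
        case Bytecode::kAddSmi:  return 2;  // immediate + feedback slot
      }
      return 0;
    }

    class BytecodeNode {
     public:
      BytecodeNode(Bytecode bytecode, std::vector<uint32_t> operands)
          : bytecode_(bytecode), operands_(std::move(operands)) {
        // Same invariant as the DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode),
        // operand_count()) lines above: node arity must match the table.
        assert(NumberOfOperands(bytecode_) == static_cast<int>(operands_.size()));
      }

      int operand_count() const { return static_cast<int>(operands_.size()); }

      uint32_t operand(int i) const {
        assert(i < operand_count());  // mirrors DCHECK_LT(i, operand_count())
        return operands_[i];
      }

     private:
      Bytecode bytecode_;
      std::vector<uint32_t> operands_;
    };

    int main() {
      BytecodeNode node(Bytecode::kAddSmi, {42, 1});
      std::cout << node.operand_count() << " operands, first = " << node.operand(0)
                << "\n";
      return 0;
    }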
|
D | bytecode-node.cc |
      20  for (int i = 0; i < operand_count(); ++i) {  in Print()
      41  for (int i = 0; i < this->operand_count(); ++i) {  in operator ==()
|
D | bytecode-array-writer.cc |
     207  const int operand_count = node->operand_count();  in EmitBytecode() local
     210  for (int i = 0; i < operand_count; ++i) {  in EmitBytecode()
|
/external/google-breakpad/src/third_party/libdisasm/ |
D | x86_operand_list.c |
      14  insn->operand_count = 1;  in x86_oplist_append()
      29  insn->operand_count = insn->operand_count + 1;  in x86_oplist_append()
      62  insn->operand_count = 0;  in x86_oplist_free()
     159  return insn->operand_count;  in x86_operand_count()
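Here the count is cached rather than recomputed: x86_oplist_append keeps insn->operand_count in step with the linked operand list (1 for the first node, +1 afterwards), x86_oplist_free resets it to 0, and x86_operand_count just returns the stored field. A small sketch of that bookkeeping with shortened stand-in types, written as C++ to match the other examples here:

    #include <iostream>

    // Simplified stand-ins for libdisasm's x86_oplist_t / x86_insn_t.
    struct OpListNode {
      int op;  // the real struct holds an x86_op_t
      OpListNode* next;
    };

    struct Insn {
      unsigned int operand_count = 0;
      OpListNode* operands = nullptr;
    };

    // Append keeps the cached count in sync with the list, as in
    // x86_oplist_append: the first node sets the count to 1, later nodes increment.
    void oplist_append(Insn* insn, int op) {
      OpListNode* node = new OpListNode{op, nullptr};
      if (insn->operands == nullptr) {
        insn->operands = node;
        insn->operand_count = 1;
      } else {
        OpListNode* it = insn->operands;
        while (it->next != nullptr) it = it->next;
        it->next = node;
        insn->operand_count = insn->operand_count + 1;
      }
    }

    // Free tears the list down and resets the count, as in x86_oplist_free.
    void oplist_free(Insn* insn) {
      for (OpListNode* it = insn->operands; it != nullptr;) {
        OpListNode* next = it->next;
        delete it;
        it = next;
      }
      insn->operands = nullptr;
      insn->operand_count = 0;
    }

    int main() {
      Insn insn;
      oplist_append(&insn, 1);
      oplist_append(&insn, 2);
      std::cout << insn.operand_count << "\n";  // 2, the value x86_operand_count returns
      oplist_free(&insn);
      return 0;
    }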
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | instruction_fusion.cc |
     101  if (a->operand_count() + b->operand_count() - 1 + num_output_buffers <=  in FusionWouldBeTooLarge()
     128  if (consumer->operand_count() == 1 &&  in ShouldFuseInexpensiveChecks()
     150  } else if (consumer->operand_count() == 2 &&  in ShouldFuseInexpensiveChecks()
     159  } else if (consumer->operand_count() == 2 &&  in ShouldFuseInexpensiveChecks()
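The FusionWouldBeTooLarge hit encodes an operand budget: fusing producer b into consumer a yields an instruction whose external operands are roughly both operand sets minus the producer-to-consumer edge, hence a->operand_count() + b->operand_count() - 1, plus the output buffers, compared against a fixed limit. A toy sketch of that arithmetic; the limit value below is a stand-in, not a claim about the actual value of kMaxOperandsAndOutputsPerFusion:

    #include <cstdint>
    #include <iostream>

    // Placeholder budget for illustration; the real constant lives in the GPU
    // backend as kMaxOperandsAndOutputsPerFusion.
    constexpr int64_t kMaxOperandsAndOutputsPerFusion = 64;

    // Upper bound on the fused op's parameter/output count: the consumer's and
    // producer's operands combined, minus the producer->consumer edge that
    // disappears when they are fused, plus the fusion's output buffers.
    bool FusionWouldBeTooLarge(int64_t consumer_operand_count,
                               int64_t producer_operand_count,
                               int64_t num_output_buffers) {
      int64_t operands_and_outputs = consumer_operand_count +
                                     producer_operand_count - 1 +
                                     num_output_buffers;
      return operands_and_outputs > kMaxOperandsAndOutputsPerFusion;
    }

    int main() {
      // A binary consumer fused with a binary producer and one output buffer:
      // 2 + 2 - 1 + 1 = 4 operands/outputs, well under the stand-in budget.
      std::cout << std::boolalpha << FusionWouldBeTooLarge(2, 2, 1) << "\n";  // false
      return 0;
    }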
|
D | gpu_copy_insertion.cc |
      51  for (int64 i = hlo->operand_count() - 2; i < hlo->operand_count(); ++i) {  in Run()
|
D | gpu_hlo_schedule.cc |
     111  CHECK_EQ(hlo->operand_count(), 0);  in GpuHloOrdering()
     164  if (hlo->operand_count() == 0) {  in BFSLaunchOrder()
|
D | gpu_layout_assignment.cc |
     160  if (instr->operand_count() == 4) {  in AddBackendConstraintsToDnnConvCustomCall()
     223  for (int64 i = 0; i < instruction->operand_count(); ++i) {  in AddBackendConstraints()
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | cpu_layout_assignment.cc |
     119  for (int i = 0; i < instruction->operand_count(); i++) {  in AddBackendConstraints()
     129  for (int64 operand_no = 0; operand_no < instruction->operand_count();  in AddBackendConstraints()
|
/external/v8/src/compiler/ |
D | register-allocator-verifier.cc |
      65  const size_t operand_count = OperandCount(instr);  in RegisterAllocatorVerifier() local
      67  zone->NewArray<OperandConstraint>(operand_count);  in RegisterAllocatorVerifier()
      86  InstructionConstraint instr_constraint = {instr, operand_count,  in RegisterAllocatorVerifier()
     125  const size_t operand_count = instr_constraint.operand_constaints_size_;  in VerifyAssignment() local
     129  CHECK(operand_count == OperandCount(instr));  in VerifyAssignment()
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/tests/ |
D | cpu_fusion_test.cc |
     278  EXPECT_EQ(0, fusion1->operand_count());  in TEST_F()
     279  EXPECT_EQ(0, fusion2->operand_count());  in TEST_F()
     333  EXPECT_EQ(0, fusion_inst->operand_count());  in TEST_F()
|
/external/v8/src/runtime/ |
D | runtime-interpreter.cc |
      87  int operand_count = interpreter::Bytecodes::NumberOfOperands(bytecode);  in PrintRegisters() local
      88  for (int operand_index = 0; operand_index < operand_count; operand_index++) {  in PrintRegisters()
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | test_utils.cc |
     351  if (computation.num_parameters() != 2 || root->operand_count() != 2 ||  in GetInitValue()
     377  op_num >= instruction->operand_count() / 2));  in NeedsInitValue()
     422  instruction->operand_count() >= 2 && op_num == 0) {  in FindConstrainedUses()
     455  for (int64 operand = first_index; operand < use->operand_count();  in CreateLiteralForConstrainedUses()
|
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/ |
D | fused_ir_emitter.cc |
     248  weight = user->operand_count();  in IsFusedIrEmitterInefficient()
     274  for (int64 operand_num = 0; operand_num < consumer->operand_count();  in IsFusedIrEmitterInefficient()
|