/external/vixl/src/aarch32/ |
D | disasm-aarch32.cc |
  55  T32CodeAddressIncrementer(uint32_t instr, uint32_t* code_address)    in T32CodeAddressIncrementer() argument
  57  increment_(Disassembler::Is16BitEncoding(instr) ? 2 : 4) {}    in T32CodeAddressIncrementer()
  6974  int Disassembler::T32Size(uint32_t instr) {    in T32Size() argument
  6975  if ((instr & 0xe0000000) == 0xe0000000) {    in T32Size()
  6976  switch (instr & 0x08000000) {    in T32Size()
  6978  if ((instr & 0x10000000) == 0x10000000) return 4;    in T32Size()
  6989  void Disassembler::DecodeT32(uint32_t instr) {    in DecodeT32() argument
  6990  T32CodeAddressIncrementer incrementer(instr, &code_address_);    in DecodeT32()
  6993  switch (instr & 0xe0000000) {    in DecodeT32()
  6996  switch (instr & 0x18000000) {    in DecodeT32()
  [all …]
|
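The T32Size()/Is16BitEncoding() lines above decide whether a Thumb-2 encoding takes one halfword or two from its top bits. A minimal standalone sketch of that width rule (plain C, not vixl's API; it assumes the leading halfword is passed on its own, whereas the disassembler above packs it into the top half of a uint32_t):

    #include <stdint.h>

    /* A T32 instruction is 32 bits wide when the first halfword's top five
     * bits are 0b11101, 0b11110 or 0b11111; anything else is a 16-bit
     * encoding. */
    static int t32_instr_size(uint16_t first_halfword)
    {
        return ((first_halfword & 0xf800u) >= 0xe800u) ? 4 : 2;
    }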
/external/mesa3d/src/gallium/drivers/lima/ir/gp/ |
D | disasm.c |
  50  print_dest(gpir_codegen_instr *instr, gp_unit unit, unsigned cur_dest_index)    in print_dest() argument
  56  if (instr->store0_src_x == src ||    in print_dest()
  57  instr->store0_src_y == src) {    in print_dest()
  58  if (instr->store0_temporary) {    in print_dest()
  64  if (instr->store0_varying)    in print_dest()
  68  printf("%u", instr->store0_addr);    in print_dest()
  72  if (instr->store0_src_x == src)    in print_dest()
  74  if (instr->store0_src_y == src)    in print_dest()
  78  if (instr->store1_src_z == src ||    in print_dest()
  79  instr->store1_src_w == src) {    in print_dest()
  [all …]
|
D | instr.c |
  33  gpir_instr *instr = rzalloc(block, gpir_instr);    in gpir_instr_create() local
  34  if (unlikely(!instr))    in gpir_instr_create()
  43  instr->index = block->sched.instr_index++;    in gpir_instr_create()
  44  instr->alu_num_slot_free = 6;    in gpir_instr_create()
  45  instr->alu_non_cplx_slot_free = 5;    in gpir_instr_create()
  46  instr->alu_max_allowed_next_max = 5;    in gpir_instr_create()
  48  list_add(&instr->list, &block->instr_list);    in gpir_instr_create()
  49  return instr;    in gpir_instr_create()
  52  static gpir_node *gpir_instr_get_the_other_acc_node(gpir_instr *instr, int slot)    in gpir_instr_get_the_other_acc_node() argument
  55  return instr->slots[GPIR_INSTR_SLOT_ADD1];    in gpir_instr_get_the_other_acc_node()
  [all …]
|
/external/mesa3d/src/amd/compiler/ |
D | aco_validate.cpp |
  79  …auto check = [&program, &is_valid](bool check, const char * msg, aco::Instruction * instr) -> void…    in validate_ir() argument
  88  aco_print_instr(instr, memf);    in validate_ir()
  106  for (aco_ptr<Instruction>& instr : block.instructions) {    in validate_ir()
  109  Format base_format = instr->format;    in validate_ir()
  119  if (instr->opcode == aco_opcode::v_interp_p1ll_f16 ||    in validate_ir()
  120  instr->opcode == aco_opcode::v_interp_p1lv_f16 ||    in validate_ir()
  121  instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||    in validate_ir()
  122  instr->opcode == aco_opcode::v_interp_p2_f16) {    in validate_ir()
  131  …check(base_format == instr_info.format[(int)instr->opcode], "Wrong base format for instruction", i…    in validate_ir()
  134  if (((uint32_t)instr->format & (uint32_t)Format::VOP3) && instr->format != Format::VOP3) {    in validate_ir()
  [all …]
|
D | aco_optimizer.cpp |
  39  void perfwarn(Program *program, bool cond, const char *msg, Instruction *instr)    in perfwarn() argument
  49  aco_print_instr(instr, memf);    in perfwarn()
  82  mad_info(aco_ptr<Instruction> instr, uint32_t id)    in mad_info()
  83  : add_instr(std::move(instr)), mul_temp_id(id), literal_idx(0), check_literal(false) {}    in mad_info()
  137  Instruction* instr;    member
  175  instr = vec;    in set_vec()
  295  instr = mul;    in set_mul()
  318  instr = mad;    in set_mad()
  329  instr = mul;    in set_omod2()
  340  instr = mul;    in set_omod4()
  [all …]
|
D | aco_insert_NOPs.cpp |
  180  int get_wait_states(aco_ptr<Instruction>& instr)    in get_wait_states() argument
  182  if (instr->opcode == aco_opcode::s_nop)    in get_wait_states()
  183  return static_cast<SOPP_instruction*>(instr.get())->imm + 1;    in get_wait_states()
  184  else if (instr->opcode == aco_opcode::p_constaddr)    in get_wait_states()
  289  aco_ptr<Instruction>& instr, int *NOPs)    in handle_smem_clause_hazards() argument
  295  …if (ctx.smem_write || instr->definitions.empty() || instr_info.is_atomic[(unsigned)instr->opcode])…    in handle_smem_clause_hazards()
  298  for (Operand op : instr->operands) {    in handle_smem_clause_hazards()
  305  Definition def = instr->definitions[0];    in handle_smem_clause_hazards()
  314  … aco_ptr<Instruction>& instr, std::vector<aco_ptr<Instruction>>& new_instructions)    in handle_instruction_gfx6() argument
  319  if (instr->format == Format::SMEM) {    in handle_instruction_gfx6()
  [all …]
|
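aco_insert_NOPs.cpp above counts wait states (an s_nop with immediate imm covers imm + 1 of them) and pads hazards with whatever is still missing. The arithmetic at the core of that, as a sketch with illustrative names rather than ACO's real data structures:

    /* If a hazard needs `required` wait states between producer and consumer
     * and the instructions already sitting between them supply `elapsed`,
     * pad with the (non-negative) remainder. */
    static int nops_needed(int required, int elapsed)
    {
        int missing = required - elapsed;
        return missing > 0 ? missing : 0;
    }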
/external/mesa3d/src/freedreno/ir3/ |
D | ir3.c |
  132  static int emit_cat0(struct ir3_instruction *instr, void *ptr,    in emit_cat0() argument
  139  cat0->a5xx.immed = instr->cat0.immed;    in emit_cat0()
  141  cat0->a4xx.immed = instr->cat0.immed;    in emit_cat0()
  143  cat0->a3xx.immed = instr->cat0.immed;    in emit_cat0()
  145  cat0->repeat = instr->repeat;    in emit_cat0()
  146  cat0->ss = !!(instr->flags & IR3_INSTR_SS);    in emit_cat0()
  147  cat0->inv0 = instr->cat0.inv;    in emit_cat0()
  148  cat0->comp0 = instr->cat0.comp;    in emit_cat0()
  149  cat0->opc = instr->opc;    in emit_cat0()
  150  cat0->opc_hi = instr->opc >= 16;    in emit_cat0()
  [all …]
|
D | ir3_print.c |
  66  static void print_instr_name(struct ir3_instruction *instr, bool flags)    in print_instr_name() argument
  68  if (!instr)    in print_instr_name()
  71  printf("%04u:", instr->serialno);    in print_instr_name()
  73  printf("%04u:", instr->name);    in print_instr_name()
  74  printf("%04u:", instr->ip);    in print_instr_name()
  75  if (instr->flags & IR3_INSTR_UNUSED) {    in print_instr_name()
  78  printf("%03u: ", instr->use_count);    in print_instr_name()
  83  if (instr->flags & IR3_INSTR_SY)    in print_instr_name()
  85  if (instr->flags & IR3_INSTR_SS)    in print_instr_name()
  87  if (instr->flags & IR3_INSTR_JP)    in print_instr_name()
  [all …]
|
D | ir3_sched.c |
  43  #define di(instr, fmt, ...) do { if (SCHED_DEBUG) { \    argument
  45  ir3_print_instr(instr); \
  106  struct ir3_instruction *instr;    member
  148  static void sched_node_init(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr);
  149  static void sched_node_add_dep(struct ir3_instruction *instr, struct ir3_instruction *src, int i);
  151  static bool is_scheduled(struct ir3_instruction *instr)    in is_scheduled() argument
  153  return !!(instr->flags & IR3_INSTR_MARK);    in is_scheduled()
  157  schedule(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)    in schedule() argument
  159  debug_assert(ctx->block == instr->block);    in schedule()
  163  list_delinit(&instr->node);    in schedule()
  [all …]
|
D | ir3_cp.c |
  56  static bool is_eligible_mov(struct ir3_instruction *instr,    in is_eligible_mov() argument
  59  if (is_same_type_mov(instr)) {    in is_eligible_mov()
  60  struct ir3_register *dst = instr->regs[0];    in is_eligible_mov()
  61  struct ir3_register *src = instr->regs[1];    in is_eligible_mov()
  152  lower_immed(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr, unsigned n,    in lower_immed() argument
  161  if (!ir3_valid_flags(instr, n, new_flags))    in lower_immed()
  169  bool f_opcode = (is_cat2_float(instr->opc) ||    in lower_immed()
  170  is_cat3_float(instr->opc)) ? true : false;    in lower_immed()
  235  instr->regs[n + 1] = reg;    in lower_immed()
  241  unuse(struct ir3_instruction *instr)    in unuse() argument
  [all …]
|
D | ir3_postsched.c |
  44  #define di(instr, fmt, ...) do { if (SCHED_DEBUG) { \    argument
  46  ir3_print_instr(instr); \
  70  struct ir3_instruction *instr;    member
  84  schedule(struct ir3_postsched_ctx *ctx, struct ir3_instruction *instr)    in schedule() argument
  86  debug_assert(ctx->block == instr->block);    in schedule()
  90  list_delinit(&instr->node);    in schedule()
  92  di(instr, "schedule");    in schedule()
  94  list_addtail(&instr->node, &instr->block->instr_list);    in schedule()
  96  struct ir3_postsched_node *n = instr->data;    in schedule()
  99  if (is_meta(instr) && (instr->opc != OPC_META_TEX_PREFETCH))    in schedule()
  [all …]
|
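ir3_sched.c and ir3_postsched.c above both route their scheduling traces through a di(instr, fmt, ...) macro that only produces output when SCHED_DEBUG is set (line 92 shows a call: di(instr, "schedule")). A self-contained sketch of that macro shape, with stand-in names instead of the real ir3 types and printer:

    #include <stdio.h>

    #define SCHED_DEBUG 1

    struct my_instr { unsigned serialno; };          /* stand-in for ir3_instruction */

    static void my_print_instr(const struct my_instr *instr)   /* stand-in for ir3_print_instr() */
    {
        printf("%04u: ...\n", instr->serialno);
    }

    /* Only emits output when scheduler debugging is switched on; relies on
     * the widely supported ##__VA_ARGS__ extension for the optional args. */
    #define di(instr, fmt, ...) do { if (SCHED_DEBUG) {   \
            printf("SCHED: " fmt ": ", ##__VA_ARGS__);    \
            my_print_instr(instr);                        \
        } } while (0)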
/external/vixl/src/aarch64/ |
D | disasm-aarch64.cc |
  65  void Disassembler::VisitAddSubImmediate(const Instruction *instr) {    in VisitAddSubImmediate() argument
  66  bool rd_is_zr = RdIsZROrSP(instr);    in VisitAddSubImmediate()
  68  (rd_is_zr || RnIsZROrSP(instr)) && (instr->GetImmAddSub() == 0) ? true    in VisitAddSubImmediate()
  75  switch (instr->Mask(AddSubImmediateMask)) {    in VisitAddSubImmediate()
  110  Format(instr, mnemonic, form);    in VisitAddSubImmediate()
  114  void Disassembler::VisitAddSubShifted(const Instruction *instr) {    in VisitAddSubShifted() argument
  115  bool rd_is_zr = RdIsZROrSP(instr);    in VisitAddSubShifted()
  116  bool rn_is_zr = RnIsZROrSP(instr);    in VisitAddSubShifted()
  122  switch (instr->Mask(AddSubShiftedMask)) {    in VisitAddSubShifted()
  160  Format(instr, mnemonic, form);    in VisitAddSubShifted()
  [all …]
|
D | cpu-features-auditor-aarch64.cc |
  80  void CPUFeaturesAuditor::LoadStoreHelper(const Instruction* instr) {    in LoadStoreHelper() argument
  82  switch (instr->Mask(LoadStoreMask)) {    in LoadStoreHelper()
  103  void CPUFeaturesAuditor::LoadStorePairHelper(const Instruction* instr) {    in LoadStorePairHelper() argument
  105  switch (instr->Mask(LoadStorePairMask)) {    in LoadStorePairHelper()
  123  void CPUFeaturesAuditor::VisitAddSubExtended(const Instruction* instr) {    in VisitAddSubExtended() argument
  125  USE(instr);    in VisitAddSubExtended()
  128  void CPUFeaturesAuditor::VisitAddSubImmediate(const Instruction* instr) {    in VisitAddSubImmediate() argument
  130  USE(instr);    in VisitAddSubImmediate()
  133  void CPUFeaturesAuditor::VisitAddSubShifted(const Instruction* instr) {    in VisitAddSubShifted() argument
  135  USE(instr);    in VisitAddSubShifted()
  [all …]
|
D | disasm-aarch64.h |
  52  virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
  57  virtual void ProcessOutput(const Instruction* instr);
  65  virtual void AppendRegisterNameToOutput(const Instruction* instr,
  70  virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr,
  75  virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
  83  virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
  89  virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
  94  virtual void AppendAddressToOutput(const Instruction* instr,
  96  virtual void AppendCodeAddressToOutput(const Instruction* instr,
  98  virtual void AppendDataAddressToOutput(const Instruction* instr,
  [all …]
|
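In VisitAddSubImmediate()/VisitAddSubShifted() above, the mnemonic comes from masking the instruction word. For the immediate class, bits 30 (op) and 29 (S) of the A64 encoding pick add/adds/sub/subs; a rough sketch of just that selection (plain C, not vixl's visitor machinery, and ignoring the cmp/cmn/mov aliases the real disassembler also handles):

    #include <stdint.h>

    /* Bits 30:29 of an A64 add/sub-immediate instruction are {op, S}. */
    static const char *add_sub_imm_mnemonic(uint32_t instr)
    {
        switch ((instr >> 29) & 0x3u) {
        case 0:  return "add";
        case 1:  return "adds";
        case 2:  return "sub";
        default: return "subs";
        }
    }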
/external/mesa3d/src/broadcom/qpu/ |
D | qpu_disasm.c |
  60  const struct v3d_qpu_instr *instr, uint8_t mux)    in v3d_qpu_disasm_raddr() argument
  63  append(disasm, "rf%d", instr->raddr_a);    in v3d_qpu_disasm_raddr()
  65  if (instr->sig.small_imm) {    in v3d_qpu_disasm_raddr()
  69  instr->raddr_b,    in v3d_qpu_disasm_raddr()
  78  append(disasm, "rf%d", instr->raddr_b);    in v3d_qpu_disasm_raddr()
  102  const struct v3d_qpu_instr *instr)    in v3d_qpu_disasm_add() argument
  104  bool has_dst = v3d_qpu_add_op_has_dst(instr->alu.add.op);    in v3d_qpu_disasm_add()
  105  int num_src = v3d_qpu_add_op_num_src(instr->alu.add.op);    in v3d_qpu_disasm_add()
  107  append(disasm, "%s", v3d_qpu_add_op_name(instr->alu.add.op));    in v3d_qpu_disasm_add()
  108  if (!v3d_qpu_sig_writes_address(disasm->devinfo, &instr->sig))    in v3d_qpu_disasm_add()
  [all …]
|
/external/mesa3d/src/gallium/drivers/freedreno/a2xx/ |
D | ir2.c |
  29  static bool scalar_possible(struct ir2_instr *instr)    in scalar_possible() argument
  31  if (instr->alu.scalar_opc == SCALAR_NONE)    in scalar_possible()
  34  return src_ncomp(instr) == 1;    in scalar_possible()
  56  static unsigned alu_vector_prio(struct ir2_instr *instr)    in alu_vector_prio() argument
  58  if (instr->alu.vector_opc == VECTOR_NONE)    in alu_vector_prio()
  61  if (is_export(instr))    in alu_vector_prio()
  65  if (instr->src_count == 3)    in alu_vector_prio()
  68  if (!scalar_possible(instr))    in alu_vector_prio()
  71  return instr->src_count == 2 ? 2 : 3;    in alu_vector_prio()
  75  static unsigned alu_scalar_prio(struct ir2_instr *instr)    in alu_scalar_prio() argument
  [all …]
|
D | ir2_nir.c |
  244  reg = &ctx->instr[res.num].ssa;    in make_src()
  253  struct ir2_instr *instr)    in set_index() argument
  255  struct ir2_reg *reg = &instr->ssa;    in set_index()
  258  ctx->ssa_map[dst->ssa.index] = instr->idx;    in set_index()
  260  assert(instr->is_ssa);    in set_index()
  263  instr->is_ssa = false;    in set_index()
  264  instr->reg = reg;    in set_index()
  272  struct ir2_instr *instr;    in ir2_instr_create() local
  274  instr = &ctx->instr[ctx->instr_count++];    in ir2_instr_create()
  275  instr->idx = ctx->instr_count - 1;    in ir2_instr_create()
  [all …]
|
/external/llvm-project/lld/ELF/ |
D | AArch64ErrataFix.cpp |
  55  static bool isADRP(uint32_t instr) {    in isADRP() argument
  56  return (instr & 0x9f000000) == 0x90000000;    in isADRP()
  65  static bool isLoadStoreClass(uint32_t instr) {    in isLoadStoreClass() argument
  66  return (instr & 0x0a000000) == 0x08000000;    in isLoadStoreClass()
  81  static bool isST1MultipleOpcode(uint32_t instr) {    in isST1MultipleOpcode() argument
  82  return (instr & 0x0000f000) == 0x00002000 ||    in isST1MultipleOpcode()
  83  (instr & 0x0000f000) == 0x00006000 ||    in isST1MultipleOpcode()
  84  (instr & 0x0000f000) == 0x00007000 ||    in isST1MultipleOpcode()
  85  (instr & 0x0000f000) == 0x0000a000;    in isST1MultipleOpcode()
  88  static bool isST1Multiple(uint32_t instr) {    in isST1Multiple()
  [all …]
|
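AArch64ErrataFix.cpp classifies raw instruction words with bit masks; isADRP() above matches the A64 ADRP encoding (bit 31 set, bits 28:24 = 0b10000). A small standalone sketch that reuses the same mask and also extracts the 4 KiB page an ADRP targets (plain C; the caller is assumed to supply the instruction's own address):

    #include <stdint.h>

    static int is_adrp(uint32_t instr)
    {
        return (instr & 0x9f000000u) == 0x90000000u;   /* same mask as isADRP() */
    }

    /* ADRP fields: immlo = bits 30:29, immhi = bits 23:5.  The 21-bit value
     * immhi:immlo, sign-extended and shifted left by 12, is added to the
     * instruction's 4 KiB-aligned address. */
    static uint64_t adrp_target_page(uint32_t instr, uint64_t pc)
    {
        uint64_t immlo = (instr >> 29) & 0x3u;
        uint64_t immhi = (instr >> 5) & 0x7ffffu;
        uint64_t imm21 = (immhi << 2) | immlo;
        if (imm21 & 0x100000u)                 /* sign-extend the 21-bit field */
            imm21 |= ~0x1fffffULL;
        return (pc & ~0xfffULL) + (imm21 << 12);
    }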
/external/mesa3d/src/gallium/drivers/r600/sfn/ |
D | sfn_emitaluinstruction.h |
  52  bool do_emit(nir_instr* instr) override;
  54  void split_constants(const nir_alu_instr& instr, unsigned nsrc_comp);
  56  bool emit_mov(const nir_alu_instr& instr);
  57  bool emit_alu_op1(const nir_alu_instr& instr, EAluOp opcode, const AluOpFlags &flags = 0);
  58  bool emit_alu_op2(const nir_alu_instr& instr, EAluOp opcode, AluOp2Opts ops = op2_opt_none);
  59  …bool emit_alu_op2_split_src_mods(const nir_alu_instr& instr, EAluOp opcode, AluOp2Opts ops = op2_o…
  61  bool emit_alu_trans_op2(const nir_alu_instr& instr, EAluOp opcode);
  63  bool emit_alu_inot(const nir_alu_instr& instr);
  64  bool emit_alu_iabs(const nir_alu_instr& instr);
  65  bool emit_alu_ineg(const nir_alu_instr& instr);
  [all …]
|
D | sfn_emitaluinstruction.cpp |
  45  const nir_alu_instr& instr = *nir_instr_as_alu(ir);    in do_emit() local
  47  r600::sfn_log << SfnLog::instr << "emit '"    in do_emit()
  49  << " bitsize: " << static_cast<int>(instr.dest.dest.ssa.bit_size)    in do_emit()
  52  preload_src(instr);    in do_emit()
  54  switch (instr.op) {    in do_emit()
  55  case nir_op_f2b32: return emit_alu_f2b32(instr);    in do_emit()
  56  case nir_op_b2f32: return emit_alu_b2f(instr);    in do_emit()
  57  case nir_op_i2b1: return emit_alu_i2orf2_b1(instr, op2_setne_int);    in do_emit()
  58  case nir_op_i2b32: return emit_alu_i2orf2_b1(instr, op2_setne_int);    in do_emit()
  59  case nir_op_f2b1: return emit_alu_i2orf2_b1(instr, op2_setne_dx10);    in do_emit()
  [all …]
|
/external/mesa3d/src/compiler/nir/ |
D | nir_opt_shrink_vectors.c |
  68  opt_shrink_vectors_alu(nir_builder *b, nir_alu_instr *instr)    in opt_shrink_vectors_alu() argument
  70  nir_ssa_def *def = &instr->dest.dest.ssa;    in opt_shrink_vectors_alu()
  72  if (nir_op_infos[instr->op].output_size == 0) {    in opt_shrink_vectors_alu()
  74  instr->dest.write_mask &=    in opt_shrink_vectors_alu()
  81  switch (instr->op) {    in opt_shrink_vectors_alu()
  95  srcs[i] = nir_ssa_for_alu_src(b, instr, i);    in opt_shrink_vectors_alu()
  113  opt_shrink_vectors_image_store(nir_builder *b, nir_intrinsic_instr *instr)    in opt_shrink_vectors_image_store() argument
  116  if (instr->intrinsic == nir_intrinsic_image_deref_store) {    in opt_shrink_vectors_image_store()
  117  nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);    in opt_shrink_vectors_image_store()
  120  format = nir_intrinsic_format(instr);    in opt_shrink_vectors_image_store()
  [all …]
|
D | nir_opt_gcm.c |
  66  nir_instr *instr;    member
  188  nir_foreach_instr_safe(instr, block) {    in gcm_pin_instructions()
  190  instr->index = state->num_instrs++;    in gcm_pin_instructions()
  192  switch (instr->type) {    in gcm_pin_instructions()
  194  switch (nir_instr_as_alu(instr)->op) {    in gcm_pin_instructions()
  202  instr->pass_flags = GCM_INSTR_SCHEDULE_EARLIER_ONLY;    in gcm_pin_instructions()
  206  if (!is_src_scalarizable(&(nir_instr_as_alu(instr)->src[0].src))) {    in gcm_pin_instructions()
  207  instr->pass_flags = GCM_INSTR_PINNED;    in gcm_pin_instructions()
  213  instr->pass_flags = 0;    in gcm_pin_instructions()
  219  if (nir_tex_instr_has_implicit_derivative(nir_instr_as_tex(instr)))    in gcm_pin_instructions()
  [all …]
|
D | nir_search_helpers.h |
  36  is_pos_power_of_two(UNUSED struct hash_table *ht, nir_alu_instr *instr,    in is_pos_power_of_two() argument
  41  if (!nir_src_is_const(instr->src[src].src))    in is_pos_power_of_two()
  45  nir_alu_type type = nir_op_infos[instr->op].input_types[src];    in is_pos_power_of_two()
  48  int64_t val = nir_src_comp_as_int(instr->src[src].src, swizzle[i]);    in is_pos_power_of_two()
  54  uint64_t val = nir_src_comp_as_uint(instr->src[src].src, swizzle[i]);    in is_pos_power_of_two()
  68  is_neg_power_of_two(UNUSED struct hash_table *ht, nir_alu_instr *instr,    in is_neg_power_of_two() argument
  73  if (!nir_src_is_const(instr->src[src].src))    in is_neg_power_of_two()
  77  nir_alu_type type = nir_op_infos[instr->op].input_types[src];    in is_neg_power_of_two()
  80  int64_t val = nir_src_comp_as_int(instr->src[src].src, swizzle[i]);    in is_neg_power_of_two()
  95  is_unsigned_multiple_of_ ## test(UNUSED struct hash_table *ht, nir_alu_instr *instr, \
  [all …]
|
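is_pos_power_of_two()/is_neg_power_of_two() above walk the swizzled constant components of a source; the per-value test underneath is the usual single-bit trick. A self-contained sketch of that scalar check (plain C, without the NIR source and swizzle plumbing):

    #include <stdbool.h>
    #include <stdint.h>

    /* A strictly positive power of two has exactly one bit set. */
    static bool val_is_pos_power_of_two(int64_t val)
    {
        return val > 0 && (val & (val - 1)) == 0;
    }

    /* The negative variant tests the magnitude, computed in a way that does
     * not overflow on INT64_MIN. */
    static bool val_is_neg_power_of_two(int64_t val)
    {
        if (val >= 0)
            return false;
        uint64_t mag = (uint64_t)(-(val + 1)) + 1;   /* |val| */
        return (mag & (mag - 1)) == 0;
    }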
/external/mesa3d/src/gallium/drivers/zink/ |
D | nir_lower_dynamic_bo_access.c |
  45  recursive_generate_bo_ssa_def(nir_builder *b, nir_intrinsic_instr *instr, nir_ssa_def *index, unsig…    in recursive_generate_bo_ssa_def() argument
  49  unsigned block_idx = instr->intrinsic == nir_intrinsic_store_ssbo;    in recursive_generate_bo_ssa_def()
  50  nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(b->shader, instr->intrinsic);    in recursive_generate_bo_ssa_def()
  52  for (unsigned i = 0; i < nir_intrinsic_infos[instr->intrinsic].num_srcs; i++) {    in recursive_generate_bo_ssa_def()
  54  nir_src_copy(&new_instr->src[i], &instr->src[i], &new_instr->instr);    in recursive_generate_bo_ssa_def()
  56  if (instr->intrinsic != nir_intrinsic_load_ubo_vec4) {    in recursive_generate_bo_ssa_def()
  57  …_intrinsic_set_align(new_instr, nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr));    in recursive_generate_bo_ssa_def()
  58  if (instr->intrinsic != nir_intrinsic_load_ssbo)    in recursive_generate_bo_ssa_def()
  59  nir_intrinsic_set_range(new_instr, nir_intrinsic_range(instr));    in recursive_generate_bo_ssa_def()
  61  new_instr->num_components = instr->num_components;    in recursive_generate_bo_ssa_def()
  [all …]
|
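recursive_generate_bo_ssa_def() above appears to rebuild a buffer access whose block index is only known at run time by cloning the intrinsic for the possible constant indices and selecting among the results. The control shape of that kind of lowering, as a generic sketch over a plain array rather than the pass's NIR-builder code (load_at is a hypothetical constant-indexed accessor):

    #include <stdint.h>

    /* Resolve a dynamic buffer index over the constant range [lo, lo + count)
     * by recursing into both halves and selecting on the runtime index. */
    static uint32_t select_tree_load(uint32_t (*load_at)(unsigned buf_idx),
                                     uint32_t index, unsigned lo, unsigned count)
    {
        if (count == 1)
            return load_at(lo);
        unsigned half = count / 2;
        uint32_t lo_half = select_tree_load(load_at, index, lo, half);
        uint32_t hi_half = select_tree_load(load_at, index, lo + half, count - half);
        return index < lo + half ? lo_half : hi_half;
    }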
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | gpu_fusible.cc |
  40  void AppendParams(const HloInstruction& instr,    in AppendParams() argument
  42  if (instr.opcode() == HloOpcode::kFusion) {    in AppendParams()
  43  params->insert(std::end(*params), std::begin(instr.fused_parameters()),    in AppendParams()
  44  std::end(instr.fused_parameters()));    in AppendParams()
  46  for (HloInstruction* operand : instr.operands()) {    in AppendParams()
  52  bool IfFusedReadsElementsMultipleTimes(const HloInstruction& instr) {    in IfFusedReadsElementsMultipleTimes() argument
  53  CHECK_NE(instr.opcode(), HloOpcode::kFusion) << "`instr` has to be unfused.";    in IfFusedReadsElementsMultipleTimes()
  54  if (instr.opcode() == HloOpcode::kReduce &&    in IfFusedReadsElementsMultipleTimes()
  55  !IsReductionFromOrToContiguousDimensions(instr)) {    in IfFusedReadsElementsMultipleTimes()
  60  if (instr.opcode() == HloOpcode::kReduceWindow) {    in IfFusedReadsElementsMultipleTimes()
  [all …]
|