/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/Hexagon/ |
D | HexagonMapAsm2IntrinV65.gen.td |
    10  …_vcmpbeq_notany DoubleRegs:$src1, DoubleRegs:$src2), (A6_vcmpbeq_notany DoubleRegs:$src1, DoubleRe…
    11  …hexagon_V6_vasruwuhsat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat HvxVR:$src1, …
    12  …on_V6_vasruwuhsat_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat HvxVR:$src1, …
    13  …hexagon_V6_vasruhubsat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat HvxVR:$src1, …
    14  …on_V6_vasruhubsat_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat HvxVR:$src1, …
    15  …gon_V6_vasruhubrndsat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsat HvxVR:$src1…
    16  …6_vasruhubrndsat_128B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsat HvxVR:$src1…
    21  …(int_hexagon_V6_vaslh_acc HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vaslh_acc HvxVR:$src1, Hvx…
    22  …hexagon_V6_vaslh_acc_128B HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vaslh_acc HvxVR:$src1, Hvx…
    23  …(int_hexagon_V6_vasrh_acc HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vasrh_acc HvxVR:$src1, Hvx…
    [all …]
|
D | HexagonMapAsm2IntrinV62.gen.td |
    11  def: Pat<(IntID HvxVR:$src1, IntRegs:$src2),
    12  (MI HvxVR:$src1, IntRegs:$src2)>;
    13  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, IntRegs:$src2),
    14  (MI HvxVR:$src1, IntRegs:$src2)>;
    18  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3),
    19  (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
    20  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2,
    22  (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
    26  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2),
    27  (MI HvxVR:$src1, HvxVR:$src2)>;
    [all …]
|
/external/swiftshader/third_party/LLVM/lib/Target/SystemZ/ |
D | SystemZInstrInfo.td |
    79  def Select32 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cc),
    82  (SystemZselect GR32:$src1, GR32:$src2, imm:$cc, PSW))]>;
    83  def Select64 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$cc),
    86  (SystemZselect GR64:$src1, GR64:$src2, imm:$cc, PSW))]>;
    546  def ADD32rr : RRI<0x1A, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
    547  "ar\t{$dst, $src2}",
    548  [(set GR32:$dst, (add GR32:$src1, GR32:$src2)),
    550  def ADD64rr : RREI<0xB908, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
    551  "agr\t{$dst, $src2}",
    552  [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
    [all …]
|
D | SystemZInstrFP.td |
    29  def SelectF32 : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2, i8imm:$cc),
    32  (SystemZselect FP32:$src1, FP32:$src2, imm:$cc, PSW))]>;
    33  def SelectF64 : Pseudo<(outs FP64:$dst), (ins FP64:$src1, FP64:$src2, i8imm:$cc),
    36  (SystemZselect FP64:$src1, FP64:$src2, imm:$cc, PSW))]>;
    89  def FCOPYSIGN32 : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2),
    90  "cpsdr\t{$dst, $src2, $src1}",
    91  [(set FP32:$dst, (fcopysign FP32:$src1, FP32:$src2))]>;
    92  def FCOPYSIGN64 : Pseudo<(outs FP64:$dst), (ins FP64:$src1, FP64:$src2),
    93  "cpsdr\t{$dst, $src2, $src1}",
    94  [(set FP64:$dst, (fcopysign FP64:$src1, FP64:$src2))]>;
    [all …]
|
/external/llvm/lib/Target/X86/ |
D | X86InstrXOP.td |
    89  (ins VR128:$src1, VR128:$src2),
    90  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    92  (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2))))]>,
    95  (ins VR128:$src1, i128mem:$src2),
    96  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    99  (vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
    102  (ins i128mem:$src1, VR128:$src2),
    103  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    106  (vt128 VR128:$src2))))]>,
    128  (ins VR128:$src1, u8imm:$src2),
    [all …]
|
D | X86InstrFMA.td |
    44  (ins VR128:$src1, VR128:$src2, VR128:$src3),
    46  "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
    47  [(set VR128:$dst, (OpVT128 (Op VR128:$src2,
    52  (ins VR128:$src1, VR128:$src2, f128mem:$src3),
    54  "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
    55  [(set VR128:$dst, (OpVT128 (Op VR128:$src2, VR128:$src1,
    60  (ins VR256:$src1, VR256:$src2, VR256:$src3),
    62  "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
    63  [(set VR256:$dst, (OpVT256 (Op VR256:$src2, VR256:$src1,
    68  (ins VR256:$src1, VR256:$src2, f256mem:$src3),
    [all …]
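The patterns above reorder $src1 and $src2 in the DAG node because the hardware's 132/213/231 suffixes fix which operands are multiplied and which is added. As a reading aid only — this reflects the standard Intel FMA3 naming, not text from X86InstrFMA.td — a scalar C++ sketch of the three forms:

    #include <cmath>

    // Hypothetical scalar models of the FMA3 operand orders. In the real
    // destructive encodings, dst is also the first source operand.
    double vfmadd132(double dst, double src2, double src3) {
      return std::fma(dst, src3, src2);   // dst = dst * src3 + src2
    }
    double vfmadd213(double dst, double src2, double src3) {
      return std::fma(src2, dst, src3);   // dst = src2 * dst + src3
    }
    double vfmadd231(double dst, double src2, double src3) {
      return std::fma(src2, src3, dst);   // dst = src2 * src3 + dst
    }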
|
D | X86InstrSSE.td |
    246  def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
    248  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
    249  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
    250  [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], itins.rr, d>,
    253  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
    255  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
    256  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
    257  [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], itins.rm, d>,
    267  def rr_Int : SI_Int<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
    269  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
    [all …]
|
/external/llvm/lib/Target/Hexagon/ |
D | HexagonInstrInfoV5.td |
    48  (sra (i64 (add (i64 (sra I64:$src1, u6ImmPred:$src2)), 1)),
    51  bits<6> src2;
    52  let Inst{13-8} = src2;
    57  : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
    58  "$dst = asrrnd($src1, #$src2)">;
    110  (ins PredRegs:$src1, f32Ext:$src2),
    111  "if ($src1) $dst = #$src2", []>,
    117  (ins PredRegs:$src1, f32Ext:$src2),
    118  "if (!$src1) $dst = #$src2", []>,
    179  def: Pat<(f32 (fadd F32:$src1, F32:$src2)),
    [all …]
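Line 48 is the rounding arithmetic-shift-right idiom that asrrnd is selected from: shift, add one, then shift once more. A minimal scalar sketch of that expression, assuming the final shift amount (cut off in the excerpt) is one, as in the usual rounding idiom; the function name is illustrative:

    #include <cstdint>

    // Models the selection pattern on line 48: ((x >> n) + 1) >> 1,
    // i.e. an arithmetic shift right whose result is rounded.
    int64_t asrrnd_model(int64_t x, unsigned n) {
      return ((x >> n) + 1) >> 1;
    }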
|
D | HexagonInstrInfoV60.td |
    60  : V6_LDInst <(outs VectorRegs:$dst), (ins IntRegs:$src1, s4_6Imm:$src2),
    65  : V6_LDInst <(outs VectorRegs128B:$dst), (ins IntRegs:$src1, s4_7Imm:$src2),
    69  def V6_vL32b_ai : T_vload_ai <"$dst = vmem($src1+#$src2)">,
    71  def V6_vL32b_nt_ai : T_vload_ai <"$dst = vmem($src1+#$src2):nt">,
    74  def V6_vL32b_ai_128B : T_vload_ai_128B <"$dst = vmem($src1+#$src2)">,
    76  def V6_vL32b_nt_ai_128B : T_vload_ai_128B <"$dst = vmem($src1+#$src2):nt">,
    81  def V6_vL32Ub_ai : T_vload_ai <"$dst = vmemu($src1+#$src2)">,
    83  def V6_vL32Ub_ai_128B : T_vload_ai_128B <"$dst = vmemu($src1+#$src2)">,
    89  def V6_vL32b_cur_ai : T_vload_ai <"$dst.cur = vmem($src1+#$src2)">,
    91  def V6_vL32b_nt_cur_ai : T_vload_ai <"$dst.cur = vmem($src1+#$src2):nt">,
    [all …]
|
/external/pcre/dist2/src/sljit/ |
D | sljitNativePPC_32.c |
    45  sljit_s32 dst, sljit_s32 src1, sljit_s32 src2) in emit_single_op() argument
    53  if (dst != src2) in emit_single_op()
    54  return push_inst(compiler, OR | S(src2) | A(dst) | B(src2)); in emit_single_op()
    62  return push_inst(compiler, EXTSB | S(src2) | A(dst)); in emit_single_op()
    63  return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 24)); in emit_single_op()
    66  return push_inst(compiler, EXTSB | S(src2) | A(dst)); in emit_single_op()
    68  SLJIT_ASSERT(dst == src2); in emit_single_op()
    77  return push_inst(compiler, EXTSH | S(src2) | A(dst)); in emit_single_op()
    78  return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 16)); in emit_single_op()
    81  SLJIT_ASSERT(dst == src2); in emit_single_op()
    [all …]
|
D | sljitNativePPC_64.c |
    126  FAIL_IF(push_inst(compiler, EXTSW | S(src2) | A(TMP_REG2))); \
    127  src2 = TMP_REG2; \
    137  FAIL_IF(push_inst(compiler, EXTSW | S(src2) | A(TMP_REG2))); \
    138  src2 = TMP_REG2; \
    149  sljit_s32 dst, sljit_s32 src1, sljit_s32 src2) in emit_single_op() argument
    155  if (dst != src2) in emit_single_op()
    156  return push_inst(compiler, OR | S(src2) | A(dst) | B(src2)); in emit_single_op()
    164  return push_inst(compiler, EXTSW | S(src2) | A(dst)); in emit_single_op()
    165  return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 0)); in emit_single_op()
    168  SLJIT_ASSERT(dst == src2); in emit_single_op()
    [all …]
|
D | sljitNativeMIPS_32.c |
    44  FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \
    46  FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \
    50  FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \
    52  FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | D(dst), DR(dst))); \
    58  FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); \
    60  FAIL_IF(push_inst(compiler, op_imm | T(src1) | D(dst) | SH_IMM(src2), DR(dst))); \
    64  FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \
    66  FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | D(dst), DR(dst))); \
    70  sljit_s32 dst, sljit_s32 src1, sljit_sw src2) in emit_single_op() argument
    80  if (dst != src2) in emit_single_op()
    [all …]
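The emit_single_op excerpts above (most visibly the PPC ones) lower sljit's portable narrow-move opcodes the same way: an unsigned byte or half-word move clears the upper bits (INS_CLEAR_LEFT), a signed one sign-extends (EXTSB/EXTSH/EXTSW). A schematic C++ sketch of that split at the value level — not sljit's actual code, and the enum names are made up for illustration:

    #include <cstdint>

    enum class NarrowMove { U8, S8, U16, S16 };

    // What the lowering achieves: zero-extend the value for the unsigned
    // variants, sign-extend it for the signed ones.
    int32_t lower_narrow_move(NarrowMove op, int32_t src2) {
      switch (op) {
        case NarrowMove::U8:  return static_cast<uint8_t>(src2);
        case NarrowMove::S8:  return static_cast<int8_t>(src2);
        case NarrowMove::U16: return static_cast<uint16_t>(src2);
        case NarrowMove::S16: return static_cast<int16_t>(src2);
      }
      return src2;
    }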
|
/external/swiftshader/third_party/LLVM/lib/Target/X86/ |
D | X86InstrSSE.td |
    26  def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
    28  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
    29  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
    30  [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
    32  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
    34  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
    35  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
    36  [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
    44  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
    46  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
    [all …]
|
D | X86InstrShiftRotate.td |
    34  def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
    35  "shl{b}\t{$src2, $dst|$dst, $src2}",
    36  [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;
    39  def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
    40  "shl{w}\t{$src2, $dst|$dst, $src2}",
    41  [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
    42  def SHL32ri : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
    43  "shl{l}\t{$src2, $dst|$dst, $src2}",
    44  [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))]>;
    46  (ins GR64:$src1, i8imm:$src2),
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/ |
D | X86InstrXOP.td |
    98  (ins VR128:$src1, VR128:$src2),
    99  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    101  (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2))))]>,
    104  (ins VR128:$src1, i128mem:$src2),
    105  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    108  (vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
    111  (ins i128mem:$src1, VR128:$src2),
    112  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
    115  (vt128 VR128:$src2))))]>,
    120  (ins VR128:$src1, VR128:$src2),
    [all …]
|
D | X86InstrFMA.td |
    41  (ins RC:$src1, RC:$src2, RC:$src3),
    43  "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
    44  [(set RC:$dst, (VT (Op RC:$src2, RC:$src1, RC:$src3)))]>,
    49  (ins RC:$src1, RC:$src2, x86memop:$src3),
    51  "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
    52  [(set RC:$dst, (VT (Op RC:$src2, RC:$src1,
    62  (ins RC:$src1, RC:$src2, RC:$src3),
    64  "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
    69  (ins RC:$src1, RC:$src2, x86memop:$src3),
    71  "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
    [all …]
|
D | X86InstrShiftRotate.td |
    34  def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
    35  "shl{b}\t{$src2, $dst|$dst, $src2}",
    36  [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;
    39  def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
    40  "shl{w}\t{$src2, $dst|$dst, $src2}",
    41  [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))]>,
    43  def SHL32ri : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
    44  "shl{l}\t{$src2, $dst|$dst, $src2}",
    45  [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))]>,
    48  (ins GR64:$src1, u8imm:$src2),
    [all …]
|
/external/swiftshader/third_party/LLVM/lib/Target/MSP430/ |
D | MSP430InstrInfo.td |
    125  def Select8 : Pseudo<(outs GR8:$dst), (ins GR8:$src, GR8:$src2, i8imm:$cc),
    128  (MSP430selectcc GR8:$src, GR8:$src2, imm:$cc))]>;
    129  def Select16 : Pseudo<(outs GR16:$dst), (ins GR16:$src, GR16:$src2, i8imm:$cc),
    132  (MSP430selectcc GR16:$src, GR16:$src2, imm:$cc))]>;
    345  (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
    346  "add.b\t{$src2, $dst}",
    347  [(set GR8:$dst, (add GR8:$src, GR8:$src2)),
    350  (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
    351  "add.w\t{$src2, $dst}",
    352  [(set GR16:$dst, (add GR16:$src, GR16:$src2)),
    [all …]
|
/external/llvm/lib/Target/MSP430/ |
D | MSP430InstrInfo.td |
    125  def Select8 : Pseudo<(outs GR8:$dst), (ins GR8:$src, GR8:$src2, i8imm:$cc),
    128  (MSP430selectcc GR8:$src, GR8:$src2, imm:$cc))]>;
    129  def Select16 : Pseudo<(outs GR16:$dst), (ins GR16:$src, GR16:$src2, i8imm:$cc),
    132  (MSP430selectcc GR16:$src, GR16:$src2, imm:$cc))]>;
    345  (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
    346  "add.b\t{$src2, $dst}",
    347  [(set GR8:$dst, (add GR8:$src, GR8:$src2)),
    350  (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
    351  "add.w\t{$src2, $dst}",
    352  [(set GR16:$dst, (add GR16:$src, GR16:$src2)),
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/MSP430/ |
D | MSP430InstrInfo.td |
    132  def Select8 : Pseudo<(outs GR8:$dst), (ins GR8:$src, GR8:$src2, i8imm:$cc),
    135  (MSP430selectcc GR8:$src, GR8:$src2, imm:$cc))]>;
    136  def Select16 : Pseudo<(outs GR16:$dst), (ins GR16:$src, GR16:$src2, i8imm:$cc),
    139  (MSP430selectcc GR16:$src, GR16:$src2, imm:$cc))]>;
    353  (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
    354  "add.b\t{$src2, $dst}",
    355  [(set GR8:$dst, (add GR8:$src, GR8:$src2)),
    358  (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
    359  "add.w\t{$src2, $dst}",
    360  [(set GR16:$dst, (add GR16:$src, GR16:$src2)),
    [all …]
|
/external/v8/src/ia32/ |
D | assembler-ia32.h |
    1157  void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { in vfmadd132sd() argument
    1158  vfmadd132sd(dst, src1, Operand(src2)); in vfmadd132sd()
    1160  void vfmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { in vfmadd213sd() argument
    1161  vfmadd213sd(dst, src1, Operand(src2)); in vfmadd213sd()
    1163  void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { in vfmadd231sd() argument
    1164  vfmadd231sd(dst, src1, Operand(src2)); in vfmadd231sd()
    1166  void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) { in vfmadd132sd() argument
    1167  vfmasd(0x99, dst, src1, src2); in vfmadd132sd()
    1169  void vfmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) { in vfmadd213sd() argument
    1170  vfmasd(0xa9, dst, src1, src2); in vfmadd213sd()
    [all …]
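Each FMA helper above comes in two overloads: the register form wraps its last operand in Operand and forwards, and the Operand form hands an opcode byte (0x99, 0xa9, ...) to a shared emitter. A stripped-down C++ sketch of that wrapper pattern — the types and the emitter body here are placeholders, not V8's real implementation:

    #include <cstdint>

    struct XMMRegister { int code; };
    struct Operand { explicit Operand(XMMRegister r) : base(r) {} XMMRegister base; };

    class AssemblerSketch {
     public:
      // Register form forwards to the Operand form...
      void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
        vfmadd132sd(dst, src1, Operand(src2));
      }
      // ...which passes the opcode byte to one shared encoder.
      void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
        vfmasd(0x99, dst, src1, src2);
      }

     private:
      void vfmasd(uint8_t opcode, XMMRegister dst, XMMRegister src1, Operand src2) {
        // A real assembler would emit the VEX prefix, opcode, and ModRM bytes here.
        (void)opcode; (void)dst; (void)src1; (void)src2;
      }
    };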
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | sum_squares_msa.c |
    22  uint64_t src0, src1, src2, src3; in vpx_sum_squares_2d_i16_msa() local
    26  LD4(src, src_stride, src0, src1, src2, src3); in vpx_sum_squares_2d_i16_msa()
    28  INSERT_D2_SH(src2, src3, diff1); in vpx_sum_squares_2d_i16_msa()
    35  v8i16 src0, src1, src2, src3, src4, src5, src6, src7; in vpx_sum_squares_2d_i16_msa() local
    37  LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
    39  DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1); in vpx_sum_squares_2d_i16_msa()
    47  v8i16 src0, src1, src2, src3, src4, src5, src6, src7; in vpx_sum_squares_2d_i16_msa() local
    49  LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
    51  DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1); in vpx_sum_squares_2d_i16_msa()
    54  LD_SH8(src + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
    [all …]
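The MSA code above loads rows of int16 samples and accumulates each row's dot product with itself. A plain scalar equivalent of what vpx_sum_squares_2d_i16 appears to compute — my reconstruction of the intent with an assumed signature, not libvpx code:

    #include <cstdint>

    // Sum of squares over a size x size block of int16 samples laid out
    // with the given row stride.
    uint64_t sum_squares_2d_i16_scalar(const int16_t *src, int stride, int size) {
      uint64_t sum = 0;
      for (int r = 0; r < size; ++r) {
        for (int c = 0; c < size; ++c) {
          int64_t v = src[r * stride + c];
          sum += static_cast<uint64_t>(v * v);  // v*v fits comfortably in 64 bits
        }
      }
      return sum;
    }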
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | fdot2.ll |
    22  <2 x half> addrspace(1)* %src2,
    26  %src2.vec = load <2 x half>, <2 x half> addrspace(1)* %src2
    29  %src2.el1 = extractelement <2 x half> %src2.vec, i64 0
    32  %src2.el2 = extractelement <2 x half> %src2.vec, i64 1
    34  %mul2 = fmul half %src1.el2, %src2.el2
    35  %mul1 = fmul half %src1.el1, %src2.el1
    59  <2 x half> addrspace(1)* %src2,
    63  %src2.vec = load <2 x half>, <2 x half> addrspace(1)* %src2
    67  %src2.el1 = extractelement <2 x half> %src2.vec, i64 0
    68  %csrc2.el1 = fpext half %src2.el1 to float
    [all …]
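These tests check whether pairs of half-precision multiplies plus adds can be folded into an fdot2-style instruction. At the value level the operation being formed is a two-element FP16 dot product accumulated into a float; a scalar C++ model, with half approximated by float and the names chosen only for illustration:

    // a and b model the two <2 x half> operands; acc is the float accumulator.
    struct Half2 { float x, y; };

    float dot2_f32_f16_model(Half2 a, Half2 b, float acc) {
      return a.x * b.x + a.y * b.y + acc;
    }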
|
D | mad-mix.ll |
    11  define float @v_mad_mix_f32_f16lo_f16lo_f16lo(half %src0, half %src1, half %src2) #0 {
    14  %src2.ext = fpext half %src2 to float
    15  %result = tail call float @llvm.fmuladd.f32(float %src0.ext, float %src1.ext, float %src2.ext)
    23  define float @v_mad_mix_f32_f16hi_f16hi_f16hi_int(i32 %src0, i32 %src1, i32 %src2) #0 {
    26  %src2.hi = lshr i32 %src2, 16
    29  %src2.i16 = trunc i32 %src2.hi to i16
    32  %src2.fp16 = bitcast i16 %src2.i16 to half
    35  %src2.ext = fpext half %src2.fp16 to float
    36  %result = tail call float @llvm.fmuladd.f32(float %src0.ext, float %src1.ext, float %src2.ext)
    45  …oat @v_mad_mix_f32_f16hi_f16hi_f16hi_elt(<2 x half> %src0, <2 x half> %src1, <2 x half> %src2) #0 {
    [all …]
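The _f16hi_ variants above spell out the recipe for using the high half of a packed 32-bit value as an fp16 operand: shift right by 16, truncate to i16, bitcast to half, then extend to float for the fmuladd. The bit-extraction step in C++ (the decode to half itself is left abstract; the function name is illustrative):

    #include <cstdint>

    // Pulls the upper 16 bits out of a packed word, as lines 26/29 do with
    // lshr + trunc; the test then bitcasts these bits to half and fpexts them.
    uint16_t high_half_bits(uint32_t packed) {
      return static_cast<uint16_t>(packed >> 16);
    }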
|
/external/v8/src/x64/ |
D | assembler-x64.h |
    1068  void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
    1070  void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
    1090  void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
    1091  vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
    1093  void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
    1094  vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
    1140  void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
    1141  vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
    1143  void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
    1144  vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
    [all …]
|