
Searched refs:src1 (Results 1 – 25 of 484) sorted by relevance


/external/mesa3d/prebuilt-intermediates/nir/
nir_builder_opcodes.h
39 nir_ball_fequal2(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_ball_fequal2() argument
41 return nir_build_alu(build, nir_op_ball_fequal2, src0, src1, NULL, NULL); in nir_ball_fequal2()
44 nir_ball_fequal3(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_ball_fequal3() argument
46 return nir_build_alu(build, nir_op_ball_fequal3, src0, src1, NULL, NULL); in nir_ball_fequal3()
49 nir_ball_fequal4(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_ball_fequal4() argument
51 return nir_build_alu(build, nir_op_ball_fequal4, src0, src1, NULL, NULL); in nir_ball_fequal4()
54 nir_ball_iequal2(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_ball_iequal2() argument
56 return nir_build_alu(build, nir_op_ball_iequal2, src0, src1, NULL, NULL); in nir_ball_iequal2()
59 nir_ball_iequal3(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_ball_iequal3() argument
61 return nir_build_alu(build, nir_op_ball_iequal3, src0, src1, NULL, NULL); in nir_ball_iequal3()
[all …]
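The generated helpers above all share one shape: a fixed-arity wrapper that forwards its operands to nir_build_alu and pads the unused source slots with NULL. A minimal C sketch of that wrapper pattern, using hypothetical stand-in types rather than the real NIR headers:

#include <stddef.h>

typedef struct ssa_def ssa_def;     /* stand-in for nir_ssa_def */
typedef struct builder builder;     /* stand-in for nir_builder */
typedef enum { OP_BALL_FEQUAL2 /* , ... */ } alu_op;

/* Variadic-arity core: up to four sources, unused slots are NULL. */
ssa_def *build_alu(builder *b, alu_op op,
                   ssa_def *s0, ssa_def *s1, ssa_def *s2, ssa_def *s3);

/* Fixed-arity convenience wrapper, as the generator emits it. */
static inline ssa_def *
ball_fequal2(builder *b, ssa_def *src0, ssa_def *src1)
{
    return build_alu(b, OP_BALL_FEQUAL2, src0, src1, NULL, NULL);
}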
nir_constant_expressions.c
355 const struct float32_vec src1 = { in evaluate_ball_fequal2() local
364 dst.x = dst.y = dst.z = dst.w = ((src0.x == src1.x) && (src0.y == src1.y)); in evaluate_ball_fequal2()
381 const struct float64_vec src1 = { in evaluate_ball_fequal2() local
390 dst.x = dst.y = dst.z = dst.w = ((src0.x == src1.x) && (src0.y == src1.y)); in evaluate_ball_fequal2()
421 const struct float32_vec src1 = { in evaluate_ball_fequal3() local
430 … dst.x = dst.y = dst.z = dst.w = ((src0.x == src1.x) && (src0.y == src1.y) && (src0.z == src1.z)); in evaluate_ball_fequal3()
447 const struct float64_vec src1 = { in evaluate_ball_fequal3() local
456 … dst.x = dst.y = dst.z = dst.w = ((src0.x == src1.x) && (src0.y == src1.y) && (src0.z == src1.z)); in evaluate_ball_fequal3()
487 const struct float32_vec src1 = { in evaluate_ball_fequal4() local
496 ….x = dst.y = dst.z = dst.w = ((src0.x == src1.x) && (src0.y == src1.y) && (src0.z == src1.z) && (s… in evaluate_ball_fequal4()
[all …]
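Each evaluate_* body above folds an all-channels-equal compare down to one boolean and broadcasts it to every destination channel. A scalar C sketch of the ball_fequal2 rule (the bit-size dispatch of the real file is omitted, and representing "true" as 1.0f is an assumption; NIR's actual boolean encoding may differ):

#include <stdbool.h>

struct float32_vec { float x, y, z, w; };

/* ball_fequal2: compare only the first two channels; broadcast the
 * single boolean result to all four destination channels. */
static struct float32_vec
evaluate_ball_fequal2(struct float32_vec src0, struct float32_vec src1)
{
    bool all = (src0.x == src1.x) && (src0.y == src1.y);
    float r = all ? 1.0f : 0.0f;   /* assumed encoding of "true" */
    return (struct float32_vec){ r, r, r, r };
}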
/external/llvm/lib/Target/Hexagon/
HexagonInstrInfoV60.td
60 : V6_LDInst <(outs VectorRegs:$dst), (ins IntRegs:$src1, s4_6Imm:$src2),
65 : V6_LDInst <(outs VectorRegs128B:$dst), (ins IntRegs:$src1, s4_7Imm:$src2),
69 def V6_vL32b_ai : T_vload_ai <"$dst = vmem($src1+#$src2)">,
71 def V6_vL32b_nt_ai : T_vload_ai <"$dst = vmem($src1+#$src2):nt">,
74 def V6_vL32b_ai_128B : T_vload_ai_128B <"$dst = vmem($src1+#$src2)">,
76 def V6_vL32b_nt_ai_128B : T_vload_ai_128B <"$dst = vmem($src1+#$src2):nt">,
81 def V6_vL32Ub_ai : T_vload_ai <"$dst = vmemu($src1+#$src2)">,
83 def V6_vL32Ub_ai_128B : T_vload_ai_128B <"$dst = vmemu($src1+#$src2)">,
89 def V6_vL32b_cur_ai : T_vload_ai <"$dst.cur = vmem($src1+#$src2)">,
91 def V6_vL32b_nt_cur_ai : T_vload_ai <"$dst.cur = vmem($src1+#$src2):nt">,
[all …]
HexagonInstrInfoV5.td
48 (sra (i64 (add (i64 (sra I64:$src1, u6ImmPred:$src2)), 1)),
57 : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
58 "$dst = asrrnd($src1, #$src2)">;
82 def CONST64_Float_Real : LDInst<(outs DoubleRegs:$dst), (ins f64imm:$src1),
83 "$dst = CONST64(#$src1)",
84 [(set F64:$dst, fpimm:$src1)]>,
88 def CONST32_Float_Real : LDInst<(outs IntRegs:$dst), (ins f32imm:$src1),
89 "$dst = CONST32(#$src1)",
90 [(set F32:$dst, fpimm:$src1)]>,
102 def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32Ext:$src1),
[all …]
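The asrrnd selection pattern above is a rounding arithmetic shift: shift right, add the rounding bit, shift once more. Mirrored in C exactly as the pattern is written (right-shifting a negative signed value is implementation-defined in C, though arithmetic on mainstream compilers):

#include <stdint.h>

/* Rounding arithmetic shift right, per the TableGen pattern
 * (sra (add (sra src1, src2), 1), 1). */
static int64_t asrrnd(int64_t src1, unsigned src2)
{
    return ((src1 >> src2) + 1) >> 1;
}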
HexagonIntrinsicsV60.td
29 (ins VecDblRegs:$src1),
30 "$dst=vassignp_W($src1)",
31 [(set VecDblRegs:$dst, (int_hexagon_V6_vassignp VecDblRegs:$src1))]>;
35 (ins VecDblRegs128B:$src1),
36 "$dst=vassignp_W_128B($src1)",
38 VecDblRegs128B:$src1))]>;
42 (ins VecDblRegs:$src1),
43 "$dst=lo_W($src1)",
44 [(set VectorRegs:$dst, (int_hexagon_V6_lo VecDblRegs:$src1))]>;
48 (ins VecDblRegs:$src1),
[all …]
/external/llvm/lib/Target/X86/
X86InstrXOP.td
89 (ins VR128:$src1, VR128:$src2),
90 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
92 (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2))))]>,
95 (ins VR128:$src1, i128mem:$src2),
96 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
98 (vt128 (OpNode (vt128 VR128:$src1),
102 (ins i128mem:$src1, VR128:$src2),
103 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
105 (vt128 (OpNode (vt128 (bitconvert (loadv2i64 addr:$src1))),
128 (ins VR128:$src1, u8imm:$src2),
[all …]
X86InstrShiftRotate.td
18 let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
20 def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
22 [(set GR8:$dst, (shl GR8:$src1, CL))], IIC_SR>;
23 def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
25 [(set GR16:$dst, (shl GR16:$src1, CL))], IIC_SR>, OpSize16;
26 def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
28 [(set GR32:$dst, (shl GR32:$src1, CL))], IIC_SR>, OpSize32;
29 def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
31 [(set GR64:$dst, (shl GR64:$src1, CL))], IIC_SR>;
34 def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
[all …]
/external/opencv/cxcore/src/
cxcmp.cpp
57 worktype a1 = _toggle_macro_(src1[x]), \
67 worktype a1 = _toggle_macro_(src1[x*2]), \
70 a1 = _toggle_macro_(src1[x*2+1]); \
81 worktype a1 = _toggle_macro_(src1[x*3]), \
84 a1 = _toggle_macro_(src1[x*3+1]); \
88 a1 = _toggle_macro_(src1[x*3+2]); \
99 worktype a1 = _toggle_macro_(src1[x*4]), \
102 a1 = _toggle_macro_(src1[x*4+1]); \
106 a1 = _toggle_macro_(src1[x*4+2]); \
110 a1 = _toggle_macro_(src1[x*4+3]); \
[all …]
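The macro fragments above unroll the per-pixel comparison across 1-4 interleaved channels, passing each loaded value through _toggle_macro_ first. A plain-C sketch of that interleaved-channel walk for the 3-channel case (function and parameter names are hypothetical; toggle stands in for _toggle_macro_):

/* Element-wise binary op over a 3-channel interleaved row. */
static void row_op_3ch(const int *src1, const int *src2, int *dst,
                       int width, int (*toggle)(int), int (*op)(int, int))
{
    for (int x = 0; x < width; x++) {
        dst[x*3 + 0] = op(toggle(src1[x*3 + 0]), toggle(src2[x*3 + 0]));
        dst[x*3 + 1] = op(toggle(src1[x*3 + 1]), toggle(src2[x*3 + 1]));
        dst[x*3 + 2] = op(toggle(src1[x*3 + 2]), toggle(src2[x*3 + 2]));
    }
}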
/external/swiftshader/third_party/LLVM/lib/Target/X86/
X86InstrSSE.td
26 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
29 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
30 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
32 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
35 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
36 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
44 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
47 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
50 RC:$src1, RC:$src2))]>;
51 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
[all …]
X86InstrShiftRotate.td
18 let Constraints = "$src1 = $dst" in {
20 def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
22 [(set GR8:$dst, (shl GR8:$src1, CL))]>;
23 def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
25 [(set GR16:$dst, (shl GR16:$src1, CL))]>, OpSize;
26 def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
28 [(set GR32:$dst, (shl GR32:$src1, CL))]>;
29 def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
31 [(set GR64:$dst, (shl GR64:$src1, CL))]>;
34 def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
[all …]
X86InstrCompiler.td
358 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
360 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
365 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
368 (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
370 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
373 (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
376 (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
379 (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
383 (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
386 (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
[all …]
/external/v8/src/ia32/
assembler-ia32.h
1089 void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { in vfmadd132sd() argument
1090 vfmadd132sd(dst, src1, Operand(src2)); in vfmadd132sd()
1092 void vfmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { in vfmadd213sd() argument
1093 vfmadd213sd(dst, src1, Operand(src2)); in vfmadd213sd()
1095 void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { in vfmadd231sd() argument
1096 vfmadd231sd(dst, src1, Operand(src2)); in vfmadd231sd()
1098 void vfmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) { in vfmadd132sd() argument
1099 vfmasd(0x99, dst, src1, src2); in vfmadd132sd()
1101 void vfmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) { in vfmadd213sd() argument
1102 vfmasd(0xa9, dst, src1, src2); in vfmadd213sd()
[all …]
/external/swiftshader/src/Shader/
ShaderCore.hpp
233 void add(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
234 void iadd(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
235 void sub(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
236 void isub(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
237 void mad(Vector4f &dst, const Vector4f &src0, const Vector4f &src1, const Vector4f &src2);
238 void imad(Vector4f &dst, const Vector4f &src0, const Vector4f &src1, const Vector4f &src2);
239 void mul(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
240 void imul(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
242 void div(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
243 void idiv(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
[all …]
ShaderCore.cpp
659 void ShaderCore::add(Vector4f &dst, const Vector4f &src0, const Vector4f &src1) in add() argument
661 dst.x = src0.x + src1.x; in add()
662 dst.y = src0.y + src1.y; in add()
663 dst.z = src0.z + src1.z; in add()
664 dst.w = src0.w + src1.w; in add()
667 void ShaderCore::iadd(Vector4f &dst, const Vector4f &src0, const Vector4f &src1) in iadd() argument
669 dst.x = As<Float4>(As<Int4>(src0.x) + As<Int4>(src1.x)); in iadd()
670 dst.y = As<Float4>(As<Int4>(src0.y) + As<Int4>(src1.y)); in iadd()
671 dst.z = As<Float4>(As<Int4>(src0.z) + As<Int4>(src1.z)); in iadd()
672 dst.w = As<Float4>(As<Int4>(src0.w) + As<Int4>(src1.w)); in iadd()
[all …]
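iadd() above performs an integer add on lanes that are typed as floats by reinterpreting the bits in each direction (As<Int4> / As<Float4>). The scalar C analogue uses memcpy for the reinterpretation to stay within strict-aliasing rules; a sketch:

#include <stdint.h>
#include <string.h>

/* Scalar analogue of As<Float4>(As<Int4>(a) + As<Int4>(b)): add the
 * float bit patterns as 32-bit integers, with modular wraparound. */
static float iadd_lane(float a, float b)
{
    uint32_t ia, ib;
    memcpy(&ia, &a, sizeof ia);
    memcpy(&ib, &b, sizeof ib);
    uint32_t ir = ia + ib;     /* integer add on the bit patterns */
    float r;
    memcpy(&r, &ir, sizeof r);
    return r;
}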
/external/swiftshader/third_party/LLVM/lib/Target/SystemZ/
SystemZInstrInfo.td
79 def Select32 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cc),
82 (SystemZselect GR32:$src1, GR32:$src2, imm:$cc, PSW))]>;
83 def Select64 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$cc),
86 (SystemZselect GR64:$src1, GR64:$src2, imm:$cc, PSW))]>;
541 let Constraints = "$src1 = $dst" in {
546 def ADD32rr : RRI<0x1A, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
548 [(set GR32:$dst, (add GR32:$src1, GR32:$src2)),
550 def ADD64rr : RREI<0xB908, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
552 [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
556 def ADD32rm : RXI<0x5A, (outs GR32:$dst), (ins GR32:$src1, rriaddr12:$src2),
[all …]
SystemZInstrFP.td
29 def SelectF32 : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2, i8imm:$cc),
32 (SystemZselect FP32:$src1, FP32:$src2, imm:$cc, PSW))]>;
33 def SelectF64 : Pseudo<(outs FP64:$dst), (ins FP64:$src1, FP64:$src2, i8imm:$cc),
36 (SystemZselect FP64:$src1, FP64:$src2, imm:$cc, PSW))]>;
89 def FCOPYSIGN32 : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2),
90 "cpsdr\t{$dst, $src2, $src1}",
91 [(set FP32:$dst, (fcopysign FP32:$src1, FP32:$src2))]>;
92 def FCOPYSIGN64 : Pseudo<(outs FP64:$dst), (ins FP64:$src1, FP64:$src2),
93 "cpsdr\t{$dst, $src2, $src1}",
94 [(set FP64:$dst, (fcopysign FP64:$src1, FP64:$src2))]>;
[all …]
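The FCOPYSIGN pseudos above lower fcopysign to CPSDR, which places the sign bit of $src2 onto the magnitude of $src1. The same operation as a portable bit-level C sketch for the 64-bit case:

#include <stdint.h>
#include <string.h>

/* Magnitude of a, sign of b: what (fcopysign $src1, $src2) computes. */
static double fcopysign64(double a, double b)
{
    uint64_t ua, ub;
    memcpy(&ua, &a, sizeof ua);
    memcpy(&ub, &b, sizeof ub);
    ua = (ua & ~(1ULL << 63)) | (ub & (1ULL << 63));
    memcpy(&a, &ua, sizeof a);
    return a;
}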
/external/llvm/test/CodeGen/SystemZ/
int-cmp-38.ll
10 define i32 @f1(i32 %src1) {
17 %cond = icmp slt i32 %src1, %src2
20 %mul = mul i32 %src1, %src1
23 %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
28 define i32 @f2(i32 %src1) {
35 %cond = icmp ult i32 %src1, %src2
38 %mul = mul i32 %src1, %src1
41 %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
46 define i32 @f3(i32 %src1) {
53 %cond = icmp eq i32 %src1, %src2
[all …]
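Every function in these compare tests has the same control shape: compare %src1 against a loaded %src2, conditionally branch to a block that squares %src1, and merge the two values with a phi. Roughly the C these tests correspond to (a reconstruction; the branch polarity and the source of %src2 are guesses):

extern int g_src2;   /* hypothetical stand-in for the loaded %src2 */

int f1(int src1)
{
    if (src1 < g_src2)        /* icmp slt %src1, %src2 */
        return src1;          /* the [ %src1, %entry ] phi arm */
    return src1 * src1;       /* the [ %mul, %mulb ] phi arm */
}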
int-cmp-43.ll
10 define i64 @f1(i64 %src1) {
17 %cond = icmp slt i64 %src1, %src2
20 %mul = mul i64 %src1, %src1
23 %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
28 define i64 @f2(i64 %src1) {
35 %cond = icmp ult i64 %src1, %src2
38 %mul = mul i64 %src1, %src1
41 %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
46 define i64 @f3(i64 %src1) {
53 %cond = icmp eq i64 %src1, %src2
[all …]
int-cmp-39.ll
10 define i64 @f1(i64 %src1) {
18 %cond = icmp slt i64 %src1, %src2
21 %mul = mul i64 %src1, %src1
24 %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
29 define i64 @f2(i64 %src1) {
36 %cond = icmp ult i64 %src1, %src2
39 %mul = mul i64 %src1, %src1
42 %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
47 define i64 @f3(i64 %src1) {
55 %cond = icmp eq i64 %src1, %src2
[all …]
/external/pcre/dist2/src/sljit/
sljitNativePPC_32.c
45 sljit_s32 dst, sljit_s32 src1, sljit_s32 src2) in emit_single_op() argument
52 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
59 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
74 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
86 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
90 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
94 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
101 return push_inst(compiler, ADDI | D(dst) | A(src1) | compiler->imm); in emit_single_op()
106 return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm); in emit_single_op()
110 return push_inst(compiler, ADDIC | D(dst) | A(src1) | compiler->imm); in emit_single_op()
[all …]
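push_inst() above assembles each 32-bit PowerPC instruction by OR-ing bit-fields into an opcode template: D(dst) and A(src1) place the register numbers, and compiler->imm supplies the low 16 bits. A self-contained sketch of that packing for ADDI, using the standard D-form field positions (the surrounding emit machinery is omitted):

#include <stdint.h>

#define ADDI    (14u << 26)              /* D-form primary opcode 14 */
#define D(reg)  ((uint32_t)(reg) << 21)  /* RT: destination register */
#define A(reg)  ((uint32_t)(reg) << 16)  /* RA: source register */

/* Compose "addi rD, rA, imm" the way emit_single_op builds
 * ADDI | D(dst) | A(src1) | compiler->imm. */
static uint32_t encode_addi(unsigned dst, unsigned src1, int16_t imm)
{
    return ADDI | D(dst) | A(src1) | (uint16_t)imm;
}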
sljitNativePPC_64.c
133 FAIL_IF(push_inst(compiler, EXTSW | S(src1) | A(TMP_REG1))); \
134 src1 = TMP_REG1; \
144 FAIL_IF(push_inst(compiler, EXTSW | S(src1) | A(TMP_REG1))); \
145 src1 = TMP_REG1; \
149 sljit_s32 dst, sljit_s32 src1, sljit_s32 src2) in emit_single_op() argument
154 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
161 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
174 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
189 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
201 SLJIT_ASSERT(src1 == TMP_REG1); in emit_single_op()
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
sum_squares_msa.c
22 uint64_t src0, src1, src2, src3; in vpx_sum_squares_2d_i16_msa() local
26 LD4(src, src_stride, src0, src1, src2, src3); in vpx_sum_squares_2d_i16_msa()
27 INSERT_D2_SH(src0, src1, diff0); in vpx_sum_squares_2d_i16_msa()
35 v8i16 src0, src1, src2, src3, src4, src5, src6, src7; in vpx_sum_squares_2d_i16_msa() local
37 LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
38 DOTP_SH2_SW(src0, src1, src0, src1, mul0, mul1); in vpx_sum_squares_2d_i16_msa()
47 v8i16 src0, src1, src2, src3, src4, src5, src6, src7; in vpx_sum_squares_2d_i16_msa() local
49 LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
50 DOTP_SH2_SW(src0, src1, src0, src1, mul0, mul1); in vpx_sum_squares_2d_i16_msa()
54 LD_SH8(src + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
[all …]
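The MSA code above loads blocks of rows and accumulates each row's dot product with itself. A scalar C reference for the same quantity, the sum of squared int16 samples over an n x n block (this is the generic fallback shape, not the file's actual scalar path):

#include <stdint.h>

/* Sum of src[r][c]^2 over an n x n block with row stride `stride`. */
static uint64_t sum_squares_2d_i16(const int16_t *src, int stride, int n)
{
    uint64_t ss = 0;
    for (int r = 0; r < n; r++)
        for (int c = 0; c < n; c++) {
            int32_t v = src[r * stride + c];
            ss += (uint64_t)(v * v);   /* max 32768^2 fits in int32 */
        }
    return ss;
}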
/external/v8/src/x64/
assembler-x64.h
1070 void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
1072 void vinstr(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2,
1092 void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1093 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
1095 void v##instruction(XMMRegister dst, XMMRegister src1, \
1097 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
1143 void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1144 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
1146 void v##instruction(XMMRegister dst, XMMRegister src1, \
1148 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
[all …]
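The fragment above is an X-macro generator: a single macro definition stamps out the register-register and register-memory overloads of each AVX wrapper, so every v##instruction pair funnels into one vinstr() call. A self-contained C sketch of the token-pasting trick (C has no overloading, so only the register form is shown; vinstr and the types are stand-ins):

#include <stdio.h>

typedef int XMMRegister;   /* stand-in type for the sketch */

static void vinstr(unsigned op, XMMRegister dst, XMMRegister src1,
                   XMMRegister src2)
{
    printf("emit 0x%02x  xmm%d, xmm%d, xmm%d\n", op, dst, src1, src2);
}

/* One macro per instruction: token pasting builds the function name
 * (v##name) and the hex opcode literal (0x##opcode). */
#define AVX_OP(name, opcode)                                   \
    static void v##name(XMMRegister dst, XMMRegister src1,     \
                        XMMRegister src2)                      \
    {                                                          \
        vinstr(0x##opcode, dst, src1, src2);                   \
    }

AVX_OP(addsd, 58)   /* defines vaddsd(dst, src1, src2) */
AVX_OP(mulsd, 59)   /* defines vmulsd(dst, src1, src2) */

int main(void)
{
    vaddsd(0, 1, 2);
    vmulsd(0, 1, 2);
    return 0;
}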
/external/libxaac/decoder/
ixheaacd_basic_ops.c
56 VOID ixheaacd_combine_fac(WORD32 *src1, WORD32 *src2, WORD32 *dest, WORD32 len, in ixheaacd_combine_fac() argument
61 *dest = ixheaacd_add32_sat(*src1, ((*src2) >> (fac_q - output_q))); in ixheaacd_combine_fac()
63 src1++; in ixheaacd_combine_fac()
68 *dest = ixheaacd_add32_sat(*src1, ((*src2) << (output_q - fac_q))); in ixheaacd_combine_fac()
70 src1++; in ixheaacd_combine_fac()
76 WORD8 ixheaacd_windowing_long1(WORD32 *src1, WORD32 *src2, in ixheaacd_windowing_long1() argument
86 ((ixheaacd_mult32_sh1(*src1, *win_fwd)) >> (shift1 - shift2)), in ixheaacd_windowing_long1()
89 ((ixheaacd_mult32_sh1(-(*src1), *win_rev)) >> (shift1 - shift2)), in ixheaacd_windowing_long1()
92 src1++; in ixheaacd_windowing_long1()
103 ixheaacd_mult32_sh1(*src1, *win_fwd), in ixheaacd_windowing_long1()
[all …]
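ixheaacd_combine_fac() above adds two fixed-point buffers that carry different Q formats: src2 is shifted by the exponent difference (right when fac_q exceeds output_q, left otherwise) before a saturating add into dest. A C sketch of that alignment step with the saturating add written out (behavior reconstructed from the snippet; left-shifting a negative value mirrors the original but is formally undefined in C):

#include <stdint.h>
#include <limits.h>

static int32_t add32_sat(int32_t a, int32_t b)
{
    int64_t s = (int64_t)a + b;
    if (s > INT32_MAX) return INT32_MAX;
    if (s < INT32_MIN) return INT32_MIN;
    return (int32_t)s;
}

/* Align src2 from Q(fac_q) to Q(output_q), then saturating-add. */
static void combine_fac(const int32_t *src1, const int32_t *src2,
                        int32_t *dest, int len, int fac_q, int output_q)
{
    for (int i = 0; i < len; i++) {
        int32_t aligned = (fac_q >= output_q)
                        ? (src2[i] >> (fac_q - output_q))
                        : (src2[i] << (output_q - fac_q));
        dest[i] = add32_sat(src1[i], aligned);
    }
}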
/external/llvm/test/CodeGen/X86/
avx-unpack.ll
4 define <8 x float> @unpackhips(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
6 …%shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 2, i32 10, i32 3, …
11 define <4 x double> @unpackhipd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone s…
13 …%shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 1, i32 5, i32 3,…
18 define <8 x float> @unpacklops(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
20 …%shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 0, i32 8, i32 1, i…
25 define <4 x double> @unpacklopd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone s…
27 …%shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 0, i32 4, i32 2,…
32 define <8 x float> @unpacklops-not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone …
34 …%shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 0, i32 8, i32 1, i…
[all …]
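Each test above expresses an unpack as a shufflevector with a constant mask; for 256-bit unpckhps the mask interleaves the high half of each 128-bit lane of the two sources. A scalar C model of unpackhips, assuming the standard per-lane mask <2,10,3,11,6,14,7,15> (the mask is truncated in the snippet; indices of 8 and above select from %src2):

/* Scalar model of 256-bit unpckhps on two 8-float vectors. */
static void unpackhi_ps256(const float src1[8], const float src2[8],
                           float dst[8])
{
    static const int mask[8] = { 2, 10, 3, 11, 6, 14, 7, 15 };
    for (int i = 0; i < 8; i++)
        dst[i] = (mask[i] < 8) ? src1[mask[i]] : src2[mask[i] - 8];
}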
