
Search results for refs:v2i64 (results 1 – 25 of 205), sorted by relevance


/external/libvpx/libvpx/vpx_dsp/mips/
vpx_convolve_copy_msa.c
25 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
26 out1 = __msa_copy_u_d((v2i64)src1, 0); in copy_width8_msa()
27 out2 = __msa_copy_u_d((v2i64)src2, 0); in copy_width8_msa()
28 out3 = __msa_copy_u_d((v2i64)src3, 0); in copy_width8_msa()
29 out4 = __msa_copy_u_d((v2i64)src4, 0); in copy_width8_msa()
30 out5 = __msa_copy_u_d((v2i64)src5, 0); in copy_width8_msa()
31 out6 = __msa_copy_u_d((v2i64)src6, 0); in copy_width8_msa()
32 out7 = __msa_copy_u_d((v2i64)src7, 0); in copy_width8_msa()
42 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
43 out1 = __msa_copy_u_d((v2i64)src1, 0); in copy_width8_msa()
[all …]
loopfilter_4_msa.c
35 p1_d = __msa_copy_u_d((v2i64)p1_out, 0); in vpx_lpf_horizontal_4_msa()
36 p0_d = __msa_copy_u_d((v2i64)p0_out, 0); in vpx_lpf_horizontal_4_msa()
37 q0_d = __msa_copy_u_d((v2i64)q0_out, 0); in vpx_lpf_horizontal_4_msa()
38 q1_d = __msa_copy_u_d((v2i64)q1_out, 0); in vpx_lpf_horizontal_4_msa()
57 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); in vpx_lpf_horizontal_4_dual_msa()
61 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); in vpx_lpf_horizontal_4_dual_msa()
65 limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); in vpx_lpf_horizontal_4_dual_msa()
129 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); in vpx_lpf_vertical_4_dual_msa()
133 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); in vpx_lpf_vertical_4_dual_msa()
137 limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); in vpx_lpf_vertical_4_dual_msa()
loopfilter_8_msa.c
40 flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat); in vpx_lpf_horizontal_8_msa()
43 p1_d = __msa_copy_u_d((v2i64)p1_out, 0); in vpx_lpf_horizontal_8_msa()
44 p0_d = __msa_copy_u_d((v2i64)p0_out, 0); in vpx_lpf_horizontal_8_msa()
45 q0_d = __msa_copy_u_d((v2i64)q0_out, 0); in vpx_lpf_horizontal_8_msa()
46 q1_d = __msa_copy_u_d((v2i64)q1_out, 0); in vpx_lpf_horizontal_8_msa()
69 p2_d = __msa_copy_u_d((v2i64)p2_out, 0); in vpx_lpf_horizontal_8_msa()
70 p1_d = __msa_copy_u_d((v2i64)p1_out, 0); in vpx_lpf_horizontal_8_msa()
71 p0_d = __msa_copy_u_d((v2i64)p0_out, 0); in vpx_lpf_horizontal_8_msa()
72 q0_d = __msa_copy_u_d((v2i64)q0_out, 0); in vpx_lpf_horizontal_8_msa()
73 q1_d = __msa_copy_u_d((v2i64)q1_out, 0); in vpx_lpf_horizontal_8_msa()
[all …]
macros_msa.h
381 out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
382 out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
533 out0_m = __msa_copy_u_d((v2i64)in, 0); \
548 out0_m = __msa_copy_u_d((v2i64)in, 0); \
549 out1_m = __msa_copy_u_d((v2i64)in, 1); \
571 out0_m = __msa_copy_u_d((v2i64)in0, 0); \
572 out1_m = __msa_copy_u_d((v2i64)in0, 1); \
573 out2_m = __msa_copy_u_d((v2i64)in1, 0); \
574 out3_m = __msa_copy_u_d((v2i64)in1, 1); \
751 #define DOTP_SW2_SD(...) DOTP_SW2(v2i64, __VA_ARGS__)
[all …]
loopfilter_16_msa.c
458 flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat); in vpx_lpf_horizontal_16_msa()
461 p1_d = __msa_copy_u_d((v2i64)p1_out, 0); in vpx_lpf_horizontal_16_msa()
462 p0_d = __msa_copy_u_d((v2i64)p0_out, 0); in vpx_lpf_horizontal_16_msa()
463 q0_d = __msa_copy_u_d((v2i64)q0_out, 0); in vpx_lpf_horizontal_16_msa()
464 q1_d = __msa_copy_u_d((v2i64)q1_out, 0); in vpx_lpf_horizontal_16_msa()
495 p2_d = __msa_copy_u_d((v2i64)p2_out, 0); in vpx_lpf_horizontal_16_msa()
496 p1_d = __msa_copy_u_d((v2i64)p1_out, 0); in vpx_lpf_horizontal_16_msa()
497 p0_d = __msa_copy_u_d((v2i64)p0_out, 0); in vpx_lpf_horizontal_16_msa()
498 q0_d = __msa_copy_u_d((v2i64)q0_out, 0); in vpx_lpf_horizontal_16_msa()
499 q1_d = __msa_copy_u_d((v2i64)q1_out, 0); in vpx_lpf_horizontal_16_msa()
[all …]
/external/llvm/test/Analysis/CostModel/X86/
scalarize.ll
14 declare %i8 @llvm.bswap.v2i64(%i8)
17 declare %i8 @llvm.ctpop.v2i64(%i8)
27 ; CHECK32: cost of 10 {{.*}}bswap.v2i64
28 ; CHECK64: cost of 6 {{.*}}bswap.v2i64
29 %r3 = call %i8 @llvm.bswap.v2i64(%i8 undef)
34 ; CHECK32: cost of 10 {{.*}}ctpop.v2i64
35 ; CHECK64: cost of 6 {{.*}}ctpop.v2i64
36 %r5 = call %i8 @llvm.ctpop.v2i64(%i8 undef)
/external/libvpx/libvpx/vp8/common/mips/msa/
postproc_msa.c
88 out9 = (v16u8)__msa_ilvl_d((v2i64)out8, (v2i64)out8); \
89 out11 = (v16u8)__msa_ilvl_d((v2i64)out10, (v2i64)out10); \
90 out13 = (v16u8)__msa_ilvl_d((v2i64)out12, (v2i64)out12); \
91 out15 = (v16u8)__msa_ilvl_d((v2i64)out14, (v2i64)out14); \
92 out1 = (v16u8)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
93 out3 = (v16u8)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
94 out5 = (v16u8)__msa_ilvl_d((v2i64)out4, (v2i64)out4); \
95 out7 = (v16u8)__msa_ilvl_d((v2i64)out6, (v2i64)out6); \
142 in1 = (v16u8)__msa_ilvl_d((v2i64)temp4, (v2i64)temp0); \
143 in3 = (v16u8)__msa_ilvl_d((v2i64)temp5, (v2i64)temp1); \
[all …]
vp8_macros_msa.h
476 out0_m = __msa_copy_u_d((v2i64)in, 0); \
492 out0_m = __msa_copy_u_d((v2i64)in, 0); \
493 out1_m = __msa_copy_u_d((v2i64)in, 1); \
516 out0_m = __msa_copy_u_d((v2i64)in0, 0); \
517 out1_m = __msa_copy_u_d((v2i64)in0, 1); \
518 out2_m = __msa_copy_u_d((v2i64)in1, 0); \
519 out3_m = __msa_copy_u_d((v2i64)in1, 1); \
690 #define DOTP_SW2_SD(...) DOTP_SW2(v2i64, __VA_ARGS__)
754 out0 = (RTYPE)__msa_dpadd_s_d((v2i64)out0, (v4i32)mult0, (v4i32)mult0); \
755 out1 = (RTYPE)__msa_dpadd_s_d((v2i64)out1, (v4i32)mult1, (v4i32)mult1); \
[all …]
loopfilter_filters_msa.c
289 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); in loop_filter_horizontal_4_dual_msa()
293 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); in loop_filter_horizontal_4_dual_msa()
297 limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); in loop_filter_horizontal_4_dual_msa()
330 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); in loop_filter_vertical_4_dual_msa()
334 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); in loop_filter_vertical_4_dual_msa()
338 limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); in loop_filter_vertical_4_dual_msa()
405 p2_d = __msa_copy_u_d((v2i64)p2, 0); in mbloop_filter_horizontal_edge_uv_msa()
406 p1_d = __msa_copy_u_d((v2i64)p1, 0); in mbloop_filter_horizontal_edge_uv_msa()
407 p0_d = __msa_copy_u_d((v2i64)p0, 0); in mbloop_filter_horizontal_edge_uv_msa()
408 q0_d = __msa_copy_u_d((v2i64)q0, 0); in mbloop_filter_horizontal_edge_uv_msa()
[all …]
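
Note: the MSA hits above all revolve around the same two idioms. The C sketch below is a minimal, hypothetical illustration (not taken from libvpx; it assumes a MIPS target with MSA enabled and the standard <msa.h> intrinsics header) of how v2i64 is used as the lane view for these operations.

/* Minimal hypothetical sketch of the v2i64 idioms in the MSA hits above. */
#include <msa.h>     /* MSA vector types (v16u8, v2i64, ...) and intrinsics */
#include <stdint.h>
#include <string.h>

/* Store the low 8 bytes of a 16-byte vector, the pattern used per row in
   copy_width8_msa(): reinterpret the byte vector as two 64-bit lanes and
   extract lane 0. */
static void store_low_dword(v16u8 src, uint8_t *dst) {
  uint64_t out = __msa_copy_u_d((v2i64)src, 0);
  memcpy(dst, &out, sizeof(out));
}

/* Interleave the low doublewords of two vectors into one register, the idiom
   the loop filters use to merge thresh0/thresh1-style parameter pairs. */
static v16u8 merge_low_dwords(v16u8 lo, v16u8 hi) {
  return (v16u8)__msa_ilvr_d((v2i64)hi, (v2i64)lo);
}
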
/external/llvm/lib/Target/PowerPC/
PPCInstrAltivec.td
858 def : Pat<(v16i8 (bitconvert (v2i64 VRRC:$src))), (v16i8 VRRC:$src)>;
864 def : Pat<(v8i16 (bitconvert (v2i64 VRRC:$src))), (v8i16 VRRC:$src)>;
870 def : Pat<(v4i32 (bitconvert (v2i64 VRRC:$src))), (v4i32 VRRC:$src)>;
876 def : Pat<(v4f32 (bitconvert (v2i64 VRRC:$src))), (v4f32 VRRC:$src)>;
879 def : Pat<(v2i64 (bitconvert (v16i8 VRRC:$src))), (v2i64 VRRC:$src)>;
880 def : Pat<(v2i64 (bitconvert (v8i16 VRRC:$src))), (v2i64 VRRC:$src)>;
881 def : Pat<(v2i64 (bitconvert (v4i32 VRRC:$src))), (v2i64 VRRC:$src)>;
882 def : Pat<(v2i64 (bitconvert (v4f32 VRRC:$src))), (v2i64 VRRC:$src)>;
883 def : Pat<(v2i64 (bitconvert (v1i128 VRRC:$src))), (v2i64 VRRC:$src)>;
889 def : Pat<(v1i128 (bitconvert (v2i64 VRRC:$src))), (v1i128 VRRC:$src)>;
[all …]
PPCInstrVSX.td
462 int_ppc_vsx_xvcmpeqdp, v2i64, v2f64>;
468 int_ppc_vsx_xvcmpgedp, v2i64, v2f64>;
474 int_ppc_vsx_xvcmpgtdp, v2i64, v2f64>;
572 [(set v2i64:$XT, (fp_to_sint v2f64:$XB))]>;
579 [(set v2i64:$XT, (fp_to_uint v2f64:$XB))]>;
602 [(set v2f64:$XT, (sint_to_fp v2i64:$XB))]>;
615 [(set v2f64:$XT, (uint_to_fp v2i64:$XB))]>;
889 def : Pat<(v2i64 (bitconvert v4f32:$A)),
891 def : Pat<(v2i64 (bitconvert v4i32:$A)),
893 def : Pat<(v2i64 (bitconvert v8i16:$A)),
[all …]
/external/llvm/test/CodeGen/PowerPC/
vec_popcnt.ll
2 ; In addition, check the conversions to/from the v2i64 VMX register that was also added in P8.
9 declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
39 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %x)
48 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
58 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
67 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
/external/llvm/test/CodeGen/SystemZ/
vec-shift-07.ll
71 ; Test a v2i1->v2i64 extension.
82 ; Test a v2i8->v2i64 extension.
92 ; Test a v2i16->v2i64 extension.
102 ; Test a v2i32->v2i64 extension.
112 ; Test an alternative v2i8->v2i64 extension.
122 ; Test an alternative v2i16->v2i64 extension.
132 ; Test an alternative v2i32->v2i64 extension.
142 ; Test an extraction-based v2i8->v2i64 extension.
156 ; Test an extraction-based v2i16->v2i64 extension.
170 ; Test an extraction-based v2i32->v2i64 extension.
vec-move-16.ll
63 ; Test a v2i1->v2i64 extension.
71 ; Test a v2i8->v2i64 extension.
84 ; Test a v2i16->v2i64 extension.
96 ; Test a v2i32->v2i64 extension.
vec-move-15.ll
63 ; Test a v2i1->v2i64 extension.
71 ; Test a v2i8->v2i64 extension.
84 ; Test a v2i16->v2i64 extension.
96 ; Test a v2i32->v2i64 extension.
/external/llvm/lib/Target/X86/
X86TargetTransformInfo.cpp
157 { ISD::SHL, MVT::v2i64, 1 }, in getArithmeticInstrCost()
158 { ISD::SRL, MVT::v2i64, 1 }, in getArithmeticInstrCost()
187 { ISD::SHL, MVT::v2i64, 1 }, in getArithmeticInstrCost()
188 { ISD::SRL, MVT::v2i64, 2 }, in getArithmeticInstrCost()
189 { ISD::SRA, MVT::v2i64, 2 }, in getArithmeticInstrCost()
220 { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence. in getArithmeticInstrCost()
252 { ISD::SHL, MVT::v2i64, 1 }, // psllq. in getArithmeticInstrCost()
261 { ISD::SRL, MVT::v2i64, 1 }, // psrlq. in getArithmeticInstrCost()
270 { ISD::SRA, MVT::v2i64, 4 }, // 2 x psrad + shuffle. in getArithmeticInstrCost()
327 { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence. in getArithmeticInstrCost()
[all …]
/external/llvm/test/CodeGen/AArch64/
arm64-vcvt_n.ll
35 %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64> %a, i32 12)
40 %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64> %a, i32 9)
48 declare <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64>, i32) nounwind readnone
49 declare <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64>, i32) nounwind readnone
arm64-vcvt.ll
26 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double> %A)
32 declare <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double>) nounwind readnone
57 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double> %A)
63 declare <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double>) nounwind readnone
88 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> %A)
94 declare <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double>) nounwind readnone
119 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double> %A)
125 declare <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double>) nounwind readnone
150 %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double> %A)
156 declare <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double>) nounwind readnone
[all …]
arm64-neon-2velem-high.ll
39 …%vmull9.i.i = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vec…
50 …%vmull9.i.i = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32…
89 …%vmull9.i.i = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vec…
100 …%vmull9.i.i = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32…
139 …%vqdmull9.i.i = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> …
150 …%vqdmull9.i.i = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> …
191 …%vmull2.i.i.i = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %v…
203 …%vmull2.i.i.i = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i…
245 …%vmull2.i.i.i = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %v…
257 …%vmull2.i.i.i = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i…
[all …]
/external/llvm/test/CodeGen/X86/
vec_ctbits.ll
4 declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1)
5 declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)
6 declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
21 %c = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 true)
40 %c = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 true)
66 %c = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
/external/llvm/lib/Target/ARM/
ARMCallingConv.td
28 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
47 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
61 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
80 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
96 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
152 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
162 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
180 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
195 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
/external/llvm/lib/Target/AArch64/
AArch64CallingConvention.td
27 CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,
33 CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
73 CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
81 CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
87 CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,
93 CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
109 CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
120 CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,
154 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
163 CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
[all …]
/external/llvm/test/CodeGen/ARM/
vqdmul.ll
175 %tmp3 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
193 …%1 = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <…
198 declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
217 %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp2, <2 x i32> %tmp3)
218 %tmp5 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp4)
237 %1 = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %arg1_int32x2_t, <2 x i32> %0)
238 %2 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i64> %1)
243 declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
262 %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp2, <2 x i32> %tmp3)
263 %tmp5 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp4)
[all …]
/external/mesa3d/src/gallium/drivers/radeon/
AMDILISelLowering.cpp
66 (int)MVT::v2i64 in InitAMDILLowering()
94 (int)MVT::v2i64 in InitAMDILLowering()
122 if (VT != MVT::i64 && VT != MVT::v2i64) { in InitAMDILLowering()
175 setOperationAction(ISD::MULHU, MVT::v2i64, Expand); in InitAMDILLowering()
177 setOperationAction(ISD::MULHS, MVT::v2i64, Expand); in InitAMDILLowering()
178 setOperationAction(ISD::ADD, MVT::v2i64, Expand); in InitAMDILLowering()
179 setOperationAction(ISD::SREM, MVT::v2i64, Expand); in InitAMDILLowering()
181 setOperationAction(ISD::SDIV, MVT::v2i64, Expand); in InitAMDILLowering()
182 setOperationAction(ISD::TRUNCATE, MVT::v2i64, Expand); in InitAMDILLowering()
183 setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Expand); in InitAMDILLowering()
[all …]
/external/llvm/test/CodeGen/Mips/msa/
basic_operations.ll
12 @v2i64 = global <2 x i64> <i64 0, i64 0>
130 store volatile <2 x i64> <i64 0, i64 0>, <2 x i64>*@v2i64
133 store volatile <2 x i64> <i64 72340172838076673, i64 72340172838076673>, <2 x i64>*@v2i64
136 store volatile <2 x i64> <i64 281479271743489, i64 281479271743489>, <2 x i64>*@v2i64
139 store volatile <2 x i64> <i64 4294967297, i64 4294967297>, <2 x i64>*@v2i64
142 store volatile <2 x i64> <i64 1, i64 1>, <2 x i64>*@v2i64
145 store volatile <2 x i64> <i64 1, i64 31>, <2 x i64>*@v2i64
152 store volatile <2 x i64> <i64 3, i64 4>, <2 x i64>*@v2i64
273 store volatile <2 x i64> %2, <2 x i64>*@v2i64
332 %1 = load <2 x i64>, <2 x i64>* @v2i64
[all …]
