/external/libvpx/libvpx/vpx_dsp/mips/ |
D | vpx_convolve_copy_msa.c |
    25  out0 = __msa_copy_u_d((v2i64)src0, 0);  in copy_width8_msa()
    26  out1 = __msa_copy_u_d((v2i64)src1, 0);  in copy_width8_msa()
    27  out2 = __msa_copy_u_d((v2i64)src2, 0);  in copy_width8_msa()
    28  out3 = __msa_copy_u_d((v2i64)src3, 0);  in copy_width8_msa()
    29  out4 = __msa_copy_u_d((v2i64)src4, 0);  in copy_width8_msa()
    30  out5 = __msa_copy_u_d((v2i64)src5, 0);  in copy_width8_msa()
    31  out6 = __msa_copy_u_d((v2i64)src6, 0);  in copy_width8_msa()
    32  out7 = __msa_copy_u_d((v2i64)src7, 0);  in copy_width8_msa()
    42  out0 = __msa_copy_u_d((v2i64)src0, 0);  in copy_width8_msa()
    43  out1 = __msa_copy_u_d((v2i64)src1, 0);  in copy_width8_msa()
    [all …]
|
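The matches above are all the same idiom: __msa_copy_u_d pulls one 64-bit lane out of an MSA register so that an 8-pixel-wide row can be written back with a single scalar 64-bit store. A minimal sketch of that idiom follows, assuming a MIPS toolchain with MSA enabled (-mmsa) and <msa.h>; the function and variable names are illustrative, not libvpx's.

#include <msa.h>
#include <stdint.h>
#include <string.h>

/* Copy two 8-byte rows using a vector load and 64-bit lane extraction. */
static void copy_8x2(const uint8_t *src, int src_stride,
                     uint8_t *dst, int dst_stride) {
  v16u8 row0 = (v16u8)__msa_ld_b((void *)src, 0);                /* 16-byte load, low 8 used */
  v16u8 row1 = (v16u8)__msa_ld_b((void *)(src + src_stride), 0);
  uint64_t out0 = __msa_copy_u_d((v2i64)row0, 0);                /* extract the low doubleword */
  uint64_t out1 = __msa_copy_u_d((v2i64)row1, 0);
  memcpy(dst, &out0, 8);                                         /* one 8-byte store per row */
  memcpy(dst + dst_stride, &out1, 8);
}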
D | deblock_msa.c |
    41  out9 = (v16u8)__msa_ilvl_d((v2i64)out8, (v2i64)out8); \
    42  out11 = (v16u8)__msa_ilvl_d((v2i64)out10, (v2i64)out10); \
    43  out13 = (v16u8)__msa_ilvl_d((v2i64)out12, (v2i64)out12); \
    44  out15 = (v16u8)__msa_ilvl_d((v2i64)out14, (v2i64)out14); \
    45  out1 = (v16u8)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
    46  out3 = (v16u8)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
    47  out5 = (v16u8)__msa_ilvl_d((v2i64)out4, (v2i64)out4); \
    48  out7 = (v16u8)__msa_ilvl_d((v2i64)out6, (v2i64)out6); \
    95  in1 = (v16u8)__msa_ilvl_d((v2i64)temp4, (v2i64)temp0); \
    96  in3 = (v16u8)__msa_ilvl_d((v2i64)temp5, (v2i64)temp1); \
    [all …]
|
D | loopfilter_4_msa.c |
    32   p1_d = __msa_copy_u_d((v2i64)p1_out, 0);  in vpx_lpf_horizontal_4_msa()
    33   p0_d = __msa_copy_u_d((v2i64)p0_out, 0);  in vpx_lpf_horizontal_4_msa()
    34   q0_d = __msa_copy_u_d((v2i64)q0_out, 0);  in vpx_lpf_horizontal_4_msa()
    35   q1_d = __msa_copy_u_d((v2i64)q1_out, 0);  in vpx_lpf_horizontal_4_msa()
    54   thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);  in vpx_lpf_horizontal_4_dual_msa()
    58   b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);  in vpx_lpf_horizontal_4_dual_msa()
    62   limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);  in vpx_lpf_horizontal_4_dual_msa()
    123  thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);  in vpx_lpf_vertical_4_dual_msa()
    127  b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);  in vpx_lpf_vertical_4_dual_msa()
    131  limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);  in vpx_lpf_vertical_4_dual_msa()
|
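In the *_dual_msa functions listed above, __msa_ilvr_d interleaves the low (right) doublewords of two vectors, which is how two independent sets of thresh/b_limit/limit values are packed into one 128-bit register so that both macroblock edges can be filtered at once. A rough illustration of just that step, assuming <msa.h>; this is not the libvpx source.

#include <msa.h>

/* result: bytes 0-7 come from limit0, bytes 8-15 from limit1 */
static v16u8 merge_limits(v16u8 limit0, v16u8 limit1) {
  return (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);
}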
D | loopfilter_8_msa.c |
    37  flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);  in vpx_lpf_horizontal_8_msa()
    40  p1_d = __msa_copy_u_d((v2i64)p1_out, 0);  in vpx_lpf_horizontal_8_msa()
    41  p0_d = __msa_copy_u_d((v2i64)p0_out, 0);  in vpx_lpf_horizontal_8_msa()
    42  q0_d = __msa_copy_u_d((v2i64)q0_out, 0);  in vpx_lpf_horizontal_8_msa()
    43  q1_d = __msa_copy_u_d((v2i64)q1_out, 0);  in vpx_lpf_horizontal_8_msa()
    64  p2_d = __msa_copy_u_d((v2i64)p2_out, 0);  in vpx_lpf_horizontal_8_msa()
    65  p1_d = __msa_copy_u_d((v2i64)p1_out, 0);  in vpx_lpf_horizontal_8_msa()
    66  p0_d = __msa_copy_u_d((v2i64)p0_out, 0);  in vpx_lpf_horizontal_8_msa()
    67  q0_d = __msa_copy_u_d((v2i64)q0_out, 0);  in vpx_lpf_horizontal_8_msa()
    68  q1_d = __msa_copy_u_d((v2i64)q1_out, 0);  in vpx_lpf_horizontal_8_msa()
    [all …]
|
D | macros_msa.h |
    387  out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
    388  out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
    551  out0_m = __msa_copy_u_d((v2i64)in, 0); \
    567  out0_m = __msa_copy_u_d((v2i64)in, 0); \
    568  out1_m = __msa_copy_u_d((v2i64)in, 1); \
    591  out0_m = __msa_copy_u_d((v2i64)in0, 0); \
    592  out1_m = __msa_copy_u_d((v2i64)in0, 1); \
    593  out2_m = __msa_copy_u_d((v2i64)in1, 0); \
    594  out3_m = __msa_copy_u_d((v2i64)in1, 1); \
    784  #define DOTP_SW2_SD(...) DOTP_SW2(v2i64, __VA_ARGS__)
    [all …]
|
D | loopfilter_16_msa.c |
    455  flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);  in mb_lpf_horizontal_edge()
    458  p1_d = __msa_copy_u_d((v2i64)p1_out, 0);  in mb_lpf_horizontal_edge()
    459  p0_d = __msa_copy_u_d((v2i64)p0_out, 0);  in mb_lpf_horizontal_edge()
    460  q0_d = __msa_copy_u_d((v2i64)q0_out, 0);  in mb_lpf_horizontal_edge()
    461  q1_d = __msa_copy_u_d((v2i64)q1_out, 0);  in mb_lpf_horizontal_edge()
    491  p2_d = __msa_copy_u_d((v2i64)p2_out, 0);  in mb_lpf_horizontal_edge()
    492  p1_d = __msa_copy_u_d((v2i64)p1_out, 0);  in mb_lpf_horizontal_edge()
    493  p0_d = __msa_copy_u_d((v2i64)p0_out, 0);  in mb_lpf_horizontal_edge()
    494  q0_d = __msa_copy_u_d((v2i64)q0_out, 0);  in mb_lpf_horizontal_edge()
    495  q1_d = __msa_copy_u_d((v2i64)q1_out, 0);  in mb_lpf_horizontal_edge()
    [all …]
|
/external/swiftshader/third_party/LLVM/lib/Target/CellSPU/ |
D | SPU64InstrInfo.td |
    21   // 4. v2i64 setcc results are v4i32, which can be converted to a FSM mask (TODO)
    24   // 5. The code sequences for r64 and v2i64 are probably overly conservative,
    67   // v2i64 seteq (equality): the setcc result is v4i32
    71   def v2i64: CodeFrag<(i32 (COPY_TO_REGCLASS CEQv2i64compare.Fragment, R32C))>;
    83   def : Pat<(seteq (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)), I64EQv2i64.Fragment>;
    120  def v2i64: CodeFrag<CLGTv2i64compare.Fragment>;
    132  //def : Pat<(setugt (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)),
    154  def v2i64: CodeFrag<CLGEv2i64compare.Fragment>;
    166  def : Pat<(v2i64 (setuge (v2i64 VECREG:$rA), (v2i64 VECREG:$rB))),
    205  def v2i64: CodeFrag<CGTv2i64compare.Fragment>;
    [all …]
|
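The comments in SPU64InstrInfo.td describe synthesizing 64-bit vector comparisons from 32-bit ones: CEQ produces a v4i32 mask that is then combined per doubleword. A hedged C sketch of that general technique is shown below, using GCC/Clang vector extensions (__builtin_shufflevector is the Clang spelling); it illustrates the idea only and is not the CellSPU code sequence.

typedef int       v4i32_t __attribute__((vector_size(16)));
typedef long long v2i64_t __attribute__((vector_size(16)));

/* 64-bit lane equality built from 32-bit lane compares */
static v2i64_t eq_v2i64(v2i64_t a, v2i64_t b) {
  v4i32_t eq32 = ((v4i32_t)a == (v4i32_t)b);                      /* -1 per equal 32-bit lane */
  v4i32_t swapped = __builtin_shufflevector(eq32, eq32, 1, 0, 3, 2);
  return (v2i64_t)(eq32 & swapped);                               /* both halves of a lane must match */
}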
/external/llvm/test/Analysis/CostModel/X86/ |
D | scalarize.ll |
    14  declare %i8 @llvm.bswap.v2i64(%i8)
    17  declare %i8 @llvm.cttz.v2i64(%i8)
    27  ; CHECK32: cost of 1 {{.*}}bswap.v2i64
    28  ; CHECK64: cost of 1 {{.*}}bswap.v2i64
    29  %r3 = call %i8 @llvm.bswap.v2i64(%i8 undef)
    34  ; CHECK32: cost of 10 {{.*}}cttz.v2i64
    35  ; CHECK64: cost of 6 {{.*}}cttz.v2i64
    36  %r5 = call %i8 @llvm.cttz.v2i64(%i8 undef)
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | vec_ctbits.ll |
    3   declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>)
    4   declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>)
    5   declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
    8   %c = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a)
    12  %c = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a)
    16  %c = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
|
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | vp8_macros_msa.h |
    457  out0_m = __msa_copy_u_d((v2i64)in, 0); \
    473  out0_m = __msa_copy_u_d((v2i64)in, 0); \
    474  out1_m = __msa_copy_u_d((v2i64)in, 1); \
    497  out0_m = __msa_copy_u_d((v2i64)in0, 0); \
    498  out1_m = __msa_copy_u_d((v2i64)in0, 1); \
    499  out2_m = __msa_copy_u_d((v2i64)in1, 0); \
    500  out3_m = __msa_copy_u_d((v2i64)in1, 1); \
    669  #define DOTP_SW2_SD(...) DOTP_SW2(v2i64, __VA_ARGS__)
    733  out0 = (RTYPE)__msa_dpadd_s_d((v2i64)out0, (v4i32)mult0, (v4i32)mult0); \
    734  out1 = (RTYPE)__msa_dpadd_s_d((v2i64)out1, (v4i32)mult1, (v4i32)mult1); \
    [all …]
|
D | loopfilter_filters_msa.c |
    223  thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);  in loop_filter_horizontal_4_dual_msa()
    227  b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);  in loop_filter_horizontal_4_dual_msa()
    231  limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);  in loop_filter_horizontal_4_dual_msa()
    263  thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);  in loop_filter_vertical_4_dual_msa()
    267  b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);  in loop_filter_vertical_4_dual_msa()
    271  limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);  in loop_filter_vertical_4_dual_msa()
    336  p2_d = __msa_copy_u_d((v2i64)p2, 0);  in mbloop_filter_horizontal_edge_uv_msa()
    337  p1_d = __msa_copy_u_d((v2i64)p1, 0);  in mbloop_filter_horizontal_edge_uv_msa()
    338  p0_d = __msa_copy_u_d((v2i64)p0, 0);  in mbloop_filter_horizontal_edge_uv_msa()
    339  q0_d = __msa_copy_u_d((v2i64)q0, 0);  in mbloop_filter_horizontal_edge_uv_msa()
    [all …]
|
/external/llvm/test/CodeGen/PowerPC/ |
D | vec_popcnt.ll |
    2   ; In addition, check the conversions to/from the v2i64 VMX register that was also added in P8.
    9   declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
    39  %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %x)
    48  %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
    58  %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
    67  %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
|
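vec_popcnt.ll feeds llvm.ctpop.v2i64 to check that POWER8's vpopcntd instruction gets selected. A plain-C equivalent using GCC/Clang vector extensions follows; whether a given compiler actually turns this into a single ctpop.v2i64 (and then vpopcntd) is target- and optimizer-dependent, so that mapping is only an assumption here.

typedef long long v2i64_t __attribute__((vector_size(16)));

/* per-lane population count of a 2 x 64-bit vector */
static v2i64_t popcnt_v2i64(v2i64_t x) {
  v2i64_t r;
  r[0] = __builtin_popcountll((unsigned long long)x[0]);
  r[1] = __builtin_popcountll((unsigned long long)x[1]);
  return r;
}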
/external/webp/src/dsp/ |
D | msa_macro.h |
    587  const v2i64 res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in);  in func_hadd_sw_s32()
    588  const v2i64 res1_m = __msa_splati_d(res0_m, 1);  in func_hadd_sw_s32()
    589  const v2i64 out = res0_m + res1_m;  in func_hadd_sw_s32()
    604  const v2i64 res0 = __msa_hadd_s_d(res, res);  in func_hadd_sh_s32()
    605  const v2i64 res1 = __msa_splati_d(res0, 1);  in func_hadd_sh_s32()
    606  const v2i64 res2 = res0 + res1;  in func_hadd_sh_s32()
    623  v2u64 res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1);  in func_hadd_uh_u32()
    697  out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \
    698  out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \
    718  #define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)
    [all …]
|
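func_hadd_sw_s32 in msa_macro.h reduces a v4i32 to a scalar with widening pairwise adds: __msa_hadd_s_d sums adjacent 32-bit lanes into 64-bit lanes, __msa_splati_d broadcasts the upper lane, and the two are added. A compact restatement of that pattern, assuming <msa.h>; the names here are illustrative, not WebP's.

#include <msa.h>

/* horizontal sum of four signed 32-bit lanes */
static long long horizontal_add_s32(v4i32 in) {
  v2i64 sum  = __msa_hadd_s_d(in, in);        /* {in[1]+in[0], in[3]+in[2]} */
  v2i64 high = __msa_splati_d(sum, 1);        /* broadcast the upper 64-bit lane */
  return __msa_copy_s_d(sum + high, 0);       /* total of all four lanes */
}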
/external/swiftshader/third_party/LLVM/lib/Target/X86/ |
D | X86GenDAGISel.inc |
    95    /*172*/ OPC_CheckChild1Type, MVT::v2i64,
    104   …// Src: (st VR128:v2i64:$src, addr:iPTR:$dst)<<P:Predicate_alignednontemporalstore>> - Complexity …
    105   // Dst: (VMOVNTDQmr addr:iPTR:$dst, VR128:v2i64:$src)
    112   …// Src: (st VR128:v2i64:$src, addr:iPTR:$dst)<<P:Predicate_alignednontemporalstore>> - Complexity …
    113   // Dst: (MOVNTDQmr addr:iPTR:$dst, VR128:v2i64:$src)
    3766  /*8348*/ OPC_CheckType, MVT::v2i64,
    3784  …// Src: (st (vector_shuffle:v4i32 (bitconvert:v4i32 (ld:v2i64 addr:iPTR:$src1)<<P:Predicate_uninde…
    3792  …// Src: (st (vector_shuffle:v4i32 (bitconvert:v4i32 (ld:v2i64 addr:iPTR:$src1)<<P:Predicate_uninde…
    3853  /*SwitchType*/ 56, MVT::v2i64,// ->8604
    3866  …// Src: (st (vector_shuffle:v2i64 (ld:v2i64 addr:iPTR:$src1)<<P:Predicate_unindexedload>><<P:Predi…
    [all …]
|
/external/llvm/test/CodeGen/SystemZ/ |
D | vec-shift-07.ll |
    71   ; Test a v2i1->v2i64 extension.
    82   ; Test a v2i8->v2i64 extension.
    92   ; Test a v2i16->v2i64 extension.
    102  ; Test a v2i32->v2i64 extension.
    112  ; Test an alternative v2i8->v2i64 extension.
    122  ; Test an alternative v2i16->v2i64 extension.
    132  ; Test an alternative v2i32->v2i64 extension.
    142  ; Test an extraction-based v2i8->v2i64 extension.
    156  ; Test an extraction-based v2i16->v2i64 extension.
    170  ; Test an extraction-based v2i32->v2i64 extension.
|
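vec-shift-07.ll exercises v2iN-to-v2i64 extensions feeding vector shifts. In C terms the pattern looks roughly like the sketch below (GCC/Clang vector extensions); whether the SystemZ backend folds the extension into the shift, as the test expects, is the compiler's job and is only assumed here.

typedef int       v2i32_t __attribute__((vector_size(8)));
typedef long long v2i64_t __attribute__((vector_size(16)));

/* widen two 32-bit lanes to 64 bits, then shift per lane */
static v2i64_t sext_then_shift(v2i32_t val, v2i64_t amount) {
  v2i64_t wide = { val[0], val[1] };   /* each 32-bit lane sign-extends to 64 bits */
  return wide << amount;
}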
/external/swiftshader/third_party/LLVM/test/CodeGen/CellSPU/useful-harnesses/ |
D | vecoperations.c |
    7    typedef long long v2i64 __attribute__((ext_vector_type(2)));  typedef
    58   void print_v2i64(const char *str, v2i64 v) {  in print_v2i64()
    126  v2i64 v2i64_shuffle(v2i64 a) {  in v2i64_shuffle()
    127  v2i64 c2 = a.yx;  in v2i64_shuffle()
    147  v2i64 v3 = { 691043ll, 910301513ll };  in main()
|
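vecoperations.c defines its own v2i64 with Clang's ext_vector_type, which is what makes the a.yx swizzle in v2i64_shuffle legal. A stripped-down version of the same trick (Clang-specific; "v2i64" here is the harness's local typedef, not the LLVM value type):

typedef long long v2i64 __attribute__((ext_vector_type(2)));

v2i64 swap_lanes(v2i64 a) {
  return a.yx;                       /* element swizzle: { a[1], a[0] } */
}

/* e.g. swap_lanes((v2i64){ 691043ll, 910301513ll }) yields { 910301513ll, 691043ll } */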
/external/libyuv/files/source/ |
D | scale_msa.cc |
    141  src0 = (v16u8)__msa_insert_d((v2i64)src0, 0, data0);  in ScaleARGBRowDownEvenBox_MSA()
    142  src0 = (v16u8)__msa_insert_d((v2i64)src0, 1, data1);  in ScaleARGBRowDownEvenBox_MSA()
    143  src1 = (v16u8)__msa_insert_d((v2i64)src1, 0, data2);  in ScaleARGBRowDownEvenBox_MSA()
    144  src1 = (v16u8)__msa_insert_d((v2i64)src1, 1, data3);  in ScaleARGBRowDownEvenBox_MSA()
    149  src2 = (v16u8)__msa_insert_d((v2i64)src2, 0, data0);  in ScaleARGBRowDownEvenBox_MSA()
    150  src2 = (v16u8)__msa_insert_d((v2i64)src2, 1, data1);  in ScaleARGBRowDownEvenBox_MSA()
    151  src3 = (v16u8)__msa_insert_d((v2i64)src3, 0, data2);  in ScaleARGBRowDownEvenBox_MSA()
    152  src3 = (v16u8)__msa_insert_d((v2i64)src3, 1, data3);  in ScaleARGBRowDownEvenBox_MSA()
    161  reg4 = (v8u16)__msa_pckev_d((v2i64)reg2, (v2i64)reg0);  in ScaleARGBRowDownEvenBox_MSA()
    162  reg5 = (v8u16)__msa_pckev_d((v2i64)reg3, (v2i64)reg1);  in ScaleARGBRowDownEvenBox_MSA()
    [all …]
|
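ScaleARGBRowDownEvenBox_MSA gathers non-contiguous pixels by loading 8 bytes at a time and placing each load into a vector lane with __msa_insert_d. The gist of that step is sketched below under the same assumptions as the other MSA sketches; the names are illustrative, not libyuv's.

#include <msa.h>
#include <stdint.h>
#include <string.h>

/* pack two separate 8-byte loads into one 16-byte vector */
static v16u8 gather_2x8(const uint8_t *p, const uint8_t *q) {
  uint64_t lo, hi;
  v2i64 v = { 0, 0 };
  memcpy(&lo, p, 8);
  memcpy(&hi, q, 8);
  v = __msa_insert_d(v, 0, (long long)lo);   /* lane 0 */
  v = __msa_insert_d(v, 1, (long long)hi);   /* lane 1 */
  return (v16u8)v;
}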
D | row_msa.cc |
    48   out_y = (v16u8)__msa_insert_d((v2i64)zero_m, 0, (int64)y_m); \
    190  reg8_m = (v8u16)__msa_pckev_d((v2i64)reg4_m, (v2i64)reg0_m); \
    191  reg9_m = (v8u16)__msa_pckev_d((v2i64)reg5_m, (v2i64)reg1_m); \
    192  reg8_m += (v8u16)__msa_pckod_d((v2i64)reg4_m, (v2i64)reg0_m); \
    193  reg9_m += (v8u16)__msa_pckod_d((v2i64)reg5_m, (v2i64)reg1_m); \
    194  reg0_m = (v8u16)__msa_pckev_d((v2i64)reg6_m, (v2i64)reg2_m); \
    195  reg1_m = (v8u16)__msa_pckev_d((v2i64)reg7_m, (v2i64)reg3_m); \
    196  reg0_m += (v8u16)__msa_pckod_d((v2i64)reg6_m, (v2i64)reg2_m); \
    197  reg1_m += (v8u16)__msa_pckod_d((v2i64)reg7_m, (v2i64)reg3_m); \
    228  reg8_m = (v8u16)__msa_pckev_d((v2i64)reg4_m, (v2i64)reg0_m); \
    [all …]
|
/external/swiftshader/third_party/LLVM/lib/Target/ARM/ |
D | ARMCallingConv.td |
    32   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    50   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    64   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    76   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    92   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    139  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    149  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    164  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    176  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrAltivec.td |
    858  def : Pat<(v16i8 (bitconvert (v2i64 VRRC:$src))), (v16i8 VRRC:$src)>;
    864  def : Pat<(v8i16 (bitconvert (v2i64 VRRC:$src))), (v8i16 VRRC:$src)>;
    870  def : Pat<(v4i32 (bitconvert (v2i64 VRRC:$src))), (v4i32 VRRC:$src)>;
    876  def : Pat<(v4f32 (bitconvert (v2i64 VRRC:$src))), (v4f32 VRRC:$src)>;
    879  def : Pat<(v2i64 (bitconvert (v16i8 VRRC:$src))), (v2i64 VRRC:$src)>;
    880  def : Pat<(v2i64 (bitconvert (v8i16 VRRC:$src))), (v2i64 VRRC:$src)>;
    881  def : Pat<(v2i64 (bitconvert (v4i32 VRRC:$src))), (v2i64 VRRC:$src)>;
    882  def : Pat<(v2i64 (bitconvert (v4f32 VRRC:$src))), (v2i64 VRRC:$src)>;
    883  def : Pat<(v2i64 (bitconvert (v1i128 VRRC:$src))), (v2i64 VRRC:$src)>;
    889  def : Pat<(v1i128 (bitconvert (v2i64 VRRC:$src))), (v1i128 VRRC:$src)>;
    [all …]
|
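The PPCInstrAltivec.td patterns above declare every 128-bit bitconvert involving v2i64 to be free: the value simply stays in its VMX register. At the source level that corresponds to a same-size vector cast, which typically generates no instructions. A trivial illustration with GCC/Clang vector extensions (names are mine):

typedef int       v4i32_t __attribute__((vector_size(16)));
typedef long long v2i64_t __attribute__((vector_size(16)));

static v2i64_t reinterpret_4x32_as_2x64(v4i32_t x) {
  return (v2i64_t)x;   /* same 128 bits, different lane view */
}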
D | README_P9.txt |
    14  (set v2i64:$vD, (int_ppc_altivec_vextractub v16i8:$vA, imm:$UIMM))
    15  (set v2i64:$vD, (int_ppc_altivec_vextractuh v8i16:$vA, imm:$UIMM))
    16  (set v2i64:$vD, (int_ppc_altivec_vextractuw v4i32:$vA, imm:$UIMM))
    17  (set v2i64:$vD, (int_ppc_altivec_vextractd v2i64:$vA, imm:$UIMM))
    36  (set v2i64:$vD, (int_ppc_altivec_vinsertw v2i64:$vA, imm:$UIMM))
    49  (set v2i64:$vD, (cttz v2i64:$vB))  // vctzd
    69  (set v2i64:$vD, (sext v2i8:$vB))
    77  (set v2i64:$vD, (sext v2i16:$vB))
    85  (set v2i64:$vD, (sext v2i32:$vB))
    95  (set v2i64:$rT, (ineg v2i64:$rA))  // vnegd
    [all …]
|
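README_P9.txt records that Power9 adds direct v2i64 patterns such as count-trailing-zeros (vctzd) and negate (vnegd). For reference, a scalar-per-lane C version of the cttz case is sketched below; the single vctzd instruction is what the listed (set v2i64:$vD, (cttz v2i64:$vB)) pattern is meant to produce, and, like the underlying builtin, the sketch is undefined for a zero lane.

typedef long long v2i64_t __attribute__((vector_size(16)));

static v2i64_t cttz_v2i64(v2i64_t b) {
  v2i64_t r;
  r[0] = __builtin_ctzll((unsigned long long)b[0]);   /* undefined if b[0] == 0 */
  r[1] = __builtin_ctzll((unsigned long long)b[1]);   /* undefined if b[1] == 0 */
  return r;
}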
/external/llvm/lib/Target/X86/ |
D | X86TargetTransformInfo.cpp |
    157  { ISD::SHL, MVT::v2i64, 1 },  in getArithmeticInstrCost()
    158  { ISD::SRL, MVT::v2i64, 1 },  in getArithmeticInstrCost()
    187  { ISD::SHL, MVT::v2i64, 1 },  in getArithmeticInstrCost()
    188  { ISD::SRL, MVT::v2i64, 2 },  in getArithmeticInstrCost()
    189  { ISD::SRA, MVT::v2i64, 2 },  in getArithmeticInstrCost()
    220  { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence.  in getArithmeticInstrCost()
    252  { ISD::SHL, MVT::v2i64, 1 }, // psllq.  in getArithmeticInstrCost()
    261  { ISD::SRL, MVT::v2i64, 1 }, // psrlq.  in getArithmeticInstrCost()
    270  { ISD::SRA, MVT::v2i64, 4 }, // 2 x psrad + shuffle.  in getArithmeticInstrCost()
    327  { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence.  in getArithmeticInstrCost()
    [all …]
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-detect-vec-redux.ll |
    20  %vpaddq_v2.i = tail call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> undef, <2 x i64> %1) #2
    21  …%vqdmlal2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> undef, <2 x i32> unde…
    22  …%vqdmlal_v3.i = tail call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %vpaddq_v2.i, <2 x i6…
    39  declare <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>) #1
    42  declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>) #1
    45  declare <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64>, <2 x i64>) #1
|
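The AArch64 test above calls the llvm.aarch64.neon.addp/sqdmull/sqadd intrinsics directly. The usual ACLE spellings of those operations are shown below; that vpaddq_s64, vqdmull_s32 and vqaddq_s64 lower to exactly those intrinsic calls is the normal Clang behaviour but is assumed here rather than taken from the test.

#include <arm_neon.h>

int64x2_t fold_and_accumulate(int64x2_t acc, int32x2_t a, int32x2_t b) {
  int64x2_t pairs = vpaddq_s64(acc, acc);   /* pairwise add of 64-bit lanes */
  int64x2_t prod  = vqdmull_s32(a, b);      /* saturating doubling widening multiply */
  return vqaddq_s64(pairs, prod);           /* saturating accumulate */
}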
D | arm64-vcvt_n.ll |
    35  %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64> %a, i32 12)
    40  %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64> %a, i32 9)
    48  declare <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64>, i32) nounwind readnone
    49  declare <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64>, i32) nounwind readnone
|
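arm64-vcvt_n.ll checks the fixed-point-to-floating-point conversions; in C those are the vcvtq_n_f64_* NEON intrinsics. A hedged example follows — the mapping from the intrinsic to the llvm.aarch64.neon.vcvtfxs2fp call shown in the test is what Clang is expected to emit, but that is an assumption, not something stated by the test itself.

#include <arm_neon.h>

/* interpret each signed 64-bit lane as fixed point with 9 fraction bits */
float64x2_t fixed_q9_to_double(int64x2_t a) {
  return vcvtq_n_f64_s64(a, 9);
}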
D | arm64-vcvt.ll |
    26   %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double> %A)
    32   declare <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double>) nounwind readnone
    57   %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double> %A)
    63   declare <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double>) nounwind readnone
    88   %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> %A)
    94   declare <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double>) nounwind readnone
    119  %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double> %A)
    125  declare <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double>) nounwind readnone
    150  %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double> %A)
    156  declare <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double>) nounwind readnone
    [all …]
|