/external/llvm/test/CodeGen/PowerPC/
vaddsplat.ll
      9  %v8i16 = type <8 x i16>
     34  define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
     35  %p = load %v8i16, %v8i16* %P
     36  %r = add %v8i16 %p, < i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30 >
     37  store %v8i16 %r, %v8i16* %S
     45  define void @test_v8i16_neg_even(%v8i16* %P, %v8i16* %S) {
     46  %p = load %v8i16, %v8i16* %P
     47  … %r = add %v8i16 %p, < i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32 >
     48  store %v8i16 %r, %v8i16* %S
    102  define void @test_v8i16_pos_odd(%v8i16* %P, %v8i16* %S) {
    [all …]
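The matches above cut each function off before its closing lines. A complete, runnable version of the first case, reconstructed on the assumption that only the `ret void` and closing brace are elided, looks like this:

    %v8i16 = type <8 x i16>

    define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
      %p = load %v8i16, %v8i16* %P
      ; 30 is outside vspltish's -16..15 immediate range, so the expected
      ; lowering is a vspltish(15) splat added to itself with vadduhm
      %r = add %v8i16 %p, <i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30>
      store %v8i16 %r, %v8i16* %S
      ret void
    }

The even/odd naming suggests the test separates constants reachable by doubling an in-range splat (30 = 15 + 15) from those that are not; that reading is an inference from the constants, not from the excerpt.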
/external/llvm/test/Analysis/BasicAA/
intrinsics.ll
     10  ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) [[ATTR:#[0-9]+]]
     11  ; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
     15  %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
     16  call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
     17  %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
     25  ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) [[ATTR]]
     26  ; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
     31  %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
     32  call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
     33  %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
    [all …]
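The CHECK lines keep %a and the vst1 but not %b: the test expects the second vld1 to be folded away once alias analysis proves the intervening store is to unrelated memory. A self-contained sketch of that situation (the noalias parameters and function name are illustrative assumptions, not the test's actual signature):

    declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly
    declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind

    define <8 x i16> @redundant_vld1(i8* noalias %p, i8* noalias %q, <8 x i16> %y) {
    entry:
      %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
      call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
      ; with %p and %q known not to alias, BasicAA reports the vst1 as
      ; NoModRef for %p, so GVN can replace %b with %a
      %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
      %sum = add <8 x i16> %a, %b
      ret <8 x i16> %sum
    }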
cs-cs.ll
      5  declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly
      6  declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
     16  %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
     17  call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
     18  %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
     25  ; CHECK: Just Ref: Ptr: i8* %p <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i…
     26  ; CHECK: NoModRef: Ptr: i8* %q <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i…
     27  ; CHECK: NoModRef: Ptr: i8* %p <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %…
     28  ; CHECK: Both ModRef: Ptr: i8* %q <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %…
     29  ; CHECK: Just Ref: Ptr: i8* %p <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i…
    [all …]
/external/llvm/lib/Target/PowerPC/
PPCInstrAltivec.td
    350  [(set v8i16:$vD, (int_ppc_altivec_mfvscr))]>;
    361  [(set v8i16:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
    388  [(int_ppc_altivec_stvehx v8i16:$rS, xoaddr:$dst)]>;
    414  def VMHADDSHS : VA1a_Int_Ty<32, "vmhaddshs", int_ppc_altivec_vmhaddshs, v8i16>;
    416  v8i16>;
    417  def VMLADDUHM : VA1a_Int_Ty<34, "vmladduhm", int_ppc_altivec_vmladduhm, v8i16>;
    441  [(set v8i16:$vD, (add v8i16:$vA, v8i16:$vB))]>;
    448  def VADDSHS : VX1_Int_Ty<832, "vaddshs", int_ppc_altivec_vaddshs, v8i16>;
    451  def VADDUHS : VX1_Int_Ty<576, "vadduhs", int_ppc_altivec_vadduhs, v8i16>;
    507  def VAVGSH : VX1_Int_Ty<1346, "vavgsh", int_ppc_altivec_vavgsh, v8i16>;
    [all …]
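Line 441 is the selection pattern for vadduhm: it matches a generic v8i16 add node rather than an intrinsic, so ordinary IR triggers it. A minimal input (hypothetical function name; assumes a PowerPC target with AltiVec enabled):

    define <8 x i16> @adduhm(<8 x i16> %a, <8 x i16> %b) {
      ; a plain vector add on 16-bit lanes; the pattern at line 441
      ; should select this to a single vadduhm
      %r = add <8 x i16> %a, %b
      ret <8 x i16> %r
    }

By contrast, the saturating forms (vaddshs, vadduhs) and the multiply-add forms are only reachable through their int_ppc_altivec_* intrinsics, as the VX1_Int_Ty and VA1a_Int_Ty wrappers above show.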
/external/llvm/test/Analysis/TypeBasedAliasAnalysis/
intrinsics.ll
     10  ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) [[NUW:#[0-9]+]]
     11  ; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
     15  %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind, !tbaa !2
     16  call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16), !tbaa !1
     17  %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind, !tbaa !2
     22  declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly
     23  declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
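The !tbaa !1 and !tbaa !2 references point at metadata outside the matched lines. Nodes of roughly this shape would complete the module: two distinct scalar tags under a common root, so TBAA alone (without any pointer analysis) can conclude the store does not touch what the loads read. The tag names are an assumption:

    !0 = !{!"tbaa root"}
    !1 = !{!"store type", !0}
    !2 = !{!"load type", !0}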
/external/llvm/lib/Target/X86/
X86TargetTransformInfo.cpp
    196  { ISD::SHL, MVT::v8i16, 1 }, // psllw. in getArithmeticInstrCost()
    201  { ISD::SRL, MVT::v8i16, 1 }, // psrlw. in getArithmeticInstrCost()
    206  { ISD::SRA, MVT::v8i16, 1 }, // psraw. in getArithmeticInstrCost()
    209  { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence in getArithmeticInstrCost()
    210  { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence in getArithmeticInstrCost()
    229  if ((VT == MVT::v8i16 && ST->hasSSE2()) || in getArithmeticInstrCost()
    251  { ISD::SHL, MVT::v8i16, 8*10 }, // Scalarized. in getArithmeticInstrCost()
    257  { ISD::SRL, MVT::v8i16, 8*10 }, // Scalarized. in getArithmeticInstrCost()
    262  { ISD::SRA, MVT::v8i16, 8*10 }, // Scalarized. in getArithmeticInstrCost()
    273  { ISD::SDIV, MVT::v8i16, 8*20 }, in getArithmeticInstrCost()
    [all …]
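Reading the comments, the two cost tiers correspond to the shape of the shift amount: a v8i16 shift by one splat constant costs 1 (a single psllw/psrlw/psraw), while per-lane amounts have no SSE2 instruction and are priced as eight scalarized lanes (8*10). Illustrative IR for both buckets (function names are hypothetical):

    define <8 x i16> @shl_splat(<8 x i16> %x) {
      ; uniform immediate shift: the cheap table entry (one psllw)
      %r = shl <8 x i16> %x, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
      ret <8 x i16> %r
    }

    define <8 x i16> @shl_per_lane(<8 x i16> %x, <8 x i16> %amt) {
      ; variable per-lane amounts: the "Scalarized" 8*10 entry
      %r = shl <8 x i16> %x, %amt
      ret <8 x i16> %r
    }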
/external/llvm/test/CodeGen/ARM/
2012-08-27-CopyPhysRegCrash.ll
     43  %28 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %26) nounwind
     46  %31 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> undef, <8 x i16> %30) nounwind
     57  %42 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %36) nounwind
     58  %43 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %41) nounwind
     62  %47 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %31, <8 x i16> %46) nounwind
     80  %65 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %60) nounwind
     87  %72 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> undef) nounwind
     88  %73 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %71) nounwind
     96  %81 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %80) nounwind
     99  %84 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %76, <8 x i16> %83) nounwind
    [all …]
neon-v8.1a.ll
      7  declare <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16>, <8 x i16>)
     12  declare <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16>, <8 x i16>)
     17  declare <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16>, <8 x i16>)
     31  %prod = call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %mhs, <8 x i16> %rhs)
     32  %retval = call <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16> %acc, <8 x i16> %prod)
     63  %prod = call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %mhs, <8 x i16> %rhs)
     64  %retval = call <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16> %acc, <8 x i16> %prod)
    102  %prod = call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %x, <8 x i16> %shuffle)
    103  %retval = call <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16> %acc, <8 x i16> %prod)
    142  %prod = call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %x, <8 x i16> %shuffle)
    [all …]
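Every match pairs a saturating rounding-doubling multiply with a saturating add or subtract, which is exactly the combination ARMv8.1-A fuses into vqrdmlah/vqrdmlsh. A standalone version of the add form (the function name is illustrative):

    declare <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16>, <8 x i16>)
    declare <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16>, <8 x i16>)

    define <8 x i16> @fuse_to_vqrdmlah(<8 x i16> %acc, <8 x i16> %mhs, <8 x i16> %rhs) {
      %prod = call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %mhs, <8 x i16> %rhs)
      ; on a v8.1a target the multiply/accumulate pair should combine
      ; into a single vqrdmlah.s16
      %retval = call <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16> %acc, <8 x i16> %prod)
      ret <8 x i16> %retval
    }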
vpadal.ll
     62  %tmp3 = call <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
     71  %tmp3 = call <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32> %tmp1, <8 x i16> %tmp2)
     89  %tmp3 = call <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
     98  %tmp3 = call <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32> %tmp1, <8 x i16> %tmp2)
    119  declare <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16>, <16 x i8>) nounwind readnone
    120  declare <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32>, <8 x i16>) nounwind readnone
    123  declare <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16>, <16 x i8>) nounwind readnone
    124  declare <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32>, <8 x i16>) nounwind readnone
vpadd.ll
     97  %tmp2 = call <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8> %tmp1)
    105  %tmp2 = call <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16> %tmp1)
    121  %tmp2 = call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %tmp1)
    129  %tmp2 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %tmp1)
    174  declare <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8>) nounwind readnone
    175  declare <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16>) nounwind readnone
    178  declare <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8>) nounwind readnone
    179  declare <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16>) nounwind readnone
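The name mangling encodes the widening: vpaddls.v4i32.v8i16 takes eight i16 lanes and returns four i32 pairwise sums. A complete function around the line-105 call (the load and function name are a reconstruction, not verbatim test content):

    declare <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16>) nounwind readnone

    define <4 x i32> @paddl_s16(<8 x i16>* %A) {
      %tmp1 = load <8 x i16>, <8 x i16>* %A
      ; signed pairwise add-long: lanes (0,1),(2,3),(4,5),(6,7) are summed
      ; into four sign-extended i32 results (vpaddl.s16)
      %tmp2 = call <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16> %tmp1)
      ret <4 x i32> %tmp2
    }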
2011-08-12-vmovqqqq-pseudo.ll
      7  …%vld3_lane = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3lane.v8i16(i8* undef, <8 …
     12  declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3lane.v8i16(i8*, <8 x i16>, <8 x i16>…
popcnt.ll
     40  %tmp2 = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %tmp1)
     77  declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>) nounwind readnone
    117  %tmp2 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %tmp1, i1 0)
    134  declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1) nounwind readnone
    173  %tmp2 = call <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16> %tmp1)
    190  declare <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16>) nounwind readnone
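Unlike vcls, ctpop and ctlz are target-independent intrinsics here; the `i1 0` on the ctlz call says a zero input is defined (yielding 16). A runnable frame for the ctpop match (reconstructed; NEON's vcnt only counts within i8 lanes, so a v8i16 ctpop is expected to need vcnt.8 plus a pairwise widening add, which is an inference from the ISA rather than from the excerpt):

    declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>) nounwind readnone

    define <8 x i16> @vctpop16(<8 x i16>* %A) {
      %tmp1 = load <8 x i16>, <8 x i16>* %A
      %tmp2 = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %tmp1)
      ret <8 x i16> %tmp2
    }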
vhadd.ll
     71  %tmp3 = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
     98  %tmp3 = call <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    120  declare <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    124  declare <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    195  %tmp3 = call <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    222  %tmp3 = call <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    244  declare <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    248  declare <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
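Four variants appear: signed and unsigned halving add (vhadd) plus the rounding forms (vrhadd), which add 1 before the shift. A complete function for the signed case (loads and name reconstructed):

    declare <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone

    define <8 x i16> @vhadds16(<8 x i16>* %A, <8 x i16>* %B) {
      %tmp1 = load <8 x i16>, <8 x i16>* %A
      %tmp2 = load <8 x i16>, <8 x i16>* %B
      ; halving add: (a + b) >> 1 computed in a wider intermediate,
      ; so the sum cannot overflow the 16-bit lane
      %tmp3 = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
      ret <8 x i16> %tmp3
    }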
/external/clang/test/CodeGen/
ppc64-vector.c
      7  typedef short v8i16 __attribute__((vector_size (16))); typedef
     37  v8i16 test_v8i16(v8i16 x) in test_v8i16()
systemz-abi-vector.c
     23  typedef __attribute__((vector_size(16))) short v8i16; typedef
     60  v8i16 pass_v8i16(v8i16 arg) { return arg; } in pass_v8i16()
/external/llvm/lib/Target/ARM/
ARMTargetTransformInfo.cpp
     95  { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, in getCastInstrCost()
     96  { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, in getCastInstrCost()
    120  { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, in getCastInstrCost()
    121  { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, in getCastInstrCost()
    149  { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 4 }, in getCastInstrCost()
    150  { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 4 }, in getCastInstrCost()
    353  {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2}, in getShuffleCost()
    378  {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16}, in getShuffleCost()
    432  { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost}, in getArithmeticInstrCost()
    433  { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost}, in getArithmeticInstrCost()
    [all …]
ARMCallingConv.td
     28  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     47  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     61  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     80  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     96  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    147  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    157  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    175  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    190  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
ARMInstrNEON.td
    1099  def VLD1LNq16Pseudo : VLD1QLNPseudo<v8i16, extloadi16>;
    1400  def VLD1DUPq16 : VLD1QDUP<{0,1,1,?}, "16", v8i16, extloadi16,
    2087  def VST1LNq16Pseudo : VST1QLNPseudo<v8i16, truncstorei16, NEONvgetlaneu>;
    2129  def VST1LNq16Pseudo_UPD : VST1QLNWBPseudo<v8i16, post_truncsti16,NEONvgetlaneu>;
    3259  def v8i16 : N2V<op24_23, op21_20, 0b01, op17_16, op11_7, 1, op4,
    3262  [(set QPR:$Vd, (v8i16 (OpNode (v8i16 QPR:$Vm))))]>;
    3293  def v8i16 : N2VQInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
    3294  itinQ, OpcodeStr, !strconcat(Dt, "16"),v8i16,v8i16,IntOp>;
    3308  v8i8, v8i16, OpNode>;
    3325  v8i8, v8i16, IntOp>;
    [all …]
/external/llvm/test/CodeGen/AArch64/
arm64-neon-across.ll
     13  declare i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16>)
     23  declare i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16>)
     29  declare i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16>)
     43  declare i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16>)
     49  declare i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16>)
     63  declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16>)
     69  declare i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16>)
    128  %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> %a)
    153  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a)
    214  %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a)
    [all …]
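These across-vector reductions collapse all eight lanes to one scalar; the add-long forms (saddlv/uaddlv) widen while summing so the total cannot wrap. A complete function for the line-128 pattern (name reconstructed):

    declare i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16>)

    define i32 @saddlv_8h(<8 x i16> %a) {
      ; widening signed sum of all eight h lanes; expected to select to
      ; an saddlv reduction on AArch64
      %r = tail call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> %a)
      ret i32 %r
    }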
arm64-neon-v8.1a.ll
      6  declare <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16>, <8 x i16>)
     13  declare <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16>, <8 x i16>)
     20  declare <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16>, <8 x i16>)
     42  %prod = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %mhs, <8 x i16> %rhs)
     43  %retval = call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> %acc, <8 x i16> %prod)
     82  %prod = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %mhs, <8 x i16> %rhs)
     83  %retval = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> %acc, <8 x i16> %prod)
    130  %prod = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %x, <8 x i16> %shuffle)
    131  %retval = call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> %acc, <8 x i16> %prod)
    178  %prod = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %x, <8 x i16> %shuffle)
    [all …]
arm64-vhadd.ll
     35  %tmp3 = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
     89  %tmp3 = call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    120  declare <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    124  declare <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    159  %tmp3 = call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    213  %tmp3 = call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    244  declare <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
    248  declare <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
/external/llvm/include/llvm/CodeGen/
MachineValueType.h
     76  v8i16 = 29, // 8 x i16 enumerator
    225  return (SimpleTy == MVT::v16i8 || SimpleTy == MVT::v8i16 || in is128BitVector()
    300  case v8i16: in getVectorElementType()
    345  case v8i16: in getVectorNumElements()
    427  case v8i16: in getSizeInBits()
    547  if (NumElements == 8) return MVT::v8i16; in getVectorVT()
/external/llvm/lib/Target/AArch64/
AArch64ISelDAGToDAG.cpp
     466  case MVT::v8i16: in SelectMLAV64LaneV128()
    2264  else if (VT == MVT::v8i16 || VT == MVT::v8f16) in Select()
    2282  else if (VT == MVT::v8i16 || VT == MVT::v8f16) in Select()
    2300  else if (VT == MVT::v8i16 || VT == MVT::v8f16) in Select()
    2318  else if (VT == MVT::v8i16 || VT == MVT::v8f16) in Select()
    2336  else if (VT == MVT::v8i16 || VT == MVT::v8f16) in Select()
    2354  else if (VT == MVT::v8i16 || VT == MVT::v8f16) in Select()
    2372  else if (VT == MVT::v8i16 || VT == MVT::v8f16) in Select()
    2390  else if (VT == MVT::v8i16 || VT == MVT::v8f16) in Select()
    2408  else if (VT == MVT::v8i16 || VT == MVT::v8f16) in Select()
    [all …]
AArch64InstrInfo.td
    1227  defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
    1285  defm : VecROLoadPat<ro128, v8i16, LDRQroW, LDRQroX>;
    1385  def : Pat <(v8i16 (scalar_to_vector (i32
    1387  (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
    1433  def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
    1595  def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
    1910  defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
    1934  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
    1935  defm : VecROStoreLane0Pat<ro16, store , v8i16, i16, hsub, STRHroW, STRHroX>;
    2013  def : Pat<(store (v8i16 FPR128:$Rt),
    [all …]
/external/llvm/test/CodeGen/Mips/msa/
basic_operations.ll
      6  @v8i16 = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
     51  …store volatile <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16>*@v8i16
     54  …store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16>*@v8i16
     57  …tore volatile <8 x i16> <i16 1, i16 1, i16 1, i16 2, i16 1, i16 1, i16 1, i16 31>, <8 x i16>*@v8i16
     61  … <i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028>, <8 x i16>*@v8i16
     64  …store volatile <8 x i16> <i16 1, i16 2, i16 1, i16 2, i16 1, i16 2, i16 1, i16 2>, <8 x i16>*@v8i16
     71  …store volatile <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>, <8 x i16>*@v8i16
    218  store volatile <8 x i16> %8, <8 x i16>*@v8i16
    280  %1 = load <8 x i16>, <8 x i16>* @v8i16
    352  %1 = load <8 x i16>, <8 x i16>* @v8i16
    [all …]
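The constants look chosen to exercise different MSA materializations: an all-equal splat fits an ldi.h immediate, while repeating multi-lane patterns can be treated as a splat at a wider element size. A sketch of the two simple cases (the encoding notes are inferences from the MSA ISA, not from the excerpt):

    @v8i16 = global <8 x i16> zeroinitializer

    define void @splats() {
      ; uniform splat of 1: representable directly by ldi.h
      ; (signed 10-bit immediate, an assumption about the encoding)
      store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16>* @v8i16
      ; alternating <1,2> pairs: on a little-endian target these are the same
      ; bits as a <4 x i32> splat of 131073 (0x00020001)
      store volatile <8 x i16> <i16 1, i16 2, i16 1, i16 2, i16 1, i16 2, i16 1, i16 2>, <8 x i16>* @v8i16
      ret void
    }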