/external/llvm/test/CodeGen/AArch64/ |
D | machine-copy-prop.ll |
  41 …call void @llvm.aarch64.neon.st2lane.v1i64.p0i8(<1 x i64> <i64 4096>, <1 x i64> <i64 -1>, i64 0, i…
  42 …%vld2_lane = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i8(<1 x i64> <i64 11…
  45 …%vld2_lane1 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i8(<1 x i64> %vld2_…
  49 …call void @llvm.aarch64.neon.st2.v1i64.p0i8(<1 x i64> %vld2_lane1.0.extract, <1 x i64> %vld2_lane1…
  55 %sqadd1 = call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> <i64 -1>, <1 x i64> <i64 1>)
  56 …%sqadd2 = call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> %vset_lane603, <1 x i64> %sqadd…
  57 %sqadd3 = call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> <i64 1>, <1 x i64> %sqadd2)
  61 %vpadal = call <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32> %sext.i)
  89 declare void @llvm.aarch64.neon.st2lane.v1i64.p0i8(<1 x i64>, <1 x i64>, i64, i8* nocapture)
  91 declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i8(<1 x i64>, <1 x i64>, i64, i…
  [all …]
|
D | trunc-v1i64.ll |
  6 ; v1i32 trunc v1i64, v1i16 trunc v1i64, v1i8 trunc v1i64.
  8 ; scalarization will cause an assertion failure, as v1i64 is a legal type in
  66 ; the i64 out of the v1i64 operand, and truncate that scalar instead.
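  The comments above describe truncations whose source is a single-element v1i64 vector: because v1i64 is a legal AArch64 type, the truncate cannot be scalarized in the usual way, so the i64 element is extracted and truncated as a scalar. A minimal illustrative sketch of such a truncation (not taken from trunc-v1i64.ll itself; the function name is made up):

    ; illustrative only — a <1 x i64> -> <1 x i32> truncate of the kind the test covers
    define <1 x i32> @trunc_v1i64_to_v1i32(<1 x i64> %a) {
      %t = trunc <1 x i64> %a to <1 x i32>
      ret <1 x i32> %t
    }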
|
D | aarch64-neon-v1i1-setcc.ll |
  4 ; This file tests the DAG node like "v1i1 SETCC v1i64, v1i64". As the v1i1 type
  6 ; As the v1i64 operands of SETCC are legal types, they will not be scalarized.
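  A minimal sketch of IR that produces a "v1i1 SETCC v1i64, v1i64" node of the kind the comments describe (illustrative only, not the test's actual code): the comparison yields an illegal <1 x i1> result, while its <1 x i64> operands stay legal.

    ; illustrative only — v1i1 result of comparing two legal v1i64 operands
    define <1 x i64> @cmp_v1i64(<1 x i64> %a, <1 x i64> %b) {
      %cmp = icmp eq <1 x i64> %a, %b           ; v1i1 SETCC v1i64, v1i64
      %res = sext <1 x i1> %cmp to <1 x i64>    ; widen the illegal v1i1 result
      ret <1 x i64> %res
    }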
|
D | arm64-neon-vector-list-spill.ll |
  142 …tail call void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64> zeroinitializer, <1 x i64> zeroini…
  153 …tail call void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64> zeroinitializer, <1 x i64> zeroini…
  164 …tail call void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64> zeroinitializer, <1 x i64> zeroini…
  173 declare void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64>, <1 x i64>, i64, i64*)
  174 declare void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64, i64*)
  175 declare void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64…
|
D | arm64-extract_subvector.ll |
  29 define <1 x i64> @v1i64(<2 x i64> %a) nounwind {
  30 ; CHECK-LABEL: v1i64:
|
/external/clang/test/CodeGen/ |
D | const-init.c |
  137 typedef long long v1i64 __attribute((vector_size(8))); in g28()
  143 static v1i64 a = (v1i64)10LL; in g28()
|
D | systemz-abi-vector.c |
  23 typedef __attribute__((vector_size(8))) long long v1i64; typedef
  94 v1i64 pass_v1i64(v1i64 arg) { return arg; } in pass_v1i64()
|
D | x86_64-arguments.c |
  261 typedef unsigned long long v1i64 __attribute__((__vector_size__(8))); typedef
  265 v1i64 f34(v1i64 arg) { return arg; } in f34()
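  The three Clang tests above declare v1i64 as an 8-byte vector type and pass it by value; at the LLVM IR level such a function is built around the <1 x i64> type, with the actual argument coercion left to the target ABI (which is exactly what these tests check). A minimal sketch, assuming the vector is represented as <1 x i64> in IR and ignoring any target-specific coercion (function name mirrors the SystemZ test above but is otherwise illustrative):

    ; illustrative only — pass-by-value of a single-element i64 vector
    define <1 x i64> @pass_v1i64(<1 x i64> %arg) {
      ret <1 x i64> %arg
    }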
|
/external/llvm/lib/Target/ARM/ |
D | ARMCallingConv.td |
  27 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  46 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  60 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  79 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  95 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  151 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  161 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  179 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  194 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
|
/external/llvm/test/CodeGen/ARM/ |
D | neon_vshl_minint.ll |
  9 …%vshl.i = tail call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> undef, <1 x i64> <i64 -922337…
  13 declare <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64>, <1 x i64>)
|
D | vpadal.ll |
  26 %tmp3 = call <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64> %tmp1, <2 x i32> %tmp2)
  53 %tmp3 = call <1 x i64> @llvm.arm.neon.vpadalu.v1i64.v2i32(<1 x i64> %tmp1, <2 x i32> %tmp2)
  113 declare <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64>, <2 x i32>) nounwind readnone
  117 declare <1 x i64> @llvm.arm.neon.vpadalu.v1i64.v2i32(<1 x i64>, <2 x i32>) nounwind readnone
|
D | vqshl.ll |
  35 %tmp3 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  71 %tmp3 = call <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  175 %tmp2 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
  207 %tmp2 = call <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
  239 %tmp2 = call <1 x i64> @llvm.arm.neon.vqshiftsu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
  342 declare <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
  347 declare <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
  352 declare <1 x i64> @llvm.arm.neon.vqshiftsu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
  401 %tmp3 = call <1 x i64> @llvm.arm.neon.vqrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  437 %tmp3 = call <1 x i64> @llvm.arm.neon.vqrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  [all …]
|
D | 2014-01-09-pseudo_expand_implicit_reg.ll |
  30 …call void @llvm.arm.neon.vst4.p0i8.v1i64(i8* %m, <1 x i64> %s0, <1 x i64> %s1, <1 x i64> %s2, <1 x…
  53 declare void @llvm.arm.neon.vst4.p0i8.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32)
|
D | vqsub.ll |
  35 %tmp3 = call <1 x i64> @llvm.arm.neon.vqsubs.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  71 %tmp3 = call <1 x i64> @llvm.arm.neon.vqsubu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  150 declare <1 x i64> @llvm.arm.neon.vqsubs.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
  155 declare <1 x i64> @llvm.arm.neon.vqsubu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
|
D | vqadd.ll |
  35 %tmp3 = call <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  71 %tmp3 = call <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  150 declare <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
  155 declare <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
|
D | vshl.ll |
  35 %tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  71 %tmp3 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  178 %tmp2 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
  244 %tmp2 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
  276 %tmp2 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
  347 declare <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
  352 declare <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
  396 %tmp3 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  432 %tmp3 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  536 %tmp2 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
  [all …]
|
D | vpadd.ll |
  65 %tmp2 = call <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32> %tmp1)
  89 %tmp2 = call <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32> %tmp1)
  168 declare <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32>) nounwind readnone
  172 declare <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32>) nounwind readnone
|
/external/llvm/include/llvm/CodeGen/ |
D | MachineValueType.h |
  95 v1i64 = 45, // 1 x i64 enumerator
  235 SimpleTy == MVT::v2i32 || SimpleTy == MVT::v1i64 || in is64BitVector()
  342 case v1i64: in getVectorElementType()
  415 case v1i64: in getVectorNumElements()
  467 case v1i64: in getSizeInBits()
  626 if (NumElements == 1) return MVT::v1i64; in getVectorVT()
|
/external/llvm/lib/Target/AArch64/ |
D | AArch64ISelDAGToDAG.cpp |
  2489 else if (VT == MVT::v1i64 || VT == MVT::v1f64) in Select()
  2507 else if (VT == MVT::v1i64 || VT == MVT::v1f64) in Select()
  2525 else if (VT == MVT::v1i64 || VT == MVT::v1f64) in Select()
  2543 else if (VT == MVT::v1i64 || VT == MVT::v1f64) in Select()
  2561 else if (VT == MVT::v1i64 || VT == MVT::v1f64) in Select()
  2579 else if (VT == MVT::v1i64 || VT == MVT::v1f64) in Select()
  2597 else if (VT == MVT::v1i64 || VT == MVT::v1f64) in Select()
  2615 else if (VT == MVT::v1i64 || VT == MVT::v1f64) in Select()
  2633 else if (VT == MVT::v1i64 || VT == MVT::v1f64) in Select()
  2647 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || in Select()
  [all …]
|
D | AArch64CallingConvention.td |
  70 CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
  79 CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
  106 CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
  151 CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
  161 CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
  180 CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
  236 CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
|
D | AArch64SchedA57.td |
  341 // D form - v1i8, v1i16, v1i32, v1i64
  368 …e_5cyc_1W], (instregex "^(P?MUL|SQR?DMULH)(v8i8|v4i16|v2i32|v1i8|v1i16|v1i32|v1i64)(_indexed)?$")>;
  386 def : InstRW<[A57Write_3cyc_1W], (instregex "^PMULL(v1i64|v2i64)")>;
  404 def : InstRW<[A57Write_4cyc_1X], (instregex "^[SU][QR]{1,2}SHL(v1i8|v1i16|v1i32|v1i64|v8i8|v4i16|v2…
  417 // D form - v1i32, v1i64
  432 …yc_1V], (instregex "^(FACGE|FACGT|FCMEQ|FCMGE|FCMGT|FCMLE|FCMLT)(v2f32|32|64|v1i32|v2i32|v1i64)")>;
  439 …InstRW<[A57Write_5cyc_1V], (instregex "^[FVSU]CVT([AMNPZ][SU])?(_Int)?(v2f32|v1i32|v2i32|v1i64)")>;
  470 def : InstRW<[A57Write_5cyc_1V], (instregex "^FMULX?(v2f32|v1i32|v2i32|v1i64|32|64)")>;
  479 def : InstRW<[A57WriteFPVMAD, A57ReadFPVMA5], (instregex "^FML[AS](v2f32|v1i32|v2i32|v1i64)")>;
  494 // D form - v1i8, v1i16, v1i32, v1i64
  [all …]
|
D | AArch64InstrInfo.td |
  1343 def : Pat <(v1i64 (scalar_to_vector (i64
  1348 def : Pat <(v1i64 (scalar_to_vector (i64
  1375 defm : VecROLoadPat<ro64, v1i64, LDRDroW, LDRDroX>;
  1497 def : Pat <(v1i64 (scalar_to_vector (i64
  1521 def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
  1682 def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
  2000 defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
  2097 def : Pat<(store (v1i64 FPR64:$Rt),
  2193 def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
  2295 def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
  [all …]
|
/external/llvm/test/CodeGen/X86/ |
D | mmx-arg-passing.ll |
  6 ; On Darwin x86-32, v1i64 values are passed in memory. In this example, they
  9 ; On Darwin x86-64, v1i64 values are passed in 64-bit GPRs.
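  The comments above concern how a <1 x i64> argument is passed across a call: in memory on Darwin x86-32, in a 64-bit GPR on Darwin x86-64. A minimal sketch of a call that exercises this (illustrative only; @caller and @receiver are hypothetical names, not functions from mmx-arg-passing.ll):

    ; illustrative only — passing a v1i64 value through a call boundary
    declare void @receiver(<1 x i64>)

    define void @caller(<1 x i64> %v) {
      call void @receiver(<1 x i64> %v)
      ret void
    }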
|
/external/llvm/test/Transforms/LoopStrengthReduce/ARM/ |
D | ivchain-ARM.ll |
  242 %12 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %.05, i32 1) nounwind
  244 %14 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %13, i32 1) nounwind
  250 %19 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %18, i32 1) nounwind
  252 %21 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %20, i32 1) nounwind
  258 %26 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %25, i32 1) nounwind
  260 %28 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %27, i32 1) nounwind
  266 %33 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %32, i32 1) nounwind
  268 %35 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %34, i32 1) nounwind
  293 declare <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8*, i32) nounwind readonly
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | v1i64-kernel-arg.ll |
  11 ; i64 arg works, v1i64 arg does not.
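  The comment above records that a plain i64 kernel argument lowers correctly while a v1i64 argument does not. A minimal sketch of a function with such an argument (illustrative only; the name, signature, and lack of AMDGPU kernel calling-convention details are assumptions, not the test's actual code):

    ; illustrative only — a <1 x i64> argument stored through a global pointer
    define void @v1i64_arg(<1 x i64> addrspace(1)* %out, <1 x i64> %in) {
      store <1 x i64> %in, <1 x i64> addrspace(1)* %out
      ret void
    }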
|