/external/llvm/test/CodeGen/PowerPC/ |
D | vaddsplat.ll |
     10  %v16i8 = type <16 x i8>
     56  define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
     57  %p = load %v16i8, %v16i8* %P
     58  …%r = add %v16i8 %p, < i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16,…
     59  store %v16i8 %r, %v16i8* %S
     67  define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
     68  %p = load %v16i8, %v16i8* %P
     69  …%r = add %v16i8 %p, < i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -…
     70  store %v16i8 %r, %v16i8* %S
    126  define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
    [all …]
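
For orientation, a hedged, self-contained sketch of the pattern the truncated matches above exercise: adding a constant splat to every i8 lane of a v16i8 vector, which the PPC backend can then match to vector splat-immediate forms. The function name and constant below are illustrative, not taken from the test:

    %v16i8 = type <16 x i8>

    ; Illustrative only: load, add a 16-splat, store.
    define void @add_splat16(%v16i8* %P, %v16i8* %S) {
      %p = load %v16i8, %v16i8* %P
      %r = add %v16i8 %p, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16,
                           i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
      store %v16i8 %r, %v16i8* %S
      ret void
    }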
|
D | vec_clz.ll |
      5  declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>) nounwind readnone
     11  %vcnt = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %x)
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrAltivec.td |
     18  // Since we canonicalize buildvectors to v16i8, all vnots "-1" operands will be
     21  (xor node:$in, (bitconvert (v16i8 immAllOnesV)))>;
     52  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     56  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     60  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     64  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     68  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     72  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     78  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
    106  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
    [all …]
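
The comment at line 18 refers to IR of the shape sketched below: LLVM IR has no vector "not" instruction, so a complement is written as xor with an all-ones splat, and after build_vector canonicalization the -1 operand is a v16i8 immAllOnesV regardless of the value's nominal element type. A minimal sketch, with an illustrative function name:

    ; Illustrative vnot: xor with an all-ones v16i8 splat, the exact shape
    ; the Altivec vnot pattern fragment above matches.
    define <16 x i8> @vnot(<16 x i8> %in) {
      %r = xor <16 x i8> %in, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                               i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
      ret <16 x i8> %r
    }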
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-tbl.ll |
     13  %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> %A, <16 x i8> %B)
     27  %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C)
     41  …%tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl3.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16…
     55  …%tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl4.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16…
     60  declare <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
     62  declare <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
     64  declare <16 x i8> @llvm.aarch64.neon.tbl3.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>) nounwin…
     66  declare <16 x i8> @llvm.aarch64.neon.tbl4.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i…
     78  %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbx1.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C)
     92  …%tmp3 = call <16 x i8> @llvm.aarch64.neon.tbx2.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16…
    [all …]
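
As a hedged aside (not part of the test), tbl1 is a per-byte table lookup: each byte of the second operand selects a byte of the first, and indices of 16 or more produce 0. Descending indices therefore reverse the bytes; the function name below is illustrative:

    declare <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8>, <16 x i8>)

    ; Illustrative: reverse the 16 bytes of %table via descending indices.
    define <16 x i8> @reverse_bytes(<16 x i8> %table) {
      %r = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(
               <16 x i8> %table,
               <16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8,
                          i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
      ret <16 x i8> %r
    }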
|
D | arm64-neon-across.ll |
     15  declare i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8>)
     25  declare i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8>)
     31  declare i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8>)
     45  declare i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8>)
     51  declare i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8>)
     65  declare i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8>)
     71  declare i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8>)
    119  %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8> %a)
    144  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a)
    205  %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a)
    [all …]
|
D | arm64-copy-tuple.ll |
    105  …%vec = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8…
    110  …tail call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2…
    113  …tail call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2…
    124  …%vec = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0v16…
    131  …tail call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2…
    134  …tail call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2…
    140  declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8>*)
    141  declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0v16i8(<16 x i…
    145  declare void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*)
    146  declare void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*)
|
D | arm64-vecCmpBr.ll |
     42  %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %0) #3
     89  %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %0) #3
    135  %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %0) #3
    181  %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %0) #3
    195  declare i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8>) #2
    199  declare i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8>) #2
|
D | arm64-simd-scalar-to-vector.ll |
     13  %tmp = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a) nounwind
     22  declare i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8>) nounwind readnone
|
D | arm64-vhadd.ll |
     17  %tmp3 = call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
     71  %tmp3 = call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    119  declare <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    123  declare <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    141  %tmp3 = call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    195  %tmp3 = call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    243  declare <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    247  declare <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
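
For reference, a hedged sketch of what the signed halving add (shadd) computes per lane: (sext(a) + sext(b)) >> 1, with the sum widened so the carry is not lost, which plain i8 arithmetic would truncate. The rounding variants srhadd/urhadd add 1 before shifting. Names below are illustrative:

    ; Illustrative reference semantics for shadd on v16i8, via widening.
    define <16 x i8> @shadd_ref(<16 x i8> %a, <16 x i8> %b) {
      %wa = sext <16 x i8> %a to <16 x i16>
      %wb = sext <16 x i8> %b to <16 x i16>
      %sum = add <16 x i16> %wa, %wb
      %half = ashr <16 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1,
                                     i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
      %r = trunc <16 x i16> %half to <16 x i8>
      ret <16 x i8> %r
    }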
|
/external/clang/test/CodeGen/ |
D | systemz-abi-vector.c |
     22  typedef __attribute__((vector_size(16))) char v16i8; typedef
     45  v16i8 pass_v16i8(v16i8 arg) { return arg; } in pass_v16i8()
    198  v16i8 va_v16i8(__builtin_va_list l) { return __builtin_va_arg(l, v16i8); } in va_v16i8()
|
/external/llvm/test/CodeGen/X86/ |
D | vshift-6.ll |
      8  ; This happens for example when lowering a shift left of a MVT::v16i8 vector.
     12  ; B = BITCAST MVT::v16i8, A
     16  ; D = BITCAST MVT::v16i8, C
     22  ; Where 'r' is a vector of type MVT::v16i8, and
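
The comments excerpted above describe SelectionDAG lowering; a hedged sketch of source IR that reaches this path follows. x86 has no byte-granular vector shift, so the v16i8 value is bitcast to a wider element type, shifted, masked, and bitcast back, which is what the BITCAST lines refer to:

    ; Illustrative only: shift every i8 lane left by 2.
    define <16 x i8> @shl_v16i8_by2(<16 x i8> %a) {
      %r = shl <16 x i8> %a, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2,
                              i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
      ret <16 x i8> %r
    }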
|
/external/llvm/test/CodeGen/ARM/ |
D | 2013-10-11-select-stalls.ll |
      8  %vld1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %foo, i32 1)
      9  %vld2 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %bar, i32 1)
     16  declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* , i32 )
|
D | vcnt.ll |
     16  %tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
     21  declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) nounwind readnone
     51  %tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)
     75  declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1) nounwind readnone
    107  %tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)
    131  declare <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8>) nounwind readnone
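
A hedged note on the i1 operand visible above: llvm.ctlz (and llvm.cttz) take a second, boolean operand stating whether a zero input is undefined; i1 0 requests the well-defined result, the element bit width (8 per lane here). Sketch with an illustrative function name:

    declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1)

    ; i1 0: a zero lane yields 8 rather than an undefined value.
    define <16 x i8> @clz_defined_at_zero(<16 x i8> %v) {
      %r = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %v, i1 0)
      ret <16 x i8> %r
    }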
|
D | 2012-08-27-CopyPhysRegCrash.ll |
      8  declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8*, i32) nounwind readonly
     10  declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>, i32) nounwind
     22  %7 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* null, i32 1)
     25  %10 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %9, i32 1)
     27  %12 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %6, i32 1)
     31  %16 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %15, i32 1)
    114  tail call void @llvm.arm.neon.vst1.v16i8(i8* null, <16 x i8> %98, i32 1)
|
D | popcnt.ll |
     16  %tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
     75  declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) nounwind readnone
    109  %tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)
    133  declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1) nounwind readnone
    165  %tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)
    189  declare <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8>) nounwind readnone
|
D | vhadd.ll |
     62  %tmp3 = call <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
     89  %tmp3 = call <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    119  declare <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    123  declare <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    186  %tmp3 = call <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    213  %tmp3 = call <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    243  declare <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
    247  declare <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
|
/external/llvm/lib/Target/X86/ |
D | X86TargetTransformInfo.cpp |
    195  { ISD::SHL, MVT::v16i8, 1 }, // psllw. in getArithmeticInstrCost()
    200  { ISD::SRL, MVT::v16i8, 1 }, // psrlw. in getArithmeticInstrCost()
    205  { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb. in getArithmeticInstrCost()
    250  { ISD::SHL, MVT::v16i8, 30 }, // cmpeqb sequence. in getArithmeticInstrCost()
    256  { ISD::SRL, MVT::v16i8, 16*10 }, // Scalarized. in getArithmeticInstrCost()
    261  { ISD::SRA, MVT::v16i8, 16*10 }, // Scalarized. in getArithmeticInstrCost()
    272  { ISD::SDIV, MVT::v16i8, 16*20 }, in getArithmeticInstrCost()
    276  { ISD::UDIV, MVT::v16i8, 16*20 }, in getArithmeticInstrCost()
    405  {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3} in getShuffleCost()
    424  {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3} // pshufb + pshufb + or in getShuffleCost()
    [all …]
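
For context on the { ISD::SRA, MVT::v16i8, 4 } entry above, a hedged sketch of IR it prices: x86 has no byte-wide arithmetic shift, so the backend emits roughly the psrlw/pand/pxor/psubb sequence the comment names, hence a cost of 4. The function below is illustrative, not from the source:

    ; Illustrative only: per-lane arithmetic shift right of v16i8.
    define <16 x i8> @sra_v16i8_by3(<16 x i8> %a) {
      %r = ashr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3,
                               i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
      ret <16 x i8> %r
    }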
|
/external/llvm/lib/Target/R600/ |
D | SITypeRewriter.cpp |
     37  Type *v16i8; member in __anon33e15f910111::SITypeRewriter
     58  v16i8 = VectorType::get(Type::getInt8Ty(M.getContext()), 16); in doInitialization()
     85  if (ElemTy == v16i8) { in visitLoadInst()
    110  if (Arg->getType() == v16i8) { in visitCallInst()
|
/external/llvm/lib/Target/ARM/ |
D | ARMCallingConv.td |
     28  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     47  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     61  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     80  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     96  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    147  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    157  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    175  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    190  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
|
D | ARMTargetTransformInfo.cpp |
     97  { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, in getCastInstrCost()
     98  { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, in getCastInstrCost()
    101  { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 }, in getCastInstrCost()
    354  {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}}; in getShuffleCost()
    380  {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}}; in getShuffleCost()
    436  { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost}, in getArithmeticInstrCost()
    437  { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost}, in getArithmeticInstrCost()
    438  { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost}, in getArithmeticInstrCost()
    439  { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost}, in getArithmeticInstrCost()
|
/external/llvm/test/CodeGen/Thumb2/ |
D | v8_IT_1.ll |
      9  %vld1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %bar, i32 1)
     16  declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* , i32 )
|
/external/llvm/include/llvm/CodeGen/ |
D | MachineValueType.h |
     70  v16i8 = 23, // 16 x i8 enumerator
    225  return (SimpleTy == MVT::v16i8 || SimpleTy == MVT::v8i16 || in is128BitVector()
    294  case v16i8: in getVectorElementType()
    338  case v16i8: in getVectorNumElements()
    426  case v16i8: in getSizeInBits()
    539  if (NumElements == 16) return MVT::v16i8; in getVectorVT()
|
/external/llvm/test/CodeGen/Mips/msa/ |
D | basic_operations.ll |
      5  @v16i8 = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, …
     15  … 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8>*@v16i8
     18  … 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8>*@v16i8
     21  …1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 31>, <16 x i8>*@v16i8
     25  … 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6>, <16 x i8>*@v16i8
     29  … 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0>, <16 x i8>*@v16i8
     33  … 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4>, <16 x i8>*@v16i8
     40  … 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, <16 x i8>*@v16i8
    184  store volatile <16 x i8> %16, <16 x i8>*@v16i8
    261  %1 = load <16 x i8>, <16 x i8>* @v16i8
    [all …]
|
/external/llvm/lib/Target/AArch64/ |
D | AArch64ISelDAGToDAG.cpp |
    2260  else if (VT == MVT::v16i8) in Select()
    2278  else if (VT == MVT::v16i8) in Select()
    2296  else if (VT == MVT::v16i8) in Select()
    2314  else if (VT == MVT::v16i8) in Select()
    2332  else if (VT == MVT::v16i8) in Select()
    2350  else if (VT == MVT::v16i8) in Select()
    2368  else if (VT == MVT::v16i8) in Select()
    2386  else if (VT == MVT::v16i8) in Select()
    2404  else if (VT == MVT::v16i8) in Select()
    2420  if (VT == MVT::v16i8 || VT == MVT::v8i8) in Select()
    [all …]
|
D | AArch64InstrInfo.td |
    1224  defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v16i8, LDRBroW, LDRBroX, bsub>;
    1287  defm : VecROLoadPat<ro128, v16i8, LDRQroW, LDRQroX>;
    1377  def : Pat <(v16i8 (scalar_to_vector (i32
    1379  (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
    1431  def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
    1597  def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
    1911  defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
    2010  def : Pat<(store (v16i8 FPR128:$Rt),
    2105  def : Pat<(store (v16i8 FPR128:$Rt),
    2202  def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
    [all …]
|