/external/libjpeg-turbo/simd/nasm/

jpeg_nbits_table.inc:
    2050  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, \
    2051  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, \
    2052  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, \
    2053  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, \
    2054  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, \
    2055  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, \
    2056  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, \
    2057  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, \
    2058  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, \
    2059  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, \
    [all …]

/external/libjpeg-turbo/

jpeg_nbits_table.h:
    2050  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2051  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2052  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2053  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2054  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2055  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2056  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2057  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2058  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2059  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    [all …]

/external/boringssl/linux-aarch64/crypto/fipsmodule/

aesv8-armx64.S:
    35  stp x29,x30,[sp,#-16]!
    54  eor v0.16b,v0.16b,v0.16b
    55  ld1 {v3.16b},[x0],#16
    65  tbl v6.16b,{v3.16b},v2.16b
    66  ext v5.16b,v0.16b,v3.16b,#12
    67  st1 {v3.4s},[x2],#16
    68  aese v6.16b,v0.16b
    71  eor v3.16b,v3.16b,v5.16b
    72  ext v5.16b,v0.16b,v5.16b,#12
    73  eor v3.16b,v3.16b,v5.16b
    [all …]

vpaes-armv8.S:
    120  movi v17.16b, #0x0f
    147  adrp x11, .Lk_mc_forward+16
    148  add x11, x11, :lo12:.Lk_mc_forward+16
    150  ld1 {v16.2d}, [x9], #16        // vmovdqu (%r9), %xmm5   # round0 key
    151  and v1.16b, v7.16b, v17.16b    // vpand %xmm9, %xmm0, %xmm1
    152  ushr v0.16b, v7.16b, #4        // vpsrlb $4, %xmm0, %xmm0
    153  tbl v1.16b, {v20.16b}, v1.16b  // vpshufb %xmm1, %xmm2, %xmm1
    154                                 // vmovdqa .Lk_ipt+16(%rip), %xmm3  # ipthi
    155  tbl v2.16b, {v21.16b}, v0.16b  // vpshufb %xmm0, %xmm3, %xmm2
    156  eor v0.16b, v1.16b, v16.16b    // vpxor %xmm5, %xmm1, %xmm0
    [all …]

ghashv8-armx64.S:
    25  movi v19.16b,#0xe1
    27  ext v3.16b,v17.16b,v17.16b,#8
    30  ext v16.16b,v18.16b,v19.16b,#8   //t0=0xc2....01
    33  and v18.16b,v18.16b,v16.16b
    35  ext v18.16b,v18.16b,v18.16b,#8
    36  and v16.16b,v16.16b,v17.16b
    37  orr v3.16b,v3.16b,v18.16b        //H<<<=1
    38  eor v20.16b,v3.16b,v16.16b       //twisted H
    39  st1 {v20.2d},[x0],#16            //store Htable[0]
    42  ext v16.16b,v20.16b,v20.16b,#8   //Karatsuba pre-processing
    [all …]

/external/boringssl/ios-aarch64/crypto/fipsmodule/

aesv8-armx64.S:
    34  stp x29,x30,[sp,#-16]!
    53  eor v0.16b,v0.16b,v0.16b
    54  ld1 {v3.16b},[x0],#16
    64  tbl v6.16b,{v3.16b},v2.16b
    65  ext v5.16b,v0.16b,v3.16b,#12
    66  st1 {v3.4s},[x2],#16
    67  aese v6.16b,v0.16b
    70  eor v3.16b,v3.16b,v5.16b
    71  ext v5.16b,v0.16b,v5.16b,#12
    72  eor v3.16b,v3.16b,v5.16b
    [all …]

vpaes-armv8.S:
    119  movi v17.16b, #0x0f
    146  adrp x11, Lk_mc_forward@PAGE+16
    147  add x11, x11, Lk_mc_forward@PAGEOFF+16
    149  ld1 {v16.2d}, [x9], #16        // vmovdqu (%r9), %xmm5   # round0 key
    150  and v1.16b, v7.16b, v17.16b    // vpand %xmm9, %xmm0, %xmm1
    151  ushr v0.16b, v7.16b, #4        // vpsrlb $4, %xmm0, %xmm0
    152  tbl v1.16b, {v20.16b}, v1.16b  // vpshufb %xmm1, %xmm2, %xmm1
    153                                 // vmovdqa .Lk_ipt+16(%rip), %xmm3  # ipthi
    154  tbl v2.16b, {v21.16b}, v0.16b  // vpshufb %xmm0, %xmm3, %xmm2
    155  eor v0.16b, v1.16b, v16.16b    // vpxor %xmm5, %xmm1, %xmm0
    [all …]

ghashv8-armx64.S:
    24  movi v19.16b,#0xe1
    26  ext v3.16b,v17.16b,v17.16b,#8
    29  ext v16.16b,v18.16b,v19.16b,#8   //t0=0xc2....01
    32  and v18.16b,v18.16b,v16.16b
    34  ext v18.16b,v18.16b,v18.16b,#8
    35  and v16.16b,v16.16b,v17.16b
    36  orr v3.16b,v3.16b,v18.16b        //H<<<=1
    37  eor v20.16b,v3.16b,v16.16b       //twisted H
    38  st1 {v20.2d},[x0],#16            //store Htable[0]
    41  ext v16.16b,v20.16b,v20.16b,#8   //Karatsuba pre-processing
    [all …]

/external/llvm/test/CodeGen/Hexagon/

reg-scavenger-valid-slot.ll:
    20  define void @foo(<16 x i32>* nocapture readnone %p) #0 {
    39  …, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 16
    52  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    53  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    54  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    55  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    56  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    57  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    58  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    59  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/

reg-scavenger-valid-slot.ll:
    20  define void @foo(<16 x i32>* nocapture readnone %p) #0 {
    39  …, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 16
    52  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    53  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    54  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    55  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    56  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    57  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    58  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    59  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    [all …]

reg-scavengebug.ll:
    7   declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #0
    10  declare <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32>) #0
    13  declare <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32>, <16 x i32>) #0
    16  declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #0
    19  declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #0
    26  %v2 = bitcast i16* %a0 to <16 x i32>*
    27  %v3 = bitcast i8* %a3 to <16 x i32>*
    29  %v5 = bitcast i32* %v4 to <16 x i32>*
    30  %v6 = load <16 x i32>, <16 x i32>* %v5, align 64, !tbaa !0
    31  %v7 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 32768)
    [all …]

/external/libhevc/common/

ihevc_quant_tables.c:
    49  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    50  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    51  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    52  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    53  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    54  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    55  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    56  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    57  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    58  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SystemZ/

frame-19.ll:
    5   ; We need to allocate a 16-byte spill slot and save the 8 call-saved FPRs.
    6   ; The frame size should be exactly 160 + 16 + 8 * 8 = 240.
    7   define void @f1(<16 x i8> *%ptr) {
    30  %v0 = load volatile <16 x i8>, <16 x i8> *%ptr
    31  %v1 = load volatile <16 x i8>, <16 x i8> *%ptr
    32  %v2 = load volatile <16 x i8>, <16 x i8> *%ptr
    33  %v3 = load volatile <16 x i8>, <16 x i8> *%ptr
    34  %v4 = load volatile <16 x i8>, <16 x i8> *%ptr
    35  %v5 = load volatile <16 x i8>, <16 x i8> *%ptr
    36  %v6 = load volatile <16 x i8>, <16 x i8> *%ptr
    [all …]

/external/llvm/test/CodeGen/SystemZ/

frame-19.ll:
    5   ; We need to allocate a 16-byte spill slot and save the 8 call-saved FPRs.
    6   ; The frame size should be exactly 160 + 16 + 8 * 8 = 240.
    7   define void @f1(<16 x i8> *%ptr) {
    30  %v0 = load volatile <16 x i8>, <16 x i8> *%ptr
    31  %v1 = load volatile <16 x i8>, <16 x i8> *%ptr
    32  %v2 = load volatile <16 x i8>, <16 x i8> *%ptr
    33  %v3 = load volatile <16 x i8>, <16 x i8> *%ptr
    34  %v4 = load volatile <16 x i8>, <16 x i8> *%ptr
    35  %v5 = load volatile <16 x i8>, <16 x i8> *%ptr
    36  %v6 = load volatile <16 x i8>, <16 x i8> *%ptr
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/X86/

rem.ll:
    23  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i32 = srem <16 x i32…
    26  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i16 = srem <16 x i16…
    29  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i8 = srem <16 x i8> …
    42  ; BTVER2-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i32 = srem <16 x i3…
    45  ; BTVER2-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i16 = srem <16 x i1…
    48  ; BTVER2-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i8 = srem <16 x i8>…
    61  %V16i32 = srem <16 x i32> undef, undef
    65  %V16i16 = srem <16 x i16> undef, undef
    69  %V16i8 = srem <16 x i8> undef, undef
    85  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i32 = urem <16 x i32…
    [all …]

div.ll:
    23  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i32 = sdiv <16 x i32…
    26  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i16 = sdiv <16 x i16…
    29  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i8 = sdiv <16 x i8> …
    42  ; BTVER2-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i32 = sdiv <16 x i3…
    45  ; BTVER2-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i16 = sdiv <16 x i1…
    48  ; BTVER2-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i8 = sdiv <16 x i8>…
    61  %V16i32 = sdiv <16 x i32> undef, undef
    65  %V16i16 = sdiv <16 x i16> undef, undef
    69  %V16i8 = sdiv <16 x i8> undef, undef
    85  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i32 = udiv <16 x i32…
    [all …]

/external/libpng/powerpc/

filter_vsx_intrinsics.c:
    5    * Last changed in libpng 1.6.29 [March 16, 2017]
    45   png_size_t unaligned_top = 16 - (((png_size_t)rp % 16));\
    47   if(unaligned_top == 16)\
    64   /* Altivec operations require 16-byte aligned data   in png_read_filter_row_up_vsx()
    75   while( istop >= 16 )                                 in png_read_filter_row_up_vsx()
    84   pp += 16;                                            in png_read_filter_row_up_vsx()
    85   rp += 16;                                            in png_read_filter_row_up_vsx()
    86   istop -= 16;                                         in png_read_filter_row_up_vsx()
    91   /* If byte count of row is not divisible by 16       in png_read_filter_row_up_vsx()
    103  …tic const vector unsigned char VSX_LEFTSHIFTED1_4 = {16,16,16,16, 0, 1, 2, 3,16,16,16,16,16,16,16,…
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/

misched-fusion-aes.ll:
    12  declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
    13  declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %d)
    14  declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d, <16 x i8> %k)
    15  declare <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %d)
    17  define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
    18  %d0 = load <16 x i8>, <16 x i8>* %a0
    19  %a1 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 1
    20  %d1 = load <16 x i8>, <16 x i8>* %a1
    21  %a2 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 2
    22  %d2 = load <16 x i8>, <16 x i8>* %a2
    [all …]

arm64-tbl.ll:
    3   define <8 x i8> @tbl1_8b(<16 x i8> %A, <8 x i8> %B) nounwind {
    6   %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %A, <8 x i8> %B)
    10  define <16 x i8> @tbl1_16b(<16 x i8> %A, <16 x i8> %B) nounwind {
    12  ; CHECK: tbl.16b
    13  %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> %A, <16 x i8> %B)
    14  ret <16 x i8> %tmp3
    17  define <8 x i8> @tbl2_8b(<16 x i8> %A, <16 x i8> %B, <8 x i8> %C) {
    20  %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> %A, <16 x i8> %B, <8 x i8> %C)
    24  define <16 x i8> @tbl2_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
    26  ; CHECK: tbl.16b
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/

avx512-vpternlog-commute.ll:
    6   declare <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i32)
    8   define <16 x i32> @vpternlog_v16i32_012(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
    13  …%1 = call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x…
    14  ret <16 x i32> %1
    17  define <16 x i32> @vpternlog_v16i32_102(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
    22  …%1 = call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x…
    23  ret <16 x i32> %1
    26  define <16 x i32> @vpternlog_v16i32_210(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
    31  …%1 = call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x…
    32  ret <16 x i32> %1
    [all …]

/external/boringssl/src/crypto/fipsmodule/aes/asm/

vpaes-armv8.pl:
    150  my ($invlo,$invhi,$iptlo,$ipthi,$sbou,$sbot) = map("v$_.16b",(18..23));
    151  my ($sb1u,$sb1t,$sb2u,$sb2t) = map("v$_.16b",(24..27));
    152  my ($sb9u,$sb9t,$sbdu,$sbdt,$sbbu,$sbbt,$sbeu,$sbet)=map("v$_.16b",(24..31));
    166  movi v17.16b, #0x0f
    193  adrp x11, :pg_hi21:.Lk_mc_forward+16
    194  add x11, x11, :lo12:.Lk_mc_forward+16
    196  ld1 {v16.2d}, [x9], #16        // vmovdqu (%r9), %xmm5   # round0 key
    197  and v1.16b, v7.16b, v17.16b    // vpand %xmm9, %xmm0, %xmm1
    198  ushr v0.16b, v7.16b, #4        // vpsrlb \$4, %xmm0, %xmm0
    199  tbl v1.16b, {$iptlo}, v1.16b   // vpshufb %xmm1, %xmm2, %xmm1
    [all …]

/external/llvm/test/CodeGen/AArch64/

arm64-tbl.ll:
    3   define <8 x i8> @tbl1_8b(<16 x i8> %A, <8 x i8> %B) nounwind {
    6   %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %A, <8 x i8> %B)
    10  define <16 x i8> @tbl1_16b(<16 x i8> %A, <16 x i8> %B) nounwind {
    12  ; CHECK: tbl.16b
    13  %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> %A, <16 x i8> %B)
    14  ret <16 x i8> %tmp3
    17  define <8 x i8> @tbl2_8b(<16 x i8> %A, <16 x i8> %B, <8 x i8> %C) {
    20  %tmp3 = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> %A, <16 x i8> %B, <8 x i8> %C)
    24  define <16 x i8> @tbl2_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
    26  ; CHECK: tbl.16b
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AArch64/

neon-tbl.s:
    9   tbl v0.8b, { v1.16b }, v2.8b
    10  tbl v0.8b, { v1.16b, v2.16b }, v2.8b
    11  tbl v0.8b, { v1.16b, v2.16b, v3.16b }, v2.8b
    12  tbl v0.8b, { v1.16b, v2.16b, v3.16b, v4.16b }, v2.8b
    13  tbl v0.8b, { v31.16b, v0.16b, v1.16b, v2.16b }, v2.8b
    15  // CHECK: tbl v0.8b, { v1.16b }, v2.8b                          // encoding: [0x20,0x00,0x02,0x0e]
    16  // CHECK: tbl v0.8b, { v1.16b, v2.16b }, v2.8b                  // encoding: [0x20,0x20,0x02,0x0e]
    17  // CHECK: tbl v0.8b, { v1.16b, v2.16b, v3.16b }, v2.8b          // encoding: [0x20,0x40,0x02,0x0e]
    18  // CHECK: tbl v0.8b, { v1.16b, v2.16b, v3.16b, v4.16b }, v2.8b  // encoding: [0x20,0x60,0x02,0x0e]
    19  // CHECK: tbl v0.8b, { v31.16b, v0.16b, v1.16b, v2.16b }, v2.8b // encoding: [0xe0,0x63,0x02,0x0e]
    [all …]

/external/llvm/test/MC/AArch64/

neon-tbl.s:
    9   tbl v0.8b, { v1.16b }, v2.8b
    10  tbl v0.8b, { v1.16b, v2.16b }, v2.8b
    11  tbl v0.8b, { v1.16b, v2.16b, v3.16b }, v2.8b
    12  tbl v0.8b, { v1.16b, v2.16b, v3.16b, v4.16b }, v2.8b
    13  tbl v0.8b, { v31.16b, v0.16b, v1.16b, v2.16b }, v2.8b
    15  // CHECK: tbl v0.8b, { v1.16b }, v2.8b                          // encoding: [0x20,0x00,0x02,0x0e]
    16  // CHECK: tbl v0.8b, { v1.16b, v2.16b }, v2.8b                  // encoding: [0x20,0x20,0x02,0x0e]
    17  // CHECK: tbl v0.8b, { v1.16b, v2.16b, v3.16b }, v2.8b          // encoding: [0x20,0x40,0x02,0x0e]
    18  // CHECK: tbl v0.8b, { v1.16b, v2.16b, v3.16b, v4.16b }, v2.8b  // encoding: [0x20,0x60,0x02,0x0e]
    19  // CHECK: tbl v0.8b, { v31.16b, v0.16b, v1.16b, v2.16b }, v2.8b // encoding: [0xe0,0x63,0x02,0x0e]
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/

misched-fusion-aes.ll:
    3   declare <16 x i8> @llvm.arm.neon.aese(<16 x i8> %d, <16 x i8> %k)
    4   declare <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %d)
    5   declare <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %d, <16 x i8> %k)
    6   declare <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %d)
    8   define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
    9   %d0 = load <16 x i8>, <16 x i8>* %a0
    10  %a1 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 1
    11  %d1 = load <16 x i8>, <16 x i8>* %a1
    12  %a2 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 2
    13  %d2 = load <16 x i8>, <16 x i8>* %a2
    [all …]