/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/GlobalISel/ |
D | legalize-add-v512.mir |
46 ; ALL: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
47 ; ALL: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
48 …]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>), [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16…
49 …]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>), [[UV6:%[0-9]+]]:_(<16 x s8>), [[UV7:%[0-9]+]]:_(<16…
50 ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV4]]
51 ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV5]]
52 ; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
53 ; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
54 …1: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[ADD2]](…
55 ; AVX1: $zmm0 = COPY [[MV]](<64 x s8>)
[all …]
|
D | legalize-ext.mir |
108 ; X32: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
109 ; X32: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
110 ; X32: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[C]]
111 ; X32: $al = COPY [[AND]](s8)
115 ; X64: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
116 ; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
117 ; X64: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[C]]
118 ; X64: $al = COPY [[AND]](s8)
122 %2:_(s8) = G_ZEXT %0(s1)
123 $al = COPY %2(s8)
[all …]
|
D | legalize-add-v256.mir |
42 ; ALL: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
43 ; ALL: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
44 …; SSE2: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x…
45 …; SSE2: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32…
46 …; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x…
47 …; AVX1: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32…
48 ; SSE2: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
49 ; SSE2: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
50 ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
51 ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
[all …]
|
D | legalize-trunc.mir |
22 ; X32: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
23 ; X32: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[DEF]](s32)
24 ; X32: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[C]]
25 ; X32: G_STORE [[AND]](s8), [[DEF1]](p0) :: (store 1)
26 ; X32: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[DEF]](s32)
27 ; X32: G_STORE [[TRUNC1]](s8), [[DEF1]](p0) :: (store 8)
34 ; X64: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
35 ; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[DEF]](s32)
36 ; X64: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[C]]
37 ; X64: G_STORE [[AND]](s8), [[DEF1]](p0) :: (store 1)
[all …]
|
D | legalize-memop-scalar.mir |
37 ; X64: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load 1)
38 ; X64: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load 1)
42 ; X64: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
43 ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY [[LOAD]](s8)
44 ; X64: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY]], [[C]]
45 ; X64: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1)
46 ; X64: G_STORE [[LOAD1]](s8), [[DEF]](p0) :: (store 1)
52 ; X32: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load 1)
53 ; X32: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load 1)
57 ; X32: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
[all …]
|
D | legalize-or-scalar.mir |
45 ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
46 ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
47 ; CHECK: [[OR:%[0-9]+]]:_(s8) = G_OR [[TRUNC]], [[TRUNC1]]
49 ; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
50 ; CHECK: [[COPY1:%[0-9]+]]:_(s8) = COPY [[OR]](s8)
51 ; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY1]], [[C]]
52 ; CHECK: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1)
76 ; CHECK: [[DEF:%[0-9]+]]:_(s8) = IMPLICIT_DEF
77 ; CHECK: [[OR:%[0-9]+]]:_(s8) = G_OR [[DEF]], [[DEF]]
78 ; CHECK: $al = COPY [[OR]](s8)
[all …]
|
D | legalize-ext-x86-64.mir |
79 ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
81 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8)
86 %0(s8) = COPY $dil
87 %1(s1) = G_TRUNC %0(s8)
106 ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
107 ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s8)
110 %0(s8) = COPY $dil
111 %1(s64) = G_SEXT %0(s8)
176 ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
178 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8)
[all …]
|
/external/libhevc/common/arm/ |
D | ihevc_intra_pred_luma_planar.s |
135 vdup.s8 d0, r7 @src[nt-1]
141 vdup.s8 d1, r7 @src[3nt+1]
152 vdup.s8 d5, r8 @row + 1
153 vdup.s8 d6, r9 @nt - 1 - row
184 vld1.s8 d8, [r12] @(1-8)load 8 coeffs [col+1]
186 vld1.s8 d4, [r6] @(1-8)src[2nt-1-row]
187 vsub.s8 d9, d2, d8 @(1-8)[nt-1-col]
192 vld1.s8 d3, [r14] @(1-8)load 8 src[2nt+1+col]
195 vdup.s8 d20, d4[7] @(1)
198 vdup.s8 d21, d4[6] @(2)
[all …]
|
D | ihevc_intra_pred_luma_mode_3_to_9.s |
161 vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
201 vsub.s8 d8, d8, d2 @ref_main_idx (sub row)
202 vsub.s8 d8, d26, d8 @ref_main_idx (row 0)
203 vadd.s8 d8, d8, d27 @t0 compensate the pu1_src idx incremented by 8
204 vsub.s8 d9, d8, d2 @ref_main_idx + 1 (row 0)
206 vsub.s8 d7, d28, d6 @32-fract
209 vsub.s8 d4, d8, d2 @ref_main_idx (row 1)
210 vsub.s8 d5, d9, d2 @ref_main_idx + 1 (row 1)
217 vsub.s8 d8, d8, d3 @ref_main_idx (row 2)
218 vsub.s8 d9, d9, d3 @ref_main_idx + 1 (row 2)
[all …]
|
D | ihevc_intra_pred_filters_luma_mode_11_to_17.s |
269 vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
312 vadd.s8 d8, d8, d27 @ref_main_idx (add row)
313 vsub.s8 d8, d8, d26 @ref_main_idx (row 0)
314 vadd.s8 d9, d8, d2 @ref_main_idx + 1 (row 0)
316 vsub.s8 d7, d28, d6 @32-fract
319 vadd.s8 d4, d8, d2 @ref_main_idx (row 1)
320 vadd.s8 d5, d9, d2 @ref_main_idx + 1 (row 1)
327 vadd.s8 d8, d8, d3 @ref_main_idx (row 2)
328 vadd.s8 d9, d9, d3 @ref_main_idx + 1 (row 2)
337 vadd.s8 d4, d4, d3 @ref_main_idx (row 3)
[all …]
|
D | ihevc_intra_pred_chroma_mode_3_to_9.s |
155 vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
186 vshl.s8 d8, d8, #1 @ 2 * idx
197 vsub.s8 d8, d8, d27 @ref_main_idx (sub row)
198 vsub.s8 d8, d26, d8 @ref_main_idx (row 0)
199 vadd.s8 d8, d8, d9 @to compensate the pu1_src idx incremented by 8
200 vsub.s8 d9, d8, d29 @ref_main_idx + 1 (row 0)
202 vsub.s8 d7, d28, d6 @32-fract
205 vsub.s8 d4, d8, d29 @ref_main_idx (row 1)
206 vsub.s8 d5, d9, d29 @ref_main_idx + 1 (row 1)
215 vsub.s8 d8, d8, d29 @ref_main_idx (row 2)
[all …]
|
D | ihevc_intra_pred_chroma_planar.s |
153 vdup.s8 d5, r8 @row + 1
154 vdup.s8 d6, r9 @nt - 1 - row
170 vld1.s8 {d10,d11}, [r14]! @load src[2nt+1+col]
171 vld1.s8 d8, [r12]!
174 vsub.s8 d30, d2, d8 @[nt-1-col]
175 vsub.s8 d31, d2, d9
196 vadd.s8 d18, d5, d7 @row++ [(row+1)++]c
200 vsub.s8 d19, d6, d7 @[nt-1-row]--
218 vadd.s8 d5, d18, d7 @row++ [(row+1)++]
220 vsub.s8 d6, d19, d7 @[nt-1-row]--
[all …]
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | highbd_idct32x32_135_add_neon.c |
102 s8[32]; in vpx_highbd_idct32_12_neon() local
318 s8[0] = highbd_idct_add_dual(s7[0], s6[31]); in vpx_highbd_idct32_12_neon()
319 s8[1] = highbd_idct_add_dual(s7[1], s6[30]); in vpx_highbd_idct32_12_neon()
320 s8[2] = highbd_idct_add_dual(s7[2], s6[29]); in vpx_highbd_idct32_12_neon()
321 s8[3] = highbd_idct_add_dual(s7[3], s6[28]); in vpx_highbd_idct32_12_neon()
322 s8[4] = highbd_idct_add_dual(s7[4], s7[27]); in vpx_highbd_idct32_12_neon()
323 s8[5] = highbd_idct_add_dual(s7[5], s7[26]); in vpx_highbd_idct32_12_neon()
324 s8[6] = highbd_idct_add_dual(s7[6], s7[25]); in vpx_highbd_idct32_12_neon()
325 s8[7] = highbd_idct_add_dual(s7[7], s7[24]); in vpx_highbd_idct32_12_neon()
326 s8[8] = highbd_idct_add_dual(s7[8], s7[23]); in vpx_highbd_idct32_12_neon()
[all …]
|
D | highbd_vpx_convolve8_neon.c |
157 int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10; in vpx_highbd_convolve8_horiz_neon() local
181 load_4x4((const int16_t *)src, src_stride, &s7, &s8, &s9, &s10); in vpx_highbd_convolve8_horiz_neon()
182 transpose_s16_4x4d(&s7, &s8, &s9, &s10); in vpx_highbd_convolve8_horiz_neon()
185 d1 = highbd_convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters); in vpx_highbd_convolve8_horiz_neon()
186 d2 = highbd_convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters); in vpx_highbd_convolve8_horiz_neon()
187 d3 = highbd_convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters); in vpx_highbd_convolve8_horiz_neon()
204 s4 = s8; in vpx_highbd_convolve8_horiz_neon()
213 int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10; in vpx_highbd_convolve8_horiz_neon() local
222 load_8x8((const int16_t *)(src + 7), src_stride, &s7, &s8, &s9, &s10, in vpx_highbd_convolve8_horiz_neon()
233 transpose_s16_8x8(&s7, &s8, &s9, &s10, &t4, &t5, &t6, &t7); in vpx_highbd_convolve8_horiz_neon()
[all …]
|
D | vpx_convolve8_neon.c |
75 int16x4_t filter3, filter4, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, in vpx_convolve8_horiz_neon() local
112 s8 = vget_low_s16(tt1); in vpx_convolve8_horiz_neon()
118 d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3, in vpx_convolve8_horiz_neon()
120 d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3, in vpx_convolve8_horiz_neon()
122 d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3, in vpx_convolve8_horiz_neon()
142 s4 = s8; in vpx_convolve8_horiz_neon()
155 int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10; in vpx_convolve8_horiz_neon() local
182 s8 = vreinterpretq_s16_u16(vmovl_u8(t1)); in vpx_convolve8_horiz_neon()
196 t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3, in vpx_convolve8_horiz_neon()
198 t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3, in vpx_convolve8_horiz_neon()
[all …]
|
/external/llvm/test/MC/ARM/ |
D | vpush-vpop.s |
7 vpush {s8, s9, s10, s11, s12}
9 vpop {s8, s9, s10, s11, s12}
11 vpush.s8 {d8, d9, d10, d11, d12}
12 vpush.16 {s8, s9, s10, s11, s12}
14 vpop.64 {s8, s9, s10, s11, s12}
17 @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
19 @ CHECK-THUMB: vpop {s8, s9, s10, s11, s12} @ encoding: [0xbd,0xec,0x05,0x4a]
22 @ CHECK-ARM: vpush {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0x2d,0xed]
24 @ CHECK-ARM: vpop {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0xbd,0xec]
27 @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
[all …]
|
D | neon-add-encoding.s |
17 @ CHECK: vaddl.s8 q8, d17, d16 @ encoding: [0xa0,0x00,0xc1,0xf2]
18 vaddl.s8 q8, d17, d16
30 @ CHECK: vaddw.s8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf2]
31 vaddw.s8 q8, q8, d18
43 @ CHECK: vhadd.s8 d16, d16, d17 @ encoding: [0xa1,0x00,0x40,0xf2]
44 vhadd.s8 d16, d16, d17
55 @ CHECK: vhadd.s8 q8, q8, q9 @ encoding: [0xe2,0x00,0x40,0xf2]
56 vhadd.s8 q8, q8, q9
69 vhadd.s8 d11, d24
75 vhadd.s8 q1, q12
[all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/ARM/ |
D | vpush-vpop.s |
7 vpush {s8, s9, s10, s11, s12}
9 vpop {s8, s9, s10, s11, s12}
11 vpush.s8 {d8, d9, d10, d11, d12}
12 vpush.16 {s8, s9, s10, s11, s12}
14 vpop.64 {s8, s9, s10, s11, s12}
17 @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
19 @ CHECK-THUMB: vpop {s8, s9, s10, s11, s12} @ encoding: [0xbd,0xec,0x05,0x4a]
22 @ CHECK-ARM: vpush {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0x2d,0xed]
24 @ CHECK-ARM: vpop {s8, s9, s10, s11, s12} @ encoding: [0x05,0x4a,0xbd,0xec]
27 @ CHECK-THUMB: vpush {s8, s9, s10, s11, s12} @ encoding: [0x2d,0xed,0x05,0x4a]
[all …]
|
D | neon-add-encoding.s |
17 @ CHECK: vaddl.s8 q8, d17, d16 @ encoding: [0xa0,0x00,0xc1,0xf2]
18 vaddl.s8 q8, d17, d16
30 @ CHECK: vaddw.s8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf2]
31 vaddw.s8 q8, q8, d18
43 @ CHECK: vhadd.s8 d16, d16, d17 @ encoding: [0xa1,0x00,0x40,0xf2]
44 vhadd.s8 d16, d16, d17
55 @ CHECK: vhadd.s8 q8, q8, q9 @ encoding: [0xe2,0x00,0x40,0xf2]
56 vhadd.s8 q8, q8, q9
69 vhadd.s8 d11, d24
75 vhadd.s8 q1, q12
[all …]
|
D | neont2-neg-encoding.s |
5 @ CHECK: vneg.s8 d16, d16 @ encoding: [0xf1,0xff,0xa0,0x03]
6 vneg.s8 d16, d16
13 @ CHECK: vneg.s8 q8, q8 @ encoding: [0xf1,0xff,0xe0,0x03]
14 vneg.s8 q8, q8
21 @ CHECK: vqneg.s8 d16, d16 @ encoding: [0xf0,0xff,0xa0,0x07]
22 vqneg.s8 d16, d16
27 @ CHECK: vqneg.s8 q8, q8 @ encoding: [0xf0,0xff,0xe0,0x07]
28 vqneg.s8 q8, q8
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/Disassembler/ARM/ |
D | invalid-armv8.1a.txt |
4 [0x12,0x0b,0x01,0xf3] # vqrdmlah.s8 d0, d1, d2
6 # CHECK-NEXT: [0x12,0x0b,0x01,0xf3] # vqrdmlah.s8 d0, d1, d2
14 [0x54,0x0b,0x02,0xf3] # vqrdmlah.s8 q0, q1, q2
16 # CHECK-NEXT: [0x54,0x0b,0x02,0xf3] # vqrdmlah.s8 q0, q1, q2
24 [0x15,0x7c,0x06,0xf3] # vqrdmlsh.s8 d0, d1, d2
26 # CHECK-NEXT: [0x15,0x7c,0x06,0xf3] # vqrdmlsh.s8 d0, d1, d2
34 [0x54,0x0c,0x02,0xf3] # vqrdmlsh.s8 q0, q1, q2
36 # CHECK-NEXT: [0x54,0x0c,0x02,0xf3] # vqrdmlsh.s8 q0, q1, q2
44 [0x42,0x0e,0x81,0xf2] # vqrdmlah.s8 d0, d1, d2[0]
46 # CHECK-NEXT: [0x42,0x0e,0x81,0xf2] # vqrdmlah.s8 d0, d1, d2[0]
[all …]
|
D | invalid-thumbv8.1a.txt |
4 [0x01,0xff,0x12,0x0b] # vqrdmlah.s8 d0, d1, d2
6 [0x02,0xff,0x54,0x0b] # vqrdmlah.s8 q0, q1, q2
9 [0x01,0xff,0x12,0x0c] # vqrdmlsh.s8 d0, d1, d2
11 [0x02,0xff,0x54,0x0c] # vqrdmlsh.s8 q0, q1, q2
15 # CHECK-NEXT: [0x01,0xff,0x12,0x0b] # vqrdmlah.s8 d0, d1, d2
21 # CHECK-NEXT: [0x02,0xff,0x54,0x0b] # vqrdmlah.s8 q0, q1, q2
27 # CHECK-NEXT: [0x01,0xff,0x12,0x0c] # vqrdmlsh.s8 d0, d1, d2
33 # CHECK-NEXT: [0x02,0xff,0x54,0x0c] # vqrdmlsh.s8 q0, q1, q2
39 [0x81,0xef,0x42,0x0e] # vqrdmlah.s8 d0, d1, d2[0]
41 [0x82,0xff,0x42,0x0e] # vqrdmlah.s8 q0, q1, d2[0]
[all …]
|
/external/llvm/test/MC/Disassembler/ARM/ |
D | invalid-thumbv8.1a.txt |
4 [0x01,0xff,0x12,0x0b] # vqrdmlah.s8 d0, d1, d2
6 [0x02,0xff,0x54,0x0b] # vqrdmlah.s8 q0, q1, q2
9 [0x01,0xff,0x12,0x0c] # vqrdmlsh.s8 d0, d1, d2
11 [0x02,0xff,0x54,0x0c] # vqrdmlsh.s8 q0, q1, q2
15 # CHECK-NEXT: [0x01,0xff,0x12,0x0b] # vqrdmlah.s8 d0, d1, d2
21 # CHECK-NEXT: [0x02,0xff,0x54,0x0b] # vqrdmlah.s8 q0, q1, q2
27 # CHECK-NEXT: [0x01,0xff,0x12,0x0c] # vqrdmlsh.s8 d0, d1, d2
33 # CHECK-NEXT: [0x02,0xff,0x54,0x0c] # vqrdmlsh.s8 q0, q1, q2
39 [0x81,0xef,0x42,0x0e] # vqrdmlah.s8 d0, d1, d2[0]
41 [0x82,0xff,0x42,0x0e] # vqrdmlah.s8 q0, q1, d2[0]
[all …]
|
D | invalid-armv8.1a.txt |
4 [0x12,0x0b,0x01,0xf3] # vqrdmlah.s8 d0, d1, d2
6 # CHECK-NEXT: [0x12,0x0b,0x01,0xf3] # vqrdmlah.s8 d0, d1, d2
14 [0x54,0x0b,0x02,0xf3] # vqrdmlah.s8 q0, q1, q2
16 # CHECK-NEXT: [0x54,0x0b,0x02,0xf3] # vqrdmlah.s8 q0, q1, q2
24 [0x15,0x7c,0x06,0xf3] # vqrdmlsh.s8 d0, d1, d2
26 # CHECK-NEXT: [0x15,0x7c,0x06,0xf3] # vqrdmlsh.s8 d0, d1, d2
34 [0x54,0x0c,0x02,0xf3] # vqrdmlsh.s8 q0, q1, q2
36 # CHECK-NEXT: [0x54,0x0c,0x02,0xf3] # vqrdmlsh.s8 q0, q1, q2
44 [0x42,0x0e,0x81,0xf2] # vqrdmlah.s8 d0, d1, d2[0]
46 # CHECK-NEXT: [0x42,0x0e,0x81,0xf2] # vqrdmlah.s8 d0, d1, d2[0]
[all …]
|
/external/swiftshader/third_party/LLVM/test/MC/ARM/ |
D | neon-neg-encoding.s |
3 @ CHECK: vneg.s8 d16, d16 @ encoding: [0xa0,0x03,0xf1,0xf3]
4 vneg.s8 d16, d16
11 @ CHECK: vneg.s8 q8, q8 @ encoding: [0xe0,0x03,0xf1,0xf3]
12 vneg.s8 q8, q8
19 @ CHECK: vqneg.s8 d16, d16 @ encoding: [0xa0,0x07,0xf0,0xf3]
20 vqneg.s8 d16, d16
25 @ CHECK: vqneg.s8 q8, q8 @ encoding: [0xe0,0x07,0xf0,0xf3]
26 vqneg.s8 q8, q8
|