/external/llvm/test/MC/ARM/
  neon-add-encoding.s:
      14  @ CHECK: vadd.f32 q8, q8, q9 @ encoding: [0xe2,0x0d,0x40,0xf2]
      15  vadd.f32 q8, q8, q9
      55  @ CHECK: vhadd.s8 q8, q8, q9 @ encoding: [0xe2,0x00,0x40,0xf2]
      56  vhadd.s8 q8, q8, q9
      57  @ CHECK: vhadd.s16 q8, q8, q9 @ encoding: [0xe2,0x00,0x50,0xf2]
      58  vhadd.s16 q8, q8, q9
      59  @ CHECK: vhadd.s32 q8, q8, q9 @ encoding: [0xe2,0x00,0x60,0xf2]
      60  vhadd.s32 q8, q8, q9
      61  @ CHECK: vhadd.u8 q8, q8, q9 @ encoding: [0xe2,0x00,0x40,0xf3]
      62  vhadd.u8 q8, q8, q9
      [all …]

  neont2-add-encoding.s:
      15  @ CHECK: vadd.f32 q8, q8, q9 @ encoding: [0x40,0xef,0xe2,0x0d]
      16  vadd.f32 q8, q8, q9
      56  @ CHECK: vhadd.s8 q8, q8, q9 @ encoding: [0x40,0xef,0xe2,0x00]
      57  vhadd.s8 q8, q8, q9
      58  @ CHECK: vhadd.s16 q8, q8, q9 @ encoding: [0x50,0xef,0xe2,0x00]
      59  vhadd.s16 q8, q8, q9
      60  @ CHECK: vhadd.s32 q8, q8, q9 @ encoding: [0x60,0xef,0xe2,0x00]
      61  vhadd.s32 q8, q8, q9
      62  @ CHECK: vhadd.u8 q8, q8, q9 @ encoding: [0x40,0xff,0xe2,0x00]
      63  vhadd.u8 q8, q8, q9
      [all …]

  neont2-shuffle-encoding.s:
      9   @ CHECK: vext.8 q8, q9, q8, #3 @ encoding: [0xf2,0xef,0xe0,0x03]
      10  vext.8 q8, q9, q8, #3
      11  @ CHECK: vext.8 q8, q9, q8, #7 @ encoding: [0xf2,0xef,0xe0,0x07]
      12  vext.8 q8, q9, q8, #7
      15  @ CHECK: vext.32 q8, q9, q8, #3 @ encoding: [0xf2,0xef,0xe0,0x0c]
      16  vext.32 q8, q9, q8, #3
      23  @ CHECK: vtrn.8 q9, q8 @ encoding: [0xf2,0xff,0xe0,0x20]
      24  vtrn.8 q9, q8
      25  @ CHECK: vtrn.16 q9, q8 @ encoding: [0xf6,0xff,0xe0,0x20]
      26  vtrn.16 q9, q8
      [all …]

  neont2-sub-encoding.s:
      7   @ CHECK: vext.8 q8, q9, q8, #3 @ encoding: [0xf2,0xef,0xe0,0x03]
      8   vext.8 q8, q9, q8, #3
      9   @ CHECK: vext.8 q8, q9, q8, #7 @ encoding: [0xf2,0xef,0xe0,0x07]
      10  vext.8 q8, q9, q8, #7
      13  @ CHECK: vext.32 q8, q9, q8, #3 @ encoding: [0xf2,0xef,0xe0,0x0c]
      14  vext.32 q8, q9, q8, #3
      21  @ CHECK: vtrn.8 q9, q8 @ encoding: [0xf2,0xff,0xe0,0x20]
      22  vtrn.8 q9, q8
      23  @ CHECK: vtrn.16 q9, q8 @ encoding: [0xf6,0xff,0xe0,0x20]
      24  vtrn.16 q9, q8
      [all …]

  neon-cmp-encoding.s:
      7   vceq.i8 q8, q8, q9
      8   vceq.i16 q8, q8, q9
      9   vceq.i32 q8, q8, q9
      10  vceq.f32 q8, q8, q9
      16  @ CHECK: vceq.i8 q8, q8, q9 @ encoding: [0xf2,0x08,0x40,0xf3]
      17  @ CHECK: vceq.i16 q8, q8, q9 @ encoding: [0xf2,0x08,0x50,0xf3]
      18  @ CHECK: vceq.i32 q8, q8, q9 @ encoding: [0xf2,0x08,0x60,0xf3]
      19  @ CHECK: vceq.f32 q8, q8, q9 @ encoding: [0xe2,0x0e,0x40,0xf2]
      28  vcge.s8 q8, q8, q9
      29  vcge.s16 q8, q8, q9
      [all …]

  neon-sub-encoding.s:
      8   vsub.i8 q8, q8, q9
      9   vsub.i16 q8, q8, q9
      10  vsub.i32 q8, q8, q9
      11  vsub.i64 q8, q8, q9
      12  vsub.f32 q8, q8, q9
      20  vsub.i16 q2, q9
      30  @ CHECK: vsub.i8 q8, q8, q9 @ encoding: [0xe2,0x08,0x40,0xf3]
      31  @ CHECK: vsub.i16 q8, q8, q9 @ encoding: [0xe2,0x08,0x50,0xf3]
      32  @ CHECK: vsub.i32 q8, q8, q9 @ encoding: [0xe2,0x08,0x60,0xf3]
      33  @ CHECK: vsub.i64 q8, q8, q9 @ encoding: [0xe2,0x08,0x70,0xf3]
      [all …]

  neon-shuffle-encoding.s:
      5   vext.8 q8, q9, q8, #3
      6   vext.8 q8, q9, q8, #7
      8   vext.32 q8, q9, q8, #3
      9   vext.64 q8, q9, q8, #1
      14  vext.8 q9, q4, #7
      22  @ CHECK: vext.8 q8, q9, q8, #3 @ encoding: [0xe0,0x03,0xf2,0xf2]
      23  @ CHECK: vext.8 q8, q9, q8, #7 @ encoding: [0xe0,0x07,0xf2,0xf2]
      25  @ CHECK: vext.32 q8, q9, q8, #3 @ encoding: [0xe0,0x0c,0xf2,0xf2]
      26  @ CHECK: vext.64 q8, q9, q8, #1 @ encoding: [0xe0,0x08,0xf2,0xf2]
      31  @ CHECK: vext.8 q9, q9, q4, #7 @ encoding: [0xc8,0x27,0xf2,0xf2]
      [all …]

  neon-absdiff-encoding.s:
      17  @ CHECK: vabd.s8 q8, q8, q9 @ encoding: [0xe2,0x07,0x40,0xf2]
      18  vabd.s8 q8, q8, q9
      19  @ CHECK: vabd.s16 q8, q8, q9 @ encoding: [0xe2,0x07,0x50,0xf2]
      20  vabd.s16 q8, q8, q9
      21  @ CHECK: vabd.s32 q8, q8, q9 @ encoding: [0xe2,0x07,0x60,0xf2]
      22  vabd.s32 q8, q8, q9
      23  @ CHECK: vabd.u8 q8, q8, q9 @ encoding: [0xe2,0x07,0x40,0xf3]
      24  vabd.u8 q8, q8, q9
      25  @ CHECK: vabd.u16 q8, q8, q9 @ encoding: [0xe2,0x07,0x50,0xf3]
      26  vabd.u16 q8, q8, q9
      [all …]

  neont2-absdiff-encoding.s:
      12  vabd.s8 q8, q8, q9
      13  vabd.s16 q8, q8, q9
      14  vabd.s32 q8, q8, q9
      15  vabd.u8 q8, q8, q9
      16  vabd.u16 q8, q8, q9
      17  vabd.u32 q8, q8, q9
      18  vabd.f32 q8, q8, q9
      27  @ CHECK: vabd.s8 q8, q8, q9 @ encoding: [0x40,0xef,0xe2,0x07]
      28  @ CHECK: vabd.s16 q8, q8, q9 @ encoding: [0x50,0xef,0xe2,0x07]
      29  @ CHECK: vabd.s32 q8, q8, q9 @ encoding: [0x60,0xef,0xe2,0x07]
      [all …]

  neon-mul-encoding.s:
      7   vmul.i8 q8, q8, q9
      8   vmul.i16 q8, q8, q9
      9   vmul.i32 q8, q8, q9
      10  vmul.f32 q8, q8, q9
      12  vmul.p8 q8, q8, q9
      19  vmul.i8 q8, q9
      20  vmul.i16 q8, q9
      21  vmul.i32 q8, q9
      22  vmul.f32 q8, q9
      24  vmul.p8 q8, q9
      [all …]

  neont2-satshift-encoding.s:
      21  @ CHECK: vqshl.s8 q8, q8, q9 @ encoding: [0x42,0xef,0xf0,0x04]
      22  vqshl.s8 q8, q8, q9
      23  @ CHECK: vqshl.s16 q8, q8, q9 @ encoding: [0x52,0xef,0xf0,0x04]
      24  vqshl.s16 q8, q8, q9
      25  @ CHECK: vqshl.s32 q8, q8, q9 @ encoding: [0x62,0xef,0xf0,0x04]
      26  vqshl.s32 q8, q8, q9
      27  @ CHECK: vqshl.s64 q8, q8, q9 @ encoding: [0x72,0xef,0xf0,0x04]
      28  vqshl.s64 q8, q8, q9
      29  @ CHECK: vqshl.u8 q8, q8, q9 @ encoding: [0x42,0xff,0xf0,0x04]
      30  vqshl.u8 q8, q8, q9
      [all …]

  neon-satshift-encoding.s:
      19  @ CHECK: vqshl.s8 q8, q8, q9 @ encoding: [0xf0,0x04,0x42,0xf2]
      20  vqshl.s8 q8, q8, q9
      21  @ CHECK: vqshl.s16 q8, q8, q9 @ encoding: [0xf0,0x04,0x52,0xf2]
      22  vqshl.s16 q8, q8, q9
      23  @ CHECK: vqshl.s32 q8, q8, q9 @ encoding: [0xf0,0x04,0x62,0xf2]
      24  vqshl.s32 q8, q8, q9
      25  @ CHECK: vqshl.s64 q8, q8, q9 @ encoding: [0xf0,0x04,0x72,0xf2]
      26  vqshl.s64 q8, q8, q9
      27  @ CHECK: vqshl.u8 q8, q8, q9 @ encoding: [0xf0,0x04,0x42,0xf3]
      28  vqshl.u8 q8, q8, q9
      [all …]

  neont2-bitwise-encoding.s:
      6   vand q8, q8, q9
      9   @ CHECK: vand q8, q8, q9 @ encoding: [0x40,0xef,0xf2,0x01]
      12  veor q8, q8, q9
      15  @ CHECK: veor q8, q8, q9 @ encoding: [0x40,0xff,0xf2,0x01]
      19  vorr q8, q8, q9
      25  @ CHECK: vorr q8, q8, q9 @ encoding: [0x60,0xef,0xf2,0x01]
      29  vbic q8, q8, q9
      34  @ CHECK: vbic q8, q8, q9 @ encoding: [0x50,0xef,0xf2,0x01]
      38  vorn q8, q8, q9
      41  @ CHECK: vorn q8, q8, q9 @ encoding: [0x70,0xef,0xf2,0x01]
      [all …]

  neont2-mul-encoding.s:
      9   vmul.i8 q8, q8, q9
      10  vmul.i16 q8, q8, q9
      11  vmul.i32 q8, q8, q9
      12  vmul.f32 q8, q8, q9
      14  vmul.p8 q8, q8, q9
      21  @ CHECK: vmul.i8 q8, q8, q9 @ encoding: [0x40,0xef,0xf2,0x09]
      22  @ CHECK: vmul.i16 q8, q8, q9 @ encoding: [0x50,0xef,0xf2,0x09]
      23  @ CHECK: vmul.i32 q8, q8, q9 @ encoding: [0x60,0xef,0xf2,0x09]
      24  @ CHECK: vmul.f32 q8, q8, q9 @ encoding: [0x40,0xff,0xf2,0x0d]
      26  @ CHECK: vmul.p8 q8, q8, q9 @ encoding: [0x40,0xff,0xf2,0x09]
      [all …]

  neon-mul-accum-encoding.s:
      7   vmla.i8 q9, q8, q10
      8   vmla.i16 q9, q8, q10
      9   vmla.i32 q9, q8, q10
      10  vmla.f32 q9, q8, q10
      17  @ CHECK: vmla.i8 q9, q8, q10 @ encoding: [0xe4,0x29,0x40,0xf2]
      18  @ CHECK: vmla.i16 q9, q8, q10 @ encoding: [0xe4,0x29,0x50,0xf2]
      19  @ CHECK: vmla.i32 q9, q8, q10 @ encoding: [0xe4,0x29,0x60,0xf2]
      20  @ CHECK: vmla.f32 q9, q8, q10 @ encoding: [0xf4,0x2d,0x40,0xf2]
      58  vmls.i8 q9, q8, q10
      59  vmls.i16 q9, q8, q10
      [all …]

  neont2-shift-encoding.s:
      21   @ CHECK: vshl.u8 q8, q9, q8 @ encoding: [0x40,0xff,0xe2,0x04]
      22   vshl.u8 q8, q9, q8
      23   @ CHECK: vshl.u16 q8, q9, q8 @ encoding: [0x50,0xff,0xe2,0x04]
      24   vshl.u16 q8, q9, q8
      25   @ CHECK: vshl.u32 q8, q9, q8 @ encoding: [0x60,0xff,0xe2,0x04]
      26   vshl.u32 q8, q9, q8
      27   @ CHECK: vshl.u64 q8, q9, q8 @ encoding: [0x70,0xff,0xe2,0x04]
      28   vshl.u64 q8, q9, q8
      109  @ CHECK: vrshl.s8 q8, q9, q8 @ encoding: [0x40,0xef,0xe2,0x05]
      110  vrshl.s8 q8, q9, q8
      [all …]

  neont2-mul-accum-encoding.s:
      9   vmla.i8 q9, q8, q10
      10  vmla.i16 q9, q8, q10
      11  vmla.i32 q9, q8, q10
      12  vmla.f32 q9, q8, q10
      19  @ CHECK: vmla.i8 q9, q8, q10 @ encoding: [0x40,0xef,0xe4,0x29]
      20  @ CHECK: vmla.i16 q9, q8, q10 @ encoding: [0x50,0xef,0xe4,0x29]
      21  @ CHECK: vmla.i32 q9, q8, q10 @ encoding: [0x60,0xef,0xe4,0x29]
      22  @ CHECK: vmla.f32 q9, q8, q10 @ encoding: [0x40,0xef,0xf4,0x2d]
      62  vmls.i8 q9, q8, q10
      63  vmls.i16 q9, q8, q10
      [all …]

/external/boringssl/linux-arm/crypto/aes/
  aesv8-armx.S:
      48  vext.8 q9,q0,q3,#12
      53  veor q3,q3,q9
      54  vext.8 q9,q0,q9,#12
      55  veor q3,q3,q9
      56  vext.8 q9,q0,q9,#12
      58  veor q3,q3,q9
      67  vext.8 q9,q0,q3,#12
      71  veor q3,q3,q9
      72  vext.8 q9,q0,q9,#12
      73  veor q3,q3,q9
      [all …]

  bsaes-armv7.S:
      1156  vld1.8 {q8,q9}, [r0]! @ reload input
      1160  veor q6, q6, q9
      1210  vld1.8 {q8,q9}, [r0]! @ reload input
      1214  veor q6, q6, q9
      1233  vld1.8 {q8,q9}, [r0]! @ reload input
      1237  veor q6, q6, q9
      1254  vld1.8 {q8,q9}, [r0]! @ reload input
      1258  veor q6, q6, q9
      1272  vld1.8 {q8,q9}, [r0]! @ reload input
      1276  veor q6, q6, q9
      [all …]

/external/boringssl/linux-arm/crypto/modes/
  ghashv8-armx.S:
      10  vld1.64 {q9},[r1] @ load input H
      13  vext.8 q3,q9,q9,#8
      15  vdup.32 q9,d18[1]
      18  vshr.s32 q9,q9,#31 @ broadcast carry bit
      22  vand q8,q8,q9
      34  vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
      36  veor q1,q1,q9
      49  vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing
      50  veor q9,q9,q14
      51  vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed
      [all …]

/external/libavc/common/arm/
  ih264_inter_pred_luma_horz_qpel_vert_hpel_a9q.s:
      168  vaddl.u8 q9, d0, d10
      170  vmla.u16 q9, q10, q14
      176  vmls.u16 q9, q11, q15
      182  vst1.32 {q9}, [r9]!
      184  vext.16 q12, q9, q10, #2
      185  vext.16 q13, q9, q10, #3
      187  vext.16 q11, q9, q10, #5
      189  vext.16 q12, q9, q10, #1
      190  vext.16 q13, q9, q10, #4
      204  vqmovn.u16 d18, q9
      [all …]

  ih264_inter_pred_luma_horz_hpel_vert_qpel_a9q.s:
      229  vaddl.u8 q9, d2, d3
      231  vmla.u16 q8, q9, q11
      234  vaddl.u8 q9, d1, d4
      236  vmls.u16 q8, q9, q12
      245  vaddl.s16 q9, d6, d16
      253  vmlal.s16 q9, d30, d22
      254  vmlsl.s16 q9, d28, d24
      260  vqrshrun.s32 d18, q9, #10
      267  vqmovn.u16 d18, q9
      277  vaddl.s16 q9, d8, d20
      [all …]

/external/libhevc/common/arm/
  ihevc_itrans_recon_8x8.s:
      189  vmull.s16 q9,d3,d1[2]   @// y2 * sin2 (q3 is freed by this time)(part of d1)
      239  vmlsl.s16 q9,d11,d0[2]  @// d1 = y2 * sin2 - y6 * cos2(part of a0 and a1)
      252  vsub.s32 q11,q10,q9     @// a2 = c1 - d1(part of r2,r5)
      253  vadd.s32 q9,q10,q9      @// a1 = c1 + d1(part of r1,r6)
      261  vadd.s32 q14,q9,q13     @// a1 + b1(part of r1)
      262  vsub.s32 q9,q9,q13      @// a1 - b1(part of r6)
      272  vqrshrn.s32 d11,q9,#shift_stage1_idct @// r6 = (a1 - b1 + rnd) >> 7(shift_stage1_idct)
      312  vmull.s16 q9,d3,d1[2]   @// y2 * sin2 (q3 is freed by this time)(part of d1)
      320  vsub.s32 q11,q10,q9     @// a2 = c1 - d1(part of r2,r5)
      321  vadd.s32 q9,q10,q9      @// a1 = c1 + d1(part of r1,r6)
      [all …]

  ihevc_itrans_recon_32x32.s:
      235  vmull.s16 q9,d10,d0[0]
      236  vmlal.s16 q9,d11,d3[2]
      269  vmlal.s16 q9,d12,d7[0]
      270  vmlsl.s16 q9,d13,d5[2]
      305  vmlsl.s16 q9,d10,d2[0]
      306  vmlsl.s16 q9,d11,d1[2]
      343  vmlsl.s16 q9,d12,d5[0]
      344  vmlal.s16 q9,d13,d7[2]
      381  vmlal.s16 q9,d10,d0[0]
      382  vmlal.s16 q9,d11,d0[2]
      [all …]

/external/libvpx/libvpx/vp8/encoder/arm/neon/
  vp8_mse16x16_neon.asm:
      32   vmov.i8 q7, #0          ;q7, q8, q9, q10 - sse
      34   vmov.i8 q9, #0
      55   vmlal.s16 q9, d24, d24
      59   vmlal.s16 q9, d28, d28
      65   vadd.u32 q9, q9, q10
      69   vadd.u32 q10, q7, q9
      106  vmull.s16 q9, d26, d26
      110  vadd.u32 q9, q9, q10
      111  vadd.u32 q9, q7, q9
      113  vpaddl.u32 q1, q9