/external/llvm/test/MC/ARM/ |
D | neon-bitwise-encoding.s |
    110  veor q4, q7, q3
    111  veor.8 q4, q7, q3
    112  veor.16 q4, q7, q3
    113  veor.32 q4, q7, q3
    114  veor.64 q4, q7, q3
    116  veor.i8 q4, q7, q3
    117  veor.i16 q4, q7, q3
    118  veor.i32 q4, q7, q3
    119  veor.i64 q4, q7, q3
    121  veor.s8 q4, q7, q3
    [all …]
|
D | neon-v8.s |
    5   vmaxnm.f32 q2, q4, q6
    6   @ CHECK: vmaxnm.f32 q2, q4, q6 @ encoding: [0x5c,0x4f,0x08,0xf3]
    16  vcvta.s32.f32 q4, q6
    17  @ CHECK: vcvta.s32.f32 q4, q6 @ encoding: [0x4c,0x80,0xbb,0xf3]
    18  vcvta.u32.f32 q4, q10
    19  @ CHECK: vcvta.u32.f32 q4, q10 @ encoding: [0xe4,0x80,0xbb,0xf3]
    43  vcvtp.s32.f32 q4, q15
    44  @ CHECK: vcvtp.s32.f32 q4, q15 @ encoding: [0x6e,0x82,0xbb,0xf3]
    50  vrintn.f32 q1, q4
    51  @ CHECK: vrintn.f32 q1, q4 @ encoding: [0x48,0x24,0xba,0xf3]
    [all …]
|
D | thumb-neon-v8.s |
    5   vmaxnm.f32 q2, q4, q6
    6   @ CHECK: vmaxnm.f32 q2, q4, q6 @ encoding: [0x08,0xff,0x5c,0x4f]
    16  vcvta.s32.f32 q4, q6
    17  @ CHECK: vcvta.s32.f32 q4, q6 @ encoding: [0xbb,0xff,0x4c,0x80]
    18  vcvta.u32.f32 q4, q10
    19  @ CHECK: vcvta.u32.f32 q4, q10 @ encoding: [0xbb,0xff,0xe4,0x80]
    43  vcvtp.s32.f32 q4, q15
    44  @ CHECK: vcvtp.s32.f32 q4, q15 @ encoding: [0xbb,0xff,0x6e,0x82]
    50  vrintn.f32 q1, q4
    51  @ CHECK: vrintn.f32 q1, q4 @ encoding: [0xba,0xff,0x48,0x24]
    [all …]
|
D | neon-shift-encoding.s |
    116  vsra.s64 q4, q5, #63
    122  vsra.s8 q4, #7
    134  @ CHECK: vsra.s64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf2]
    139  @ CHECK: vsra.s8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf2]
    152  vsra.u64 q4, q5, #63
    158  vsra.u8 q4, #7
    170  @ CHECK: vsra.u64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf3]
    175  @ CHECK: vsra.u8 q4, q4, #7 @ encoding: [0x58,0x81,0x89,0xf3]
    188  vsri.64 q4, q5, #63
    194  vsri.8 q4, #7
    [all …]
|
D | neont2-shiftaccum-encoding.s |
    12   vsra.s64 q8, q4, #64
    20   vsra.u64 q4, q5, #25
    30   vsra.s64 q4, #64
    47   @ CHECK: vsra.s64 q8, q4, #64 @ encoding: [0xc0,0xef,0xd8,0x01]
    55   @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xa7,0xff,0xda,0x81]
    64   @ CHECK: vsra.s64 q4, q4, #64 @ encoding: [0x80,0xef,0xd8,0x81]
    85   vrsra.s32 q3, q4, #32
    86   vrsra.s64 q4, q5, #64
    103  vrsra.s32 q4, #32
    120  @ CHECK: vrsra.s32 q3, q4, #32 @ encoding: [0xa0,0xef,0x58,0x63]
    [all …]
|
D | neon-shiftaccum-encoding.s |
    10   vsra.s64 q8, q4, #64
    18   vsra.u64 q4, q5, #25
    28   vsra.s64 q4, #64
    45   @ CHECK: vsra.s64 q8, q4, #64 @ encoding: [0xd8,0x01,0xc0,0xf2]
    53   @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xda,0x81,0xa7,0xf3]
    62   @ CHECK: vsra.s64 q4, q4, #64 @ encoding: [0xd8,0x81,0x80,0xf2]
    82   vrsra.s32 q3, q4, #32
    83   vrsra.s64 q4, q5, #64
    100  vrsra.s32 q4, #32
    117  @ CHECK: vrsra.s32 q3, q4, #32 @ encoding: [0x58,0x63,0xa0,0xf2]
    [all …]
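The three shift and shift-accumulate test files above exercise the same VSRA/VRSRA forms in ARM and Thumb-2 encodings; the tests themselves only check instruction encodings. As a reference for what those instructions compute, here is a minimal intrinsics sketch (assumed-equivalent semantics, not taken from these files):

    #include <arm_neon.h>

    /* vsra.s32 acc, src, #31  ->  per lane: acc += src >> 31 (truncating shift) */
    int32x4_t shift_accumulate(int32x4_t acc, int32x4_t src) {
        return vsraq_n_s32(acc, src, 31);
    }

    /* vrsra.s32 acc, src, #31 ->  per lane: acc += (src + (1 << 30)) >> 31 (rounded) */
    int32x4_t rounding_shift_accumulate(int32x4_t acc, int32x4_t src) {
        return vrsraq_n_s32(acc, src, 31);
    }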
|
/external/llvm/test/CodeGen/ARM/ |
D | thumb-big-stack.ll |
    145  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    147  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    149  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    151  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    153  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    155  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    157  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    159  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    161  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    163  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    [all …]
|
/external/libvpx/libvpx/vp8/common/arm/neon/ |
D | dequant_idct_neon.c |
    27  int16x8_t q1, q2, q3, q4, q5, q6;                                     in vp8_dequant_idct_add_neon() local
    38  q4 = vld1q_s16(input);                                                in vp8_dequant_idct_add_neon()
    59  vmulq_u16(vreinterpretq_u16_s16(q4), vreinterpretq_u16_s16(q6)));     in vp8_dequant_idct_add_neon()
    67  q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);                           in vp8_dequant_idct_add_neon()
    69  q4 = vshrq_n_s16(q4, 1);                                              in vp8_dequant_idct_add_neon()
    71  q4 = vqaddq_s16(q4, q2);                                              in vp8_dequant_idct_add_neon()
    73  d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));                 in vp8_dequant_idct_add_neon()
    74  d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));                 in vp8_dequant_idct_add_neon()
    92  q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);                           in vp8_dequant_idct_add_neon()
    97  q4 = vshrq_n_s16(q4, 1);                                              in vp8_dequant_idct_add_neon()
    [all …]
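The vqdmulhq_n_s16 calls above are how the VP8 inverse DCT multiplies by its fixed-point constants. A minimal sketch of that pattern (the constant below is a placeholder, not the value libvpx uses):

    #include <arm_neon.h>

    /* vqdmulhq_n_s16(a, c) returns the high half of the doubled product,
     * i.e. per lane: saturate((a * c) >> 15) -- a multiply by a Q15-style constant. */
    int16x8_t scale_by_q15_constant(int16x8_t a) {
        const int16_t kScale = 12345;  /* placeholder constant */
        return vqdmulhq_n_s16(a, kScale);
    }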
|
D | idct_dequant_full_2x_neon.c |
    21   int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;          in idct_dequant_full_2x_neon() local
    41   q4 = vld1q_s16(q);                                                    in idct_dequant_full_2x_neon()
    68   q4 = vmulq_s16(q4, q0);                                               in idct_dequant_full_2x_neon()
    74   dLow1 = vget_low_s16(q4);                                             in idct_dequant_full_2x_neon()
    75   dHigh1 = vget_high_s16(q4);                                           in idct_dequant_full_2x_neon()
    77   q4 = vcombine_s16(dHigh0, dHigh1);                                    in idct_dequant_full_2x_neon()
    86   q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2);                                 in idct_dequant_full_2x_neon()
    88   q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1);                           in idct_dequant_full_2x_neon()
    97   q4 = vqaddq_s16(q4, q8);                                              in idct_dequant_full_2x_neon()
    101  q3 = vqaddq_s16(q7, q4);                                              in idct_dequant_full_2x_neon()
    [all …]
|
D | mbloopfilter_neon.c |
    18   uint8x16_t q4,  // p2                                                 in vp8_mbloop_filter_neon() argument
    38   q11u8 = vabdq_u8(q3, q4);                                             in vp8_mbloop_filter_neon()
    39   q12u8 = vabdq_u8(q4, q5);                                             in vp8_mbloop_filter_neon()
    70   q4 = veorq_u8(q4, q0u8);                                              in vp8_mbloop_filter_neon()
    137  q0s8 = vqaddq_s8(vreinterpretq_s8_u8(q4), q0s8);                      in vp8_mbloop_filter_neon()
    157  uint8x16_t qblimit, qlimit, qthresh, q3, q4;                          in vp8_mbloop_filter_horizontal_edge_y_neon() local
    168  q4 = vld1q_u8(src);                                                   in vp8_mbloop_filter_horizontal_edge_y_neon()
    182  vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9,  in vp8_mbloop_filter_horizontal_edge_y_neon()
    183  q10, &q4, &q5, &q6, &q7, &q8, &q9);                                   in vp8_mbloop_filter_horizontal_edge_y_neon()
    186  vst1q_u8(src, q4);                                                    in vp8_mbloop_filter_horizontal_edge_y_neon()
    [all …]
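The vabdq_u8 calls above feed absolute pixel differences into the loop-filter mask. A small sketch of that step (function and parameter names are illustrative, not taken from the file):

    #include <arm_neon.h>

    /* 0xFF in every lane where |a - b| <= limit, 0x00 elsewhere */
    uint8x16_t diff_within_limit(uint8x16_t a, uint8x16_t b, uint8x16_t limit) {
        uint8x16_t d = vabdq_u8(a, b);   /* per-pixel absolute difference */
        return vcleq_u8(d, limit);
    }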
|
/external/valgrind/none/tests/arm/ |
D | neon128.c |
    439  TESTINSN_bin("vand q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57);     in main()
    445  TESTINSN_bin("vbic q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57);     in main()
    452  TESTINSN_bin("vorr q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff);    in main()
    458  TESTINSN_bin("vorn q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff);    in main()
    463  TESTINSN_bin("veor q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57);     in main()
    468  TESTINSN_bin("veor q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff);    in main()
    473  TESTINSN_bin("vbsl q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57);     in main()
    478  TESTINSN_bin("vbsl q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff);    in main()
    483  TESTINSN_bin("vbit q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57);     in main()
    488  TESTINSN_bin("vbit q4, q4, q4", q4, q4, i16, 0xff, q4, i16, 0xff);    in main()
    [all …]
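These valgrind tests drive the NEON bitwise instructions (VAND, VBIC, VORR, VORN, VEOR, VBSL, VBIT) through the TESTINSN_bin macro. For reference, the select instructions amount to a per-bit multiplexer; a hedged intrinsics sketch (not part of the test file):

    #include <arm_neon.h>

    /* VBSL: each result bit comes from a where the mask bit is 1, from b where it is 0 */
    uint8x16_t bit_select(uint8x16_t mask, uint8x16_t a, uint8x16_t b) {
        return vbslq_u8(mask, a, b);     /* (mask & a) | (~mask & b) */
    }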
|
/external/libhevc/common/arm/ |
D | ihevc_inter_pred_luma_vert_w16inp_w16out.s |
    158  vmull.s16 q4,d1,d23    @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    160  vmlal.s16 q4,d0,d22    @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@
    162  vmlal.s16 q4,d2,d24    @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_2)@
    164  vmlal.s16 q4,d3,d25    @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@
    166  vmlal.s16 q4,d4,d26    @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@
    168  vmlal.s16 q4,d5,d27    @mul_res1 = vmlal_u8(mul_res1, src_tmp2, coeffabs_5)@
    169  vmlal.s16 q4,d6,d28    @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@
    170  vmlal.s16 q4,d7,d29    @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_7)@
    187  vsub.s32 q4, q4, q15
    202  vshrn.s32 d8, q4, #6
    [all …]
|
D | ihevc_inter_pred_filters_luma_vert_w16inp.s |
    148  vmull.s16 q4,d1,d23    @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    150  vmlal.s16 q4,d0,d22    @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@
    152  vmlal.s16 q4,d2,d24    @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_2)@
    154  vmlal.s16 q4,d3,d25    @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@
    156  vmlal.s16 q4,d4,d26    @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@
    158  vmlal.s16 q4,d5,d27    @mul_res1 = vmlal_u8(mul_res1, src_tmp2, coeffabs_5)@
    159  vmlal.s16 q4,d6,d28    @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@
    160  vmlal.s16 q4,d7,d29    @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_7)@
    177  vqshrn.s32 d8, q4, #6
    192  vqrshrun.s16 d8,q4,#6  @sto_res = vqmovun_s16(sto_res_tmp)@
    [all …]
|
D | ihevc_inter_pred_luma_horz_w16out.s |
    253  vmull.u8 q4,d1,d25     @arithmetic operations for ii iteration in the same time
    254  vmlsl.u8 q4,d0,d24
    255  vmlsl.u8 q4,d2,d26
    256  vmlal.u8 q4,d3,d27
    257  vmlal.u8 q4,d4,d28
    258  vmlsl.u8 q4,d5,d29
    259  vmlal.u8 q4,d6,d30
    260  vmlsl.u8 q4,d7,d31
    262  @ vqrshrun.s16 d8,q4,#6  @narrow right shift and saturating the result
    310  vmull.u8 q4,d1,d25     @arithmetic operations for ii iteration in the same time
    [all …]
|
D | ihevc_inter_pred_filters_luma_vert.s |
    158  vmull.u8 q4,d1,d23     @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    160  vmlsl.u8 q4,d0,d22     @mul_res1 = vmlsl_u8(mul_res1, src_tmp1, coeffabs_0)@
    162  vmlsl.u8 q4,d2,d24     @mul_res1 = vmlsl_u8(mul_res1, src_tmp3, coeffabs_2)@
    164  vmlal.u8 q4,d3,d25     @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@
    166  vmlal.u8 q4,d4,d26     @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@
    168  vmlsl.u8 q4,d5,d27     @mul_res1 = vmlsl_u8(mul_res1, src_tmp2, coeffabs_5)@
    170  vmlal.u8 q4,d6,d28     @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@
    172  vmlsl.u8 q4,d7,d29     @mul_res1 = vmlsl_u8(mul_res1, src_tmp4, coeffabs_7)@
    198  vqrshrun.s16 d8,q4,#6  @sto_res = vqmovun_s16(sto_res_tmp)@
    241  vmull.u8 q4,d1,d23     @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    [all …]
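The vmull/vmlal/vmlsl chains in the ihevc_inter_pred_* files above accumulate an 8-tap interpolation filter one coefficient at a time; negative taps are expressed by switching vmlal to vmlsl. A scalar sketch of the accumulation being built up (names and types are illustrative, not taken from the files):

    /* sum of eight taps along one column; the listing then narrows the
     * accumulator with a (rounding) shift by 6 and saturates */
    static int filter_8tap_vert(const unsigned char *src, int stride,
                                const int coeff[8]) {
        int sum = 0;
        for (int i = 0; i < 8; ++i)
            sum += coeff[i] * src[i * stride];
        return sum;
    }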
|
D | ihevc_intra_pred_luma_horz.s |
    128  vdup.8 q4,d1[4]
    137  vst1.8 {q4},[r2],r3
    138  vst1.8 {q4},[r9],r3
    144  vdup.8 q4,d1[0]
    153  vst1.8 {q4},[r2],r3
    154  vst1.8 {q4},[r9],r3
    160  vdup.8 q4,d0[4]
    169  vst1.8 {q4},[r2],r3
    170  vst1.8 {q4},[r9],r3
    177  vdup.8 q4,d0[0]
    [all …]
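The vdup/vst1 pairs above implement horizontal intra prediction: each output row is one left-neighbour sample replicated across the row. A minimal intrinsics sketch of that pattern (function and parameter names are illustrative):

    #include <arm_neon.h>

    void predict_row_horz(unsigned char *dst, unsigned char left_sample) {
        uint8x16_t row = vdupq_n_u8(left_sample);  /* replicate the sample 16x */
        vst1q_u8(dst, row);                        /* write one 16-pixel row */
    }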
|
D | ihevc_inter_pred_filters_luma_horz.s |
    216  vmull.u8 q4,d1,d25     @mul_res = vmlal_u8(src[0_1], coeffabs_1)@
    218  vmlal.u8 q4,d3,d27     @mul_res = vmull_u8(src[0_3], coeffabs_3)@
    220  vmlsl.u8 q4,d0,d24     @mul_res = vmlsl_u8(src[0_0], coeffabs_0)@
    222  vmlsl.u8 q4,d2,d26     @mul_res = vmlsl_u8(src[0_2], coeffabs_2)@
    224  vmlal.u8 q4,d4,d28     @mul_res = vmlal_u8(src[0_4], coeffabs_4)@
    226  vmlsl.u8 q4,d5,d29     @mul_res = vmlsl_u8(src[0_5], coeffabs_5)@
    228  vmlal.u8 q4,d6,d30     @mul_res = vmlal_u8(src[0_6], coeffabs_6)@
    230  vmlsl.u8 q4,d7,d31     @mul_res = vmlsl_u8(src[0_7], coeffabs_7)@
    240  vqrshrun.s16 d20,q4,#6 @right shift and saturating narrow result 1
    296  vmull.u8 q4,d2,d25     @mul_res = vmlal_u8(src[0_1], coeffabs_1)@
    [all …]
|
D | ihevc_intra_pred_chroma_horz.s |
    130  vdup.16 q4,d1[0]
    139  vst1.16 {q4},[r2],r3
    140  vst1.16 {q4},[r9],r3
    146  vdup.16 q4,d0[0]
    155  vst1.16 {q4},[r2],r3
    156  vst1.16 {q4},[r9],r3
    162  vdup.16 q4,d11[0]
    171  vst1.16 {q4},[r2],r3
    172  vst1.16 {q4},[r9],r3
    179  vdup.16 q4,d10[0]
    [all …]
|
/external/boringssl/src/crypto/curve25519/asm/ |
D | x25519-asm-arm.S |
    31   vpush {q4,q5,q6,q7}
    108  vand q4,q4,q2
    119  vadd.i64 q12,q4,q1
    129  vsub.i64 q4,q4,q12
    165  vadd.i64 q4,q4,q14
    170  vadd.i64 q4,q4,q6
    174  vadd.i64 q4,q4,q13
    176  vadd.i64 q1,q4,q1
    192  vsub.i64 q1,q4,q1
    247  veor q6,q4,q5
    [all …]
|
/external/pdfium/third_party/freetype/src/base/ |
D | ftbbox.c |
    253  FT_Pos  q4 )                                  in cubic_peak() argument
    271  FT_ABS( q4 ) ) );                             in cubic_peak()
    282  q4 <<= shift;                                 in cubic_peak()
    289  q4 >>= -shift;                                in cubic_peak()
    297  if ( q1 + q2 > q3 + q4 )  /* first half */    in cubic_peak()
    299  q4 = q4 + q3;                                 in cubic_peak()
    302  q4 = q4 + q3;                                 in cubic_peak()
    304  q4 = ( q4 + q3 ) / 8;                         in cubic_peak()
    312  q3 = q3 + q4;                                 in cubic_peak()
    326  if ( q3 == q4 && q2 <= q4 )                   in cubic_peak()
    [all …]
|
/external/freetype/src/base/ |
D | ftbbox.c |
    253  FT_Pos  q4 )                                  in cubic_peak() argument
    271  FT_ABS( q4 ) ) );                             in cubic_peak()
    282  q4 <<= shift;                                 in cubic_peak()
    289  q4 >>= -shift;                                in cubic_peak()
    297  if ( q1 + q2 > q3 + q4 )  /* first half */    in cubic_peak()
    299  q4 = q4 + q3;                                 in cubic_peak()
    302  q4 = q4 + q3;                                 in cubic_peak()
    304  q4 = ( q4 + q3 ) / 8;                         in cubic_peak()
    312  q3 = q3 + q4;                                 in cubic_peak()
    326  if ( q3 == q4 && q2 <= q4 )                   in cubic_peak()
    [all …]
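Both ftbbox.c copies above (pdfium's bundled FreeType and standalone FreeType) show cubic_peak() repeatedly halving the cubic into whichever half holds the extremum. The additions that end in "( q4 + q3 ) / 8" are the standard de Casteljau midpoint subdivision written with shared partial sums; for the first half it is equivalent to

    q_1' = q_1, \qquad
    q_2' = \frac{q_1 + q_2}{2}, \qquad
    q_3' = \frac{q_1 + 2q_2 + q_3}{4}, \qquad
    q_4' = \frac{q_1 + 3q_2 + 3q_3 + q_4}{8}.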
|
/external/libavc/common/arm/ |
D | ih264_inter_pred_filters_luma_horz_a9q.s |
    127  vaddl.u8 q4, d31, d2        @// a0 + a5 (column1,row0)
    135  vmlal.u8 q4, d31, d1        @// a0 + a5 + 20a2 (column1,row0)
    143  vmlal.u8 q4, d31, d1        @// a0 + a5 + 20a2 + 20a3 (column1,row0)
    151  vmlsl.u8 q4, d31, d0        @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
    159  vmlsl.u8 q4, d31, d0        @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column1,row0)
    165  …vqrshrun.s16 d20, q4, #5    @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,r…
    195  vaddl.u8 q4, d31, d2        @// a0 + a5 (column1,row0)
    198  vmlal.u8 q4, d29, d1        @// a0 + a5 + 20a2 + 20a3 (column1,row0)
    199  vmlal.u8 q4, d30, d1        @// a0 + a5 + 20a2 (column1,row0)
    200  vmlsl.u8 q4, d27, d0        @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
    [all …]
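The comments in this file (and in the qpel/hpel variants listed further down) spell out the 6-tap half-pel luma filter being accumulated in q4. A scalar restatement of that comment formula, with the saturation that vqrshrun performs made explicit:

    /* (a0 - 5*a1 + 20*a2 + 20*a3 - 5*a4 + a5 + 16) >> 5, clamped to 0..255 */
    static unsigned char luma_6tap_halfpel(int a0, int a1, int a2,
                                           int a3, int a4, int a5) {
        int v = (a0 - 5 * a1 + 20 * a2 + 20 * a3 - 5 * a4 + a5 + 16) >> 5;
        if (v < 0)   v = 0;
        if (v > 255) v = 255;
        return (unsigned char)v;
    }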
|
D | ih264_weighted_bi_pred_a9q.s |
    181  vmovl.u8 q4, d8            @converting rows 3,4 in source 1 to 16-bit
    186  vmul.s16 q4, q4, d2[0]     @weight 1 mult. for rows 3,4
    187  vmla.s16 q4, q5, d2[2]     @weight 2 mult. for rows 3,4
    191  vrshl.s16 q4, q4, q0       @rounds off the weighted samples from rows 3,4
    194  vaddw.s8 q4, q4, d3        @adding offset for rows 3,4
    197  vqmovun.s16 d8, q4         @saturating rows 3,4 to unsigned 8-bit
    221  vmovl.u8 q4, d8            @converting row 2 in source 1 to 16-bit
    228  vmul.s16 q4, q4, d2[0]     @weight 1 mult. for row 2
    229  vmla.s16 q4, q5, d2[2]     @weight 2 mult. for row 2
    239  vrshl.s16 q4, q4, q0       @rounds off the weighted samples from row 2
    [all …]
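The weight-multiply, rounding-shift, offset-add, saturate sequence above is explicit weighted bi-prediction. A hedged scalar sketch of the overall computation (the exact rounding shift in the file travels in q0, so the log_wd and ofst parameters here are assumptions):

    static unsigned char weighted_bi_pred(int p1, int p2, int w1, int w2,
                                          int log_wd, int ofst) {
        int v = ((p1 * w1 + p2 * w2 + (1 << log_wd)) >> (log_wd + 1)) + ofst;
        if (v < 0)   v = 0;
        if (v > 255) v = 255;
        return (unsigned char)v;
    }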
|
D | ih264_inter_pred_luma_horz_qpel_a9q.s |
    134  vaddl.u8 q4, d31, d2        @// a0 + a5 (column1,row0)
    142  vmlal.u8 q4, d31, d1        @// a0 + a5 + 20a2 (column1,row0)
    150  vmlal.u8 q4, d31, d1        @// a0 + a5 + 20a2 + 20a3 (column1,row0)
    158  vmlsl.u8 q4, d31, d0        @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
    166  vmlsl.u8 q4, d31, d0        @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column1,row0)
    173  …vqrshrun.s16 d20, q4, #5    @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,r…
    207  vaddl.u8 q4, d31, d2        @// a0 + a5 (column1,row0)
    210  vmlal.u8 q4, d29, d1        @// a0 + a5 + 20a2 + 20a3 (column1,row0)
    211  vmlal.u8 q4, d30, d1        @// a0 + a5 + 20a2 (column1,row0)
    212  vmlsl.u8 q4, d27, d0        @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
    [all …]
|
D | ih264_inter_pred_luma_horz_hpel_vert_qpel_a9q.s |
    157  vaddl.u8 q4, d2, d3
    159  vmla.u16 q3, q4, q11
    161  vaddl.u8 q4, d1, d4
    163  vmls.u16 q3, q4, q12
    165  vaddl.u8 q4, d0, d5
    173  vmla.u16 q4, q5, q11
    177  vmls.u16 q4, q5, q12
    184  vst1.32 {q4}, [r9], r6      @ store temp buffer 1
    233  vadd.s16 q14, q4, q7
    281  vld1.32 {q4}, [r7], r6      @load from temp buffer 1
    [all …]
|