
Searched refs: q7 (Results 1 – 25 of 121) sorted by relevance


/external/llvm/test/MC/ARM/
neon-bitwise-encoding.s
110 veor q4, q7, q3
111 veor.8 q4, q7, q3
112 veor.16 q4, q7, q3
113 veor.32 q4, q7, q3
114 veor.64 q4, q7, q3
116 veor.i8 q4, q7, q3
117 veor.i16 q4, q7, q3
118 veor.i32 q4, q7, q3
119 veor.i64 q4, q7, q3
121 veor.s8 q4, q7, q3
[all …]
neon-shiftaccum-encoding.s
7 vsra.s8 q7, q2, #8
15 vsra.u8 q1, q7, #8
16 vsra.u16 q2, q7, #6
33 vsra.u8 q7, #8
34 vsra.u16 q7, #6
42 @ CHECK: vsra.s8 q7, q2, #8 @ encoding: [0x54,0xe1,0x88,0xf2]
50 @ CHECK: vsra.u8 q1, q7, #8 @ encoding: [0x5e,0x21,0x88,0xf3]
51 @ CHECK: vsra.u16 q2, q7, #6 @ encoding: [0x5e,0x41,0x9a,0xf3]
67 @ CHECK: vsra.u8 q7, q7, #8 @ encoding: [0x5e,0xe1,0x88,0xf3]
68 @ CHECK: vsra.u16 q7, q7, #6 @ encoding: [0x5e,0xe1,0x9a,0xf3]
[all …]
neont2-shiftaccum-encoding.s
9 vsra.s8 q7, q2, #8
17 vsra.u8 q1, q7, #8
18 vsra.u16 q2, q7, #6
35 vsra.u8 q7, #8
36 vsra.u16 q7, #6
44 @ CHECK: vsra.s8 q7, q2, #8 @ encoding: [0x88,0xef,0x54,0xe1]
52 @ CHECK: vsra.u8 q1, q7, #8 @ encoding: [0x88,0xff,0x5e,0x21]
53 @ CHECK: vsra.u16 q2, q7, #6 @ encoding: [0x9a,0xff,0x5e,0x41]
69 @ CHECK: vsra.u8 q7, q7, #8 @ encoding: [0x88,0xff,0x5e,0xe1]
70 @ CHECK: vsra.u16 q7, q7, #6 @ encoding: [0x9a,0xff,0x5e,0xe1]
[all …]
neont2-minmax-encoding.s
23 vmax.s32 q7, q8, q9
26 vmax.u32 q6, q7, q8
34 vmax.u32 q7, q8
53 @ CHECK: vmax.s32 q7, q8, q9 @ encoding: [0x20,0xef,0xe2,0xe6]
56 @ CHECK: vmax.u32 q6, q7, q8 @ encoding: [0x2e,0xff,0x60,0xc6]
63 @ CHECK: vmax.u32 q7, q7, q8 @ encoding: [0x2e,0xff,0x60,0xe6]
85 vmin.s32 q7, q8, q9
88 vmin.u32 q6, q7, q8
96 vmin.u32 q7, q8
115 @ CHECK: vmin.s32 q7, q8, q9 @ encoding: [0x20,0xef,0xf2,0xe6]
[all …]
neon-minmax-encoding.s
21 vmax.s32 q7, q8, q9
24 vmax.u32 q6, q7, q8
32 vmax.u32 q7, q8
51 @ CHECK: vmax.s32 q7, q8, q9 @ encoding: [0xe2,0xe6,0x20,0xf2]
54 @ CHECK: vmax.u32 q6, q7, q8 @ encoding: [0x60,0xc6,0x2e,0xf3]
61 @ CHECK: vmax.u32 q7, q7, q8 @ encoding: [0x60,0xe6,0x2e,0xf3]
83 vmin.s32 q7, q8, q9
86 vmin.u32 q6, q7, q8
94 vmin.u32 q7, q8
113 @ CHECK: vmin.s32 q7, q8, q9 @ encoding: [0xf2,0xe6,0x20,0xf2]
[all …]
neon-shift-encoding.s
114 vsra.s16 q2, q7, #15
125 vsra.s64 q7, #63
132 @ CHECK: vsra.s16 q2, q7, #15 @ encoding: [0x5e,0x41,0x91,0xf2]
142 @ CHECK: vsra.s64 q7, q7, #63 @ encoding: [0xde,0xe1,0x81,0xf2]
150 vsra.u16 q2, q7, #15
161 vsra.u64 q7, #63
168 @ CHECK: vsra.u16 q2, q7, #15 @ encoding: [0x5e,0x41,0x91,0xf3]
178 @ CHECK: vsra.u64 q7, q7, #63 @ encoding: [0xde,0xe1,0x81,0xf3]
186 vsri.16 q2, q7, #15
197 vsri.64 q7, #63
[all …]
/external/libvpx/libvpx/vp8/encoder/arm/neon/
vp8_mse16x16_neon.asm
30 vpush {q7}
32 vmov.i8 q7, #0 ;q7, q8, q9, q10 - sse
50 vmlal.s16 q7, d22, d22
57 vmlal.s16 q7, d26, d26
64 vadd.u32 q7, q7, q8
69 vadd.u32 q10, q7, q9
76 vpop {q7}
88 vpush {q7}
104 vmull.s16 q7, d22, d22
109 vadd.u32 q7, q7, q8
[all …]
fastquantizeb_neon.asm
27 vstmdb sp!, {q4-q7}
44 vld1.s16 {q6, q7}, [r6@128] ; load round_ptr [0-15]
50 vadd.s16 q5, q7
67 vadd.s16 q11, q7
77 vld1.s16 {q6, q7}, [r8@128] ;load dequant_ptr[i]
99 vmul.s16 q3, q7, q5
108 vmul.s16 q13, q7, q11
110 vld1.16 {q6, q7}, [r0@128] ; load inverse scan order
120 vand q1, q7, q15
130 vand q11, q7, q3
[all …]
/external/libavc/common/arm/
ih264_inter_pred_luma_horz_qpel_vert_hpel_a9q.s
329 vaddl.u8 q7, d4, d6
332 vmla.u16 q6, q7, q13
334 vaddl.u8 q7, d1, d11
336 vmla.u16 q7, q9, q13
340 vmls.u16 q7, q11, q12
343 vext.16 q11, q6, q7, #5
347 vst1.32 {q7}, [r9], r7 @ store row 0 to temp buffer: col 1
349 vext.16 q8, q6, q7, #2
351 vext.16 q9, q6, q7, #3
352 vext.16 q10, q6, q7, #4
[all …]
ih264_inter_pred_filters_luma_horz_a9q.s
131 vaddl.u8 q7, d28, d5 @// a0 + a5 (column1,row1)
139 vmlal.u8 q7, d28, d1 @// a0 + a5 + 20a2 (column1,row1)
147 vmlal.u8 q7, d28, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row1)
155 vmlsl.u8 q7, d28, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row1)
163 vmlsl.u8 q7, d28, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column1,row1)
169 …vqrshrun.s16 d23, q7, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,r…
188 vaddl.u8 q7, d28, d5 @// a0 + a5 (column1,row1)
190 vmlal.u8 q7, d25, d1 @// a0 + a5 + 20a2 (column1,row1)
191 vmlal.u8 q7, d24, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row1)
192 vmlsl.u8 q7, d23, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row1)
[all …]
ih264_inter_pred_luma_horz_qpel_a9q.s
138 vaddl.u8 q7, d28, d5 @// a0 + a5 (column1,row1)
146 vmlal.u8 q7, d28, d1 @// a0 + a5 + 20a2 (column1,row1)
154 vmlal.u8 q7, d28, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row1)
162 vmlsl.u8 q7, d28, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row1)
170 vmlsl.u8 q7, d28, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column1,row1)
177 …vqrshrun.s16 d18, q7, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1,r…
200 vaddl.u8 q7, d28, d5 @// a0 + a5 (column1,row1)
202 vmlal.u8 q7, d25, d1 @// a0 + a5 + 20a2 (column1,row1)
203 vmlal.u8 q7, d24, d1 @// a0 + a5 + 20a2 + 20a3 (column1,row1)
204 vmlsl.u8 q7, d23, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row1)
[all …]
ih264_inter_pred_filters_luma_vert_a9q.s
129 vaddl.u8 q7, d0, d10 @ temp = src[0_0] + src[5_0]
131 vmla.u16 q7, q6, q11 @ temp += temp1 * 20
138 vmls.u16 q7, q8, q12 @ temp -= temp2 * 5
145 vqrshrun.s16 d30, q7, #5 @ dst[0_0] = CLIP_U8((temp +16) >> 5)
146 vaddl.u8 q7, d3, d1
148 vmla.u16 q7, q6, q11
157 vmls.u16 q7, q13, q12
166 vqrshrun.s16 d31, q7, #5
168 vaddl.u8 q7, d6, d4
170 vmla.u16 q7, q6, q11
[all …]
ih264_inter_pred_luma_vert_qpel_a9q.s
136 vaddl.u8 q7, d0, d10 @ temp = src[0_0] + src[5_0]
138 vmla.u16 q7, q6, q11 @ temp += temp1 * 20
145 vmls.u16 q7, q8, q12 @ temp -= temp2 * 5
152 vqrshrun.s16 d30, q7, #5 @ dst[0_0] = CLIP_U8((temp +16) >> 5)
153 vaddl.u8 q7, d3, d1
155 vmla.u16 q7, q6, q11
166 vmls.u16 q7, q13, q12
175 vqrshrun.s16 d31, q7, #5
176 vld1.u32 {q7}, [r7], r2 @ Load for interpolation row 1
178 vrhadd.u8 q15, q7, q15 @ Interpolation to obtain qpel value
[all …]
ih264_deblk_chroma_a9.s
110 vaddl.u8 q7, d4, d2 @
113 vmlal.u8 q7, d6, d31 @
121 vrshrn.u16 d10, q7, #2 @
191 vaddl.u8 q7, d2, d6
196 vmlal.u8 q7, d0, d31
204 vrshrn.i16 d14, q7, #2
210 vbit q1, q7, q4
285 vmovl.u8 q7, d14 @
293 vsli.16 q7, q7, #8 @
312 vmin.u8 q7, q3, q7 @Q7 = delta = (ABS(i_macro) > C) ? C : ABS(i_macro)
[all …]
ih264_inter_pred_luma_horz_hpel_vert_qpel_a9q.s
196 vaddl.u8 q7, d2, d3
201 vmla.u16 q6, q7, q11
203 vaddl.u8 q7, d1, d4
205 vmls.u16 q6, q7, q12
207 vaddl.u8 q7, d0, d5
215 vmla.u16 q7, q8, q11
219 vmls.u16 q7, q8, q12
228 vst1.32 {q7}, [r9], r6 @ store temp buffer 4
233 vadd.s16 q14, q4, q7
265 vadd.s16 q15, q6, q7
[all …]
/external/boringssl/linux-arm/crypto/aes/
bsaes-armv7.S
1109 veor q7, q7, q6 @ fix up round 0 key
1110 vstmia sp, {q7}
1125 veor q7, q7, q6 @ fix up round 0 key
1126 vstmia r4, {q7}
1149 vld1.8 {q6,q7}, [r0]
1165 veor q7, q7, q12
1172 vst1.8 {q7}, [r1]!
1219 veor q7, q7, q12
1225 vst1.8 {q7}, [r1]!
1242 veor q7, q7, q12
[all …]
/external/libhevc/common/arm/
ihevc_inter_pred_luma_vert_w16inp_w16out.s
205 vmull.s16 q7,d4,d23
206 vmlal.s16 q7,d3,d22
207 vmlal.s16 q7,d5,d24
208 vmlal.s16 q7,d6,d25
210 vmlal.s16 q7,d7,d26
212 vmlal.s16 q7,d16,d27
214 vmlal.s16 q7,d17,d28
216 vmlal.s16 q7,d18,d29
245 vsub.s32 q7, q7, q15
264 vshrn.s32 d14, q7, #6
[all …]
ihevc_inter_pred_filters_luma_vert_w16inp.s
194 vmull.s16 q7,d4,d23
195 vmlal.s16 q7,d3,d22
196 vmlal.s16 q7,d5,d24
197 vmlal.s16 q7,d6,d25
199 vmlal.s16 q7,d7,d26
201 vmlal.s16 q7,d16,d27
203 vmlal.s16 q7,d17,d28
205 vmlal.s16 q7,d18,d29
232 vqshrn.s32 d14, q7, #6
250 vqrshrun.s16 d14,q7,#6
[all …]
ihevc_inter_pred_filters_luma_vert.s
216 vmull.u8 q7,d4,d23
218 vmlsl.u8 q7,d3,d22
219 vmlsl.u8 q7,d5,d24
220 vmlal.u8 q7,d6,d25
222 vmlal.u8 q7,d7,d26
224 vmlsl.u8 q7,d16,d27
226 vmlal.u8 q7,d17,d28
228 vmlsl.u8 q7,d18,d29
264 vqrshrun.s16 d14,q7,#6
319 vmull.u8 q7,d4,d23
[all …]
ihevc_itrans_recon_16x16.s
259 vmull.s16 q7,d10,d0[0]
260 vmlal.s16 q7,d11,d1[2]
286 vmlal.s16 q7,d4,d3[0]
287 vmlsl.s16 q7,d5,d3[2]
352 vmlsl.s16 q7,d10,d0[0]
353 vmlsl.s16 q7,d11,d0[2]
354 vmlsl.s16 q7,d4,d1[0]
355 vmlsl.s16 q7,d5,d2[2]
373 vadd.s32 q6,q7,q13
374 vsub.s32 q12,q7,q13
[all …]
/external/valgrind/none/tests/arm/
neon128.c
359 TESTINSN_imm("vmov.i16 q7", q7, 0x700); in main()
373 TESTINSN_imm("vmvn.i16 q7", q7, 0x700); in main()
451 TESTINSN_bin("vorr q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff); in main()
457 TESTINSN_bin("vorn q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff); in main()
467 TESTINSN_bin("veor q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff); in main()
477 TESTINSN_bin("vbsl q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff); in main()
487 TESTINSN_bin("vbit q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff); in main()
497 TESTINSN_bin("vbif q7, q3, q0", q7, q3, i8, 0x24, q0, i16, 0xff); in main()
589 TESTINSN_bin("vrhadd.s8 q5, q7, q5", q5, q7, i32, (1 << 31) + 1, q5, i32, (1 << 31) + 2); in main()
592 TESTINSN_bin("vrhadd.s8 q5, q7, q5", q5, q7, i32, (1 << 31) + 1, q5, i32, (1 << 31) + 3); in main()
[all …]
/external/libvpx/libvpx/vp8/common/arm/neon/
mbloopfilter_neon.c
22 uint8x16_t q7, // q0 in vp8_mbloop_filter_neon() argument
42 q14u8 = vabdq_u8(q8, q7); in vp8_mbloop_filter_neon()
51 q12u8 = vabdq_u8(q6, q7); in vp8_mbloop_filter_neon()
68 q7 = veorq_u8(q7, q0u8); in vp8_mbloop_filter_neon()
79 q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)), in vp8_mbloop_filter_neon()
81 q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)), in vp8_mbloop_filter_neon()
110 q7s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q2s8); in vp8_mbloop_filter_neon()
162 uint8x16_t q5, q6, q7, q8, q9, q10; in vp8_mbloop_filter_horizontal_edge_y_neon() local
178 q7 = vld1q_u8(src); in vp8_mbloop_filter_horizontal_edge_y_neon()
187 q5, q6, q7, q8, q9, q10, in vp8_mbloop_filter_horizontal_edge_y_neon()
[all …]
idct_dequant_full_2x_neon.asm
60 ; q7: 12 * sinpi : d1/temp2
64 vqdmulh.s16 q7, q5, d0[2]
90 vqadd.s16 q3, q4, q7
99 vqsub.s16 q7, q10, q3
103 vtrn.32 q5, q7
105 vtrn.16 q6, q7
110 ; q7: l 3, 7,11,15 r 3, 7,11,15
117 vqdmulh.s16 q9, q7, d0[2]
119 vqdmulh.s16 q11, q7, d0[0]
131 vqadd.s16 q11, q7, q11
[all …]
/external/libmpeg2/common/arm/
impeg2_idct.s
165 vaddw.u8 q7, q15, d3
170 vqmovun.s16 d3, q7
232 vaddw.u8 q7, q6, d30
233 vqmovun.s16 d30, q7
242 vaddw.u8 q7, q6, d30
243 vqmovun.s16 d30, q7
252 vaddw.u8 q7, q6, d30
253 vqmovun.s16 d30, q7
262 vaddw.u8 q7, q6, d30
263 vqmovun.s16 d30, q7
[all …]
/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_idct32x32_add_neon.asm
112 vrshr.s16 q7, q7, #6
117 vaddw.u8 q7, q7, d9
122 vqmovun.s16 d9, q7
146 vrshr.s16 q7, q7, #6
151 vaddw.u8 q7, q7, d9
156 vqmovun.s16 d9, q7
171 ; q4-q7 contain the results (out[j * 32 + 0-31])
182 vrshr.s16 q7, q7, #6
187 vaddw.u8 q7, q7, d7
192 vqmovun.s16 d7, q7
[all …]
