
Searched refs:q5 (results 1–25 of 169), sorted by relevance
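q5 is one of the sixteen 128-bit NEON quadword registers (q0–q15) of 32-bit ARM; each qN overlaps two 64-bit doubleword registers, so q5 aliases d10:d11. The matches below, grouped by directory, show the register used from C through arm_neon.h intrinsics, in hand-written GNU assembly, and in compiler and assembler test suites.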


/external/llvm/test/CodeGen/ARM/
thumb-big-stack.ll
145 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
147 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
149 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
151 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
153 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
155 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
157 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
159 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
161 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
163 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
[all …]
/external/libvpx/libvpx/vp8/common/arm/neon/
idct_dequant_full_2x_neon.c
21 int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11; in idct_dequant_full_2x_neon() local
44 q5 = vld1q_s16(q); in idct_dequant_full_2x_neon()
69 q5 = vmulq_s16(q5, q1); in idct_dequant_full_2x_neon()
81 dLow1 = vget_low_s16(q5); in idct_dequant_full_2x_neon()
82 dHigh1 = vget_high_s16(q5); in idct_dequant_full_2x_neon()
84 q5 = vcombine_s16(dHigh0, dHigh1); in idct_dequant_full_2x_neon()
87 q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2); in idct_dequant_full_2x_neon()
89 q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1); in idct_dequant_full_2x_neon()
98 q5 = vqaddq_s16(q5, q9); in idct_dequant_full_2x_neon()
100 q2 = vqsubq_s16(q6, q5); in idct_dequant_full_2x_neon()
[all …]
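In C code like this, q5 is just a variable name for an int16x8_t that the compiler will typically keep in a q register. A minimal sketch of the dequantize-and-multiply pattern visible in the snippets, assuming libvpx's cospi8sqrt2minus1 constant (20091) and otherwise illustrative names:

    #include <arm_neon.h>

    /* Sketch only: load 8 coefficients, dequantize, and apply the
       saturating doubling multiply-high used by the VP8 inverse DCT. */
    int16x8_t dequant_and_scale(const int16_t *q, const int16_t *dq) {
        int16x8_t q5 = vld1q_s16(q);        /* q5 = vld1q_s16(q)      */
        int16x8_t q1 = vld1q_s16(dq);
        q5 = vmulq_s16(q5, q1);             /* q5 = vmulq_s16(q5, q1) */

        /* split into 64-bit halves and recombine, as with
           vget_low/high + vcombine above (the real code interleaves
           the halves of two registers) */
        q5 = vcombine_s16(vget_high_s16(q5), vget_low_s16(q5));

        /* vqdmulhq_n_s16: per-lane saturating (2*a*b) >> 16 */
        return vqdmulhq_n_s16(q5, 20091 /* cospi8sqrt2minus1 */);
    }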
vp8_loopfilter_neon.c
20 uint8x16_t q5, // p1 in vp8_loop_filter_neon() argument
37 q12u8 = vabdq_u8(q4, q5); in vp8_loop_filter_neon()
38 q13u8 = vabdq_u8(q5, q6); in vp8_loop_filter_neon()
55 q2u8 = vabdq_u8(q5, q8); in vp8_loop_filter_neon()
66 q5 = veorq_u8(q5, q10); in vp8_loop_filter_neon()
80 q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), vreinterpretq_s8_u8(q8)); in vp8_loop_filter_neon()
114 q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8); in vp8_loop_filter_neon()
130 uint8x16_t q5, q6, q7, q8, q9, q10; in vp8_loop_filter_horizontal_edge_y_neon() local
141 q5 = vld1q_u8(src); in vp8_loop_filter_horizontal_edge_y_neon()
153 vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9, in vp8_loop_filter_horizontal_edge_y_neon()
[all …]
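The vabdq_u8 calls above compute the absolute pixel differences that feed the loop-filter mask. A rough sketch of that first step, assuming hypothetical names (p2, p1, p0 for pixel rows, limit for the threshold vector); the real mask also folds in more row pairs:

    #include <arm_neon.h>

    /* Sketch only: build a per-lane mask that is 0xFF where the edge
       is smooth enough to filter. */
    uint8x16_t filter_mask(uint8x16_t p2, uint8x16_t p1, uint8x16_t p0,
                           uint8x16_t limit) {
        uint8x16_t d21 = vabdq_u8(p2, p1);   /* |p2 - p1|         */
        uint8x16_t d10 = vabdq_u8(p1, p0);   /* |p1 - p0|         */
        uint8x16_t m   = vmaxq_u8(d21, d10); /* worst difference  */
        return vcgeq_u8(limit, m);           /* limit >= m, per lane */
    }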
mbloopfilter_neon.c
19 uint8x16_t q5, // p1 in vp8_mbloop_filter_neon() argument
39 q12u8 = vabdq_u8(q4, q5); in vp8_mbloop_filter_neon()
40 q13u8 = vabdq_u8(q5, q6); in vp8_mbloop_filter_neon()
59 q1u8 = vabdq_u8(q5, q8); in vp8_mbloop_filter_neon()
69 q5 = veorq_u8(q5, q0u8); in vp8_mbloop_filter_neon()
83 q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), vreinterpretq_s8_u8(q8)); in vp8_mbloop_filter_neon()
139 q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8); in vp8_mbloop_filter_neon()
158 uint8x16_t q5, q6, q7, q8, q9, q10; in vp8_mbloop_filter_horizontal_edge_y_neon() local
170 q5 = vld1q_u8(src); in vp8_mbloop_filter_horizontal_edge_y_neon()
182 vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9, in vp8_mbloop_filter_horizontal_edge_y_neon()
[all …]
/external/valgrind/none/tests/arm/
neon128.c
358 TESTINSN_imm("vmov.i32 q5", q5, 0x700); in main()
372 TESTINSN_imm("vmvn.i32 q5", q5, 0x700); in main()
391 TESTINSN_imm("vbic.i32 q5", q5, 0x700); in main()
439 TESTINSN_bin("vand q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
445 TESTINSN_bin("vbic q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
463 TESTINSN_bin("veor q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
473 TESTINSN_bin("vbsl q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
483 TESTINSN_bin("vbit q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
493 TESTINSN_bin("vbif q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
507 TESTINSN_bin("vext.8 q0, q5, q15, #12", q0, q5, i8, 0x77, q15, i8, 0xff); in main()
[all …]
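Each TESTINSN_* macro feeds one NEON instruction fixed inputs and prints the output register, so valgrind's emulation can be diffed against real hardware. The real macros are considerably more elaborate; a stripped-down sketch of the idea, using GCC inline assembly (compile with -mfpu=neon):

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        uint32_t out[4];
        __asm__ volatile(
            "vmov.i32 q5, #0x700     \n\t"  /* instruction under test */
            "vst1.32 {d10,d11}, [%0] \n\t"  /* q5 aliases d10:d11     */
            :
            : "r"(out)
            : "d10", "d11", "memory");
        printf("q5 = %08x %08x %08x %08x\n",
               out[0], out[1], out[2], out[3]);
        return 0;
    }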
/external/libavc/common/arm/
ih264_inter_pred_luma_horz_qpel_vert_qpel_a9q.s
146 vld1.32 {q5}, [r7], r2 @ Vector load from src[5_0]
248 vaddl.u8 q5, d0, d5
249 vmlal.u8 q5, d2, d30
250 vmlal.u8 q5, d3, d30
251 vmlsl.u8 q5, d1, d31
252 vmlsl.u8 q5, d4, d31
259 vqrshrun.s16 d26, q5, #5
261 vaddl.u8 q5, d12, d17
262 vmlal.u8 q5, d14, d30
263 vmlal.u8 q5, d15, d30
[all …]
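The vaddl/vmlal/vmlsl/vqrshrun sequence above is H.264's six-tap half-pel filter, (1, -5, 20, 20, -5, 1) with rounding and a right shift by 5. The same arithmetic in intrinsics, with illustrative names:

    #include <arm_neon.h>

    /* Sketch only: one row of the 6-tap filter, accumulated at 16 bits. */
    uint8x8_t sixtap(uint8x8_t s0, uint8x8_t s1, uint8x8_t s2,
                     uint8x8_t s3, uint8x8_t s4, uint8x8_t s5) {
        uint16x8_t q5 = vaddl_u8(s0, s5);         /* s0 + s5  */
        q5 = vmlal_u8(q5, s2, vdup_n_u8(20));     /* + 20*s2  */
        q5 = vmlal_u8(q5, s3, vdup_n_u8(20));     /* + 20*s3  */
        q5 = vmlsl_u8(q5, s1, vdup_n_u8(5));      /* - 5*s1   */
        q5 = vmlsl_u8(q5, s4, vdup_n_u8(5));      /* - 5*s4   */
        /* round, shift by 5, saturate to unsigned 8-bit */
        return vqrshrun_n_s16(vreinterpretq_s16_u16(q5), 5);
    }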
ih264_inter_pred_chroma_a9q.s
148 vmull.u8 q5, d0, d28
149 vmlal.u8 q5, d5, d30
150 vmlal.u8 q5, d3, d29
151 vmlal.u8 q5, d8, d31
163 vqrshrun.s16 d14, q5, #6
175 vmull.u8 q5, d0, d28
176 vmlal.u8 q5, d5, d30
177 vmlal.u8 q5, d3, d29
178 vmlal.u8 q5, d8, d31
186 vqrshrun.s16 d14, q5, #6
ih264_inter_pred_luma_horz_hpel_vert_qpel_a9q.s
168 vaddl.u8 q5, d2, d3
173 vmla.u16 q4, q5, q11
175 vaddl.u8 q5, d1, d4
177 vmls.u16 q4, q5, q12
179 vaddl.u8 q5, d0, d5
187 vmla.u16 q5, q6, q11
191 vmls.u16 q5, q6, q12
198 vst1.32 {q5}, [r9], r6 @ store temp buffer 2
235 vadd.s16 q15, q5, q6
263 vadd.s16 q14, q5, q8
[all …]
/external/llvm/test/MC/ARM/
neont2-shiftaccum-encoding.s
11 vsra.s32 q9, q5, #32
20 vsra.u64 q4, q5, #25
29 vsra.s32 q5, #32
38 vsra.u64 q5, #25
46 @ CHECK: vsra.s32 q9, q5, #32 @ encoding: [0xe0,0xef,0x5a,0x21]
55 @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xa7,0xff,0xda,0x81]
63 @ CHECK: vsra.s32 q5, q5, #32 @ encoding: [0xa0,0xef,0x5a,0xa1]
72 @ CHECK: vsra.u64 q5, q5, #25 @ encoding: [0xa7,0xff,0xda,0xa1]
86 vrsra.s64 q4, q5, #64
87 vrsra.u8 q5, q6, #8
[all …]
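These .s files are LLVM MC round-trip tests: each instruction is paired with a FileCheck "@ CHECK:" line giving the expected disassembly and byte encoding. Note how the two-operand forms (vsra.s32 q5, #32) are assembler aliases that reuse the destination as the first source, which is why the CHECK lines spell them out as vsra.s32 q5, q5, #32.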
neon-shiftaccum-encoding.s
9 vsra.s32 q9, q5, #32
18 vsra.u64 q4, q5, #25
27 vsra.s32 q5, #32
36 vsra.u64 q5, #25
44 @ CHECK: vsra.s32 q9, q5, #32 @ encoding: [0x5a,0x21,0xe0,0xf2]
53 @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xda,0x81,0xa7,0xf3]
61 @ CHECK: vsra.s32 q5, q5, #32 @ encoding: [0x5a,0xa1,0xa0,0xf2]
70 @ CHECK: vsra.u64 q5, q5, #25 @ encoding: [0xda,0xa1,0xa7,0xf3]
83 vrsra.s64 q4, q5, #64
84 vrsra.u8 q5, q6, #8
[all …]
neont2-minmax-encoding.s
22 vmax.s16 q4, q5, q6
27 vmax.f32 q9, q5, q1
30 vmax.s16 q5, q6
33 vmax.u16 q4, q5
52 @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x1a,0xef,0x4c,0x86]
57 @ CHECK: vmax.f32 q9, q5, q1 @ encoding: [0x4a,0xef,0x42,0x2f]
59 @ CHECK: vmax.s16 q5, q5, q6 @ encoding: [0x1a,0xef,0x4c,0xa6]
62 @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x18,0xff,0x4a,0x86]
84 vmin.s16 q4, q5, q6
89 vmin.f32 q9, q5, q1
[all …]
neon-minmax-encoding.s
20 vmax.s16 q4, q5, q6
25 vmax.f32 q9, q5, q1
28 vmax.s16 q5, q6
31 vmax.u16 q4, q5
50 @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x4c,0x86,0x1a,0xf2]
55 @ CHECK: vmax.f32 q9, q5, q1 @ encoding: [0x42,0x2f,0x4a,0xf2]
57 @ CHECK: vmax.s16 q5, q5, q6 @ encoding: [0x4c,0xa6,0x1a,0xf2]
60 @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x4a,0x86,0x18,0xf3]
82 vmin.s16 q4, q5, q6
87 vmin.f32 q9, q5, q1
[all …]
neon-shift-encoding.s
116 vsra.s64 q4, q5, #63
123 vsra.s16 q5, #15
134 @ CHECK: vsra.s64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf2]
140 @ CHECK: vsra.s16 q5, q5, #15 @ encoding: [0x5a,0xa1,0x91,0xf2]
152 vsra.u64 q4, q5, #63
159 vsra.u16 q5, #15
170 @ CHECK: vsra.u64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf3]
176 @ CHECK: vsra.u16 q5, q5, #15 @ encoding: [0x5a,0xa1,0x91,0xf3]
188 vsri.64 q4, q5, #63
195 vsri.16 q5, #15
[all …]
neon-bitwise-encoding.s
286 vand q6, q5
287 vand.s8 q6, q5
292 veor q6, q5
293 veor.8 q6, q5
298 veor q6, q5
299 veor.i8 q6, q5
304 vclt.s16 q5, #0
307 vceq.s16 q5, q3
310 vcgt.s16 q5, q3
313 vcge.s16 q5, q3
[all …]
/external/libhevc/common/arm/
ihevc_inter_pred_luma_vert_w16inp_w16out.s
174 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
176 vmlal.s16 q5,d1,d22 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)@
178 vmlal.s16 q5,d3,d24 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_2)@
180 vmlal.s16 q5,d4,d25 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
182 vmlal.s16 q5,d5,d26 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
184 vmlal.s16 q5,d6,d27 @mul_res2 = vmlal_u8(mul_res2, src_tmp3, coeffabs_5)@
185 vmlal.s16 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
186 vmlal.s16 q5,d16,d29 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_7)@
201 vsub.s32 q5, q5, q15
221 vshrn.s32 d10, q5, #6
[all …]
ihevc_inter_pred_filters_luma_vert_w16inp.s
164 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
166 vmlal.s16 q5,d1,d22 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)@
168 vmlal.s16 q5,d3,d24 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_2)@
170 vmlal.s16 q5,d4,d25 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
172 vmlal.s16 q5,d5,d26 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
174 vmlal.s16 q5,d6,d27 @mul_res2 = vmlal_u8(mul_res2, src_tmp3, coeffabs_5)@
175 vmlal.s16 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
176 vmlal.s16 q5,d16,d29 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_7)@
191 vqshrn.s32 d10, q5, #6
210 vqrshrun.s16 d10,q5,#6 @sto_res = vqmovun_s16(sto_res_tmp)@
[all …]
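Both files above accumulate eight taps of 16-bit intermediate data into 32-bit lanes (vmull.s16 then vmlal.s16) and narrow back with a saturating rounding shift. A compact sketch of that accumulation, with illustrative array parameters:

    #include <arm_neon.h>

    /* Sketch only: 8-tap vertical filter over 16-bit intermediates. */
    int16x4_t filt8_w16inp(const int16x4_t src[8],
                           const int16x4_t coeff[8]) {
        int32x4_t q5 = vmull_s16(src[0], coeff[0]); /* widen to 32 bits */
        for (int i = 1; i < 8; ++i)
            q5 = vmlal_s16(q5, src[i], coeff[i]);   /* accumulate taps  */
        return vqshrn_n_s32(q5, 6);                 /* saturating >> 6  */
    }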
ihevc_inter_pred_filters_luma_vert.s
176 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
179 vmlsl.u8 q5,d1,d22 @mul_res2 = vmlsl_u8(mul_res2, src_tmp2, coeffabs_0)@
182 vmlsl.u8 q5,d3,d24 @mul_res2 = vmlsl_u8(mul_res2, src_tmp4, coeffabs_2)@
185 vmlal.u8 q5,d4,d25 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
187 vmlal.u8 q5,d5,d26 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
189 vmlsl.u8 q5,d6,d27 @mul_res2 = vmlsl_u8(mul_res2, src_tmp3, coeffabs_5)@
192 vmlal.u8 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
195 vmlsl.u8 q5,d16,d29 @mul_res2 = vmlsl_u8(mul_res2, src_tmp1, coeffabs_7)@
213 vqrshrun.s16 d10,q5,#6 @sto_res = vqmovun_s16(sto_res_tmp)@
267 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
[all …]
ihevc_itrans_recon_4x4.s
158 vaddl.s16 q5,d0,d2 @pi2_src[0] + pi2_src[2]
160 vshl.s32 q5,q5,#6 @e[0] = 64*(pi2_src[0] + pi2_src[2])
163 vadd.s32 q7,q5,q3 @((e[0] + o[0] )
166 vsub.s32 q10,q5,q3 @((e[0] - o[0])
188 vaddl.s16 q5,d0,d2 @pi2_src[0] + pi2_src[2]
190 vshl.s32 q5,q5,#6 @e[0] = 64*(pi2_src[0] + pi2_src[2])
194 vadd.s32 q7,q5,q3 @((e[0] + o[0] )
197 vsub.s32 q10,q5,q3 @((e[0] - o[0])
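Per the comments above, the even part of the 4x4 inverse transform is e[0] = 64*(pi2_src[0] + pi2_src[2]), combined with the odd part as e ± o. The same butterfly in intrinsics (names are illustrative):

    #include <arm_neon.h>

    /* Sketch only: even/odd butterfly of the 4x4 inverse transform. */
    void butterfly(int16x4_t src0, int16x4_t src2, int32x4_t o0,
                   int32x4_t *sum, int32x4_t *diff) {
        int32x4_t e0 = vaddl_s16(src0, src2); /* pi2_src[0]+pi2_src[2] */
        e0 = vshlq_n_s32(e0, 6);              /* e[0] = 64 * (...)     */
        *sum  = vaddq_s32(e0, o0);            /* e[0] + o[0]           */
        *diff = vsubq_s32(e0, o0);            /* e[0] - o[0]           */
    }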
/external/boringssl/src/crypto/curve25519/asm/
x25519-asm-arm.S
31 vpush {q4,q5,q6,q7}
100 vshr.u64 q5,q5,#26
113 vand q5,q5,q3
123 vadd.i64 q5,q5,q12
125 vadd.i64 q14,q5,q0
137 vsub.i64 q5,q5,q12
186 vadd.i64 q5,q5,q1
247 veor q6,q4,q5
259 veor q5,q5,q6
297 vadd.i32 q2,q5,q7
[all …]
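The vshr.u64 #26 / vand / vadd.i64 triplets above are the carry steps of the reduced-radix field arithmetic: the bits of a limb above 2^26 are split off and added into the next limb, and the limb is masked back to 26 bits (the full representation mixes limb widths). A sketch of one such step, with hypothetical names:

    #include <arm_neon.h>

    /* Sketch only: propagate the carry out of a 26-bit limb pair. */
    void carry_26(uint64x2_t *h5, uint64x2_t *h6) {
        uint64x2_t carry = vshrq_n_u64(*h5, 26);  /* vshr.u64 #26 */
        *h6 = vaddq_u64(*h6, carry);              /* vadd.i64     */
        *h5 = vandq_u64(*h5,
                        vdupq_n_u64((1UL << 26) - 1)); /* vand mask */
    }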
/external/libjpeg-turbo/simd/
jsimd_arm_neon.S
111 JLONG q1, q2, q3, q4, q5, q6, q7; \
124 q5 = row7 + row3; \
126 q6 = MULTIPLY(q5, FIX_1_175875602_MINUS_1_961570560) + \
128 q7 = MULTIPLY(q5, FIX_1_175875602) + \
140 q5 = q7; \
150 q5 += MULTIPLY(row5, FIX_2_053119869_MINUS_2_562915447) + \
166 tmp1 = q5; \
289 vmov q5, q7
298 vmlal.s16 q5, ROW5L, XFIX_2_053119869_MINUS_2_562915447
300 vmlsl.s16 q5, ROW3L, XFIX_2_562915447
[all …]
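The FIX_* identifiers are libjpeg's fixed-point IDCT multipliers: the named fraction scaled by 2^13 (CONST_BITS), so MULTIPLY(q5, FIX_1_175875602) is a widening multiply by round(1.175875602 * 8192). The vmlal.s16/vmlsl.s16 lines below the C macro block are the NEON form of the same products.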
/external/boringssl/linux-arm/crypto/aes/
bsaes-armv7.S
1147 vld1.8 {q4,q5}, [r0]!
1169 veor q5, q5, q14
1174 vst1.8 {q5}, [r1]!
1202 vld1.8 {q5}, [r0]!
1314 vmov q5,q0 @ and input are preserved
1318 vmov q15, q5 @ q5 holds input
1413 vadd.u32 q5, q2, q10
1416 vadd.u32 q10, q5, q10 @ next counter
1455 veor q5, q15
1464 vst1.8 {q5}, [r1]!
[all …]
aesv8-armx32.S
319 veor q5,q8,q7
371 veor q8,q8,q5
414 veor q8,q8,q5
463 veor q5,q2,q7
504 veor q5,q5,q1
509 vst1.8 {q5},[r1]!
550 veor q5,q6,q7
559 veor q5,q5,q1
562 vst1.8 {q5},[r1]!
567 veor q5,q5,q10
[all …]
/external/libavc/encoder/arm/
ih264e_evaluate_intra4x4_modes_a9q.s
237 vext.8 q5, q0, q0, #2
315 vext.8 q15, q5, q5, #4
317 vext.8 q15, q5, q5, #3
351 vmov.8 q1, q5
363 vext.8 q15, q5, q5, #3
390 vext.8 q15, q5, q5, #5
418 vrev64.8 q5, q1
428 vext.8 q6, q5, q5, #7
431 vext.8 q6, q5, q5, #3
434 vext.8 q6, q5, q5, #1
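vext.8 with both source operands equal, as used repeatedly above, rotates the vector: lane i of the result takes lane (i+n) mod 16 of q5. The intrinsic equivalent:

    #include <arm_neon.h>

    /* Sketch only: vext.8 q15, q5, q5, #3 as an intrinsic. */
    uint8x16_t rotate_bytes_3(uint8x16_t q5) {
        return vextq_u8(q5, q5, 3);
    }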
ih264e_evaluate_intra_chroma_modes_a9q.s
100 vld1.32 {q5}, [r1]!
102 vuzp.u8 q4, q5 @
129 vzip.u16 q4, q5
131 vadd.u16 q7, q5, q6
134 vqrshrn.u16 d16, q5, #2
141 vzip.u16 q4, q5
142 vqrshrn.u16 d16, q5, #2
149 vzip.u16 q4, q5
161 vld1.32 {q5}, [r12]!
326 vmov q15, q5
[all …]
/external/libmpeg2/common/arm/
impeg2_idct.s
158 vaddw.u8 q5, q15, d1
162 vqmovun.s16 d1, q5
229 vmovl.s16 q5, d3
231 vraddhn.s32 d13, q0, q5
239 vmovl.s16 q5, d3
241 vraddhn.s32 d13, q0, q5
249 vmovl.s16 q5, d3
251 vraddhn.s32 d13, q0, q5
259 vmovl.s16 q5, d3
261 vraddhn.s32 d13, q0, q5
[all …]
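The vaddw.u8 / vqmovun.s16 pair above is the standard reconstruction step: widen the 8-bit prediction to 16 bits, add the IDCT residual, and saturate back to [0, 255]. In intrinsics, with illustrative names:

    #include <arm_neon.h>

    /* Sketch only: add a 16-bit residual to an 8-bit prediction row. */
    uint8x8_t add_residual(int16x8_t res, uint8x8_t pred) {
        int16x8_t sum = vaddq_s16(res,
            vreinterpretq_s16_u16(vmovl_u8(pred)));  /* vaddw.u8    */
        return vqmovun_s16(sum);                     /* vqmovun.s16 */
    }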
