
Searched refs:q5 (Results 1 – 25 of 133) sorted by relevance


/external/libvpx/libvpx/vp8/common/arm/neon/
idct_dequant_full_2x_neon.asm
30 vld1.16 {q4, q5}, [r0] ; r q
49 vmul.i16 q5, q5, q1
54 ; q4: l4r4 q5: l12r12
64 vqdmulh.s16 q7, q5, d0[2]
66 vqdmulh.s16 q9, q5, d0[0]
83 ; q5: 12 + 12 * cospi : c1/temp2
85 vqadd.s16 q5, q5, q9
89 vqsub.s16 q2, q6, q5
97 vqadd.s16 q5, q11, q2
103 vtrn.32 q5, q7
[all …]
mbloopfilter_neon.c
20 uint8x16_t q5, // p1 in vp8_mbloop_filter_neon() argument
40 q12u8 = vabdq_u8(q4, q5); in vp8_mbloop_filter_neon()
41 q13u8 = vabdq_u8(q5, q6); in vp8_mbloop_filter_neon()
60 q1u8 = vabdq_u8(q5, q8); in vp8_mbloop_filter_neon()
70 q5 = veorq_u8(q5, q0u8); in vp8_mbloop_filter_neon()
84 q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), in vp8_mbloop_filter_neon()
141 q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8); in vp8_mbloop_filter_neon()
162 uint8x16_t q5, q6, q7, q8, q9, q10; in vp8_mbloop_filter_horizontal_edge_y_neon() local
174 q5 = vld1q_u8(src); in vp8_mbloop_filter_horizontal_edge_y_neon()
187 q5, q6, q7, q8, q9, q10, in vp8_mbloop_filter_horizontal_edge_y_neon()
[all …]
vp8_subpixelvariance16x16s_neon.asm
58 vext.8 q5, q4, q5, #1
64 vrhadd.u8 q2, q4, q5
68 vsubl.u8 q5, d1, d23
82 vpadal.s16 q8, q5
114 vmull.s32 q5, d0, d0
154 vld1.8 {q5}, [r2], r3
213 vmull.s32 q5, d0, d0
262 vext.8 q5, q4, q5, #1
267 vrhadd.u8 q2, q4, q5
271 vld1.8 {q5}, [r2], r3
[all …]
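
The vp8 kernels above keep rows of 16 pixels in q registers and build the loop-filter mask from absolute differences (vabdq_u8(q4, q5), vabdq_u8(q5, q6) in mbloopfilter_neon.c). A minimal intrinsics sketch of that masking step, with illustrative names rather than the libvpx function:

    #include <arm_neon.h>

    /* Per-lane |p2-p1| and |p1-p0|, reduced with max and compared
     * against a limit: 0xFF in lanes where the edge is smooth enough
     * to filter. Sketch only, not the vp8_mbloop_filter_neon() code. */
    static uint8x16_t edge_mask(uint8x16_t p2, uint8x16_t p1,
                                uint8x16_t p0, uint8x16_t limit)
    {
        uint8x16_t d0 = vabdq_u8(p2, p1);
        uint8x16_t d1 = vabdq_u8(p1, p0);
        return vcleq_u8(vmaxq_u8(d0, d1), limit);
    }
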
/external/valgrind/none/tests/arm/
neon128.c
358 TESTINSN_imm("vmov.i32 q5", q5, 0x700); in main()
372 TESTINSN_imm("vmvn.i32 q5", q5, 0x700); in main()
391 TESTINSN_imm("vbic.i32 q5", q5, 0x700); in main()
439 TESTINSN_bin("vand q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
445 TESTINSN_bin("vbic q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
463 TESTINSN_bin("veor q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
473 TESTINSN_bin("vbsl q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
483 TESTINSN_bin("vbit q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
493 TESTINSN_bin("vbif q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57); in main()
507 TESTINSN_bin("vext.8 q0, q5, q15, #12", q0, q5, i8, 0x77, q15, i8, 0xff); in main()
[all …]
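
neon128.c drives single NEON instructions through the TESTINSN_* macros so Valgrind sees every encoding. A stripped-down sketch of the same idea, assuming a 32-bit ARM GCC-style toolchain (the %q operand modifier and the helper below are assumptions, not the file's macros, which also randomise inputs and dump all lanes):

    #include <arm_neon.h>
    #include <stdio.h>

    /* Execute one vand through inline asm so the exact instruction is
     * exercised, mirroring TESTINSN_bin("vand q4, q6, q5", ...). */
    static void test_vand(void)
    {
        uint8x16_t a = vdupq_n_u8(0xff);
        uint8x16_t b = vreinterpretq_u8_u16(vdupq_n_u16(0x57));
        uint8x16_t r;
        __asm__("vand %q0, %q1, %q2" : "=w"(r) : "w"(a), "w"(b));
        printf("lane0 = 0x%02x\n", vgetq_lane_u8(r, 0)); /* 0x57 */
    }
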
/external/libavc/common/arm/
ih264_inter_pred_luma_horz_qpel_vert_qpel_a9q.s
146 vld1.32 {q5}, [r7], r2 @ Vector load from src[5_0]
248 vaddl.u8 q5, d0, d5
249 vmlal.u8 q5, d2, d30
250 vmlal.u8 q5, d3, d30
251 vmlsl.u8 q5, d1, d31
252 vmlsl.u8 q5, d4, d31
259 vqrshrun.s16 d26, q5, #5
261 vaddl.u8 q5, d12, d17
262 vmlal.u8 q5, d14, d30
263 vmlal.u8 q5, d15, d30
[all …]
ih264_inter_pred_chroma_a9q.s
148 vmull.u8 q5, d0, d28
149 vmlal.u8 q5, d5, d30
150 vmlal.u8 q5, d3, d29
151 vmlal.u8 q5, d8, d31
163 vqrshrun.s16 d14, q5, #6
175 vmull.u8 q5, d0, d28
176 vmlal.u8 q5, d5, d30
177 vmlal.u8 q5, d3, d29
178 vmlal.u8 q5, d8, d31
186 vqrshrun.s16 d14, q5, #6
ih264_inter_pred_luma_horz_hpel_vert_qpel_a9q.s
168 vaddl.u8 q5, d2, d3
173 vmla.u16 q4, q5, q11
175 vaddl.u8 q5, d1, d4
177 vmls.u16 q4, q5, q12
179 vaddl.u8 q5, d0, d5
187 vmla.u16 q5, q6, q11
191 vmls.u16 q5, q6, q12
198 vst1.32 {q5}, [r9], r6 @ store temp buffer 2
235 vadd.s16 q15, q5, q6
263 vadd.s16 q14, q5, q8
[all …]
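
All three libavc files above implement the H.264 six-tap half-pel filter (taps 1, -5, 20, 20, -5, 1): widen with vaddl.u8, accumulate with vmlal/vmlsl against d30/d31, then round-shift-saturate with vqrshrun.s16 #5. A sketch of one output row using s16 multiply-accumulates in place of the files' widening u8 MACs (helper name is illustrative):

    #include <arm_neon.h>

    static uint8x8_t luma_6tap(uint8x8_t s0, uint8x8_t s1, uint8x8_t s2,
                               uint8x8_t s3, uint8x8_t s4, uint8x8_t s5)
    {
        /* 1*(s0+s5) + 20*(s2+s3) - 5*(s1+s4); fits s16 comfortably. */
        int16x8_t acc = vreinterpretq_s16_u16(vaddl_u8(s0, s5));
        acc = vmlaq_n_s16(acc, vreinterpretq_s16_u16(vaddl_u8(s2, s3)), 20);
        acc = vmlsq_n_s16(acc, vreinterpretq_s16_u16(vaddl_u8(s1, s4)), 5);
        return vqrshrun_n_s16(acc, 5);  /* (acc + 16) >> 5, clamped to u8 */
    }
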
/external/llvm/test/MC/ARM/
neon-shiftaccum-encoding.s
9 vsra.s32 q9, q5, #32
18 vsra.u64 q4, q5, #25
27 vsra.s32 q5, #32
36 vsra.u64 q5, #25
44 @ CHECK: vsra.s32 q9, q5, #32 @ encoding: [0x5a,0x21,0xe0,0xf2]
53 @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xda,0x81,0xa7,0xf3]
61 @ CHECK: vsra.s32 q5, q5, #32 @ encoding: [0x5a,0xa1,0xa0,0xf2]
70 @ CHECK: vsra.u64 q5, q5, #25 @ encoding: [0xda,0xa1,0xa7,0xf3]
83 vrsra.s64 q4, q5, #64
84 vrsra.u8 q5, q6, #8
[all …]
neont2-shiftaccum-encoding.s
11 vsra.s32 q9, q5, #32
20 vsra.u64 q4, q5, #25
29 vsra.s32 q5, #32
38 vsra.u64 q5, #25
46 @ CHECK: vsra.s32 q9, q5, #32 @ encoding: [0xe0,0xef,0x5a,0x21]
55 @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xa7,0xff,0xda,0x81]
63 @ CHECK: vsra.s32 q5, q5, #32 @ encoding: [0xa0,0xef,0x5a,0xa1]
72 @ CHECK: vsra.u64 q5, q5, #25 @ encoding: [0xa7,0xff,0xda,0xa1]
86 vrsra.s64 q4, q5, #64
87 vrsra.u8 q5, q6, #8
[all …]
neont2-minmax-encoding.s
22 vmax.s16 q4, q5, q6
27 vmax.f32 q9, q5, q1
30 vmax.s16 q5, q6
33 vmax.u16 q4, q5
52 @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x1a,0xef,0x4c,0x86]
57 @ CHECK: vmax.f32 q9, q5, q1 @ encoding: [0x4a,0xef,0x42,0x2f]
59 @ CHECK: vmax.s16 q5, q5, q6 @ encoding: [0x1a,0xef,0x4c,0xa6]
62 @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x18,0xff,0x4a,0x86]
84 vmin.s16 q4, q5, q6
89 vmin.f32 q9, q5, q1
[all …]
neon-minmax-encoding.s
20 vmax.s16 q4, q5, q6
25 vmax.f32 q9, q5, q1
28 vmax.s16 q5, q6
31 vmax.u16 q4, q5
50 @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x4c,0x86,0x1a,0xf2]
55 @ CHECK: vmax.f32 q9, q5, q1 @ encoding: [0x42,0x2f,0x4a,0xf2]
57 @ CHECK: vmax.s16 q5, q5, q6 @ encoding: [0x4c,0xa6,0x1a,0xf2]
60 @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x4a,0x86,0x18,0xf3]
82 vmin.s16 q4, q5, q6
87 vmin.f32 q9, q5, q1
[all …]
neon-shift-encoding.s
116 vsra.s64 q4, q5, #63
123 vsra.s16 q5, #15
134 @ CHECK: vsra.s64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf2]
140 @ CHECK: vsra.s16 q5, q5, #15 @ encoding: [0x5a,0xa1,0x91,0xf2]
152 vsra.u64 q4, q5, #63
159 vsra.u16 q5, #15
170 @ CHECK: vsra.u64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf3]
176 @ CHECK: vsra.u16 q5, q5, #15 @ encoding: [0x5a,0xa1,0x91,0xf3]
188 vsri.64 q4, q5, #63
195 vsri.16 q5, #15
[all …]
neon-bitwise-encoding.s
286 vand q6, q5
287 vand.s8 q6, q5
292 veor q6, q5
293 veor.8 q6, q5
298 veor q6, q5
299 veor.i8 q6, q5
304 vclt.s16 q5, #0
307 vceq.s16 q5, q3
310 vcgt.s16 q5, q3
313 vcge.s16 q5, q3
[all …]
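
The MC tests above pin down both the encodings and the two-operand aliases: vsra.s32 q5, #32 assembles to vsra.s32 q5, q5, #32, as the CHECK lines confirm. The operation being encoded, shift-right-and-accumulate, has a direct intrinsic; a small sketch:

    #include <arm_neon.h>

    /* vsra.s32: acc += (x >> 32). An s32 shift by 32 keeps only the
     * sign, so each lane adds 0 or -1. */
    static int32x4_t shift_accumulate(int32x4_t acc, int32x4_t x)
    {
        return vsraq_n_s32(acc, x, 32);
    }
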
/external/libhevc/common/arm/
ihevc_inter_pred_luma_vert_w16inp_w16out.s
174 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
176 vmlal.s16 q5,d1,d22 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)@
178 vmlal.s16 q5,d3,d24 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_2)@
180 vmlal.s16 q5,d4,d25 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
182 vmlal.s16 q5,d5,d26 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
184 vmlal.s16 q5,d6,d27 @mul_res2 = vmlal_u8(mul_res2, src_tmp3, coeffabs_5)@
185 vmlal.s16 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
186 vmlal.s16 q5,d16,d29 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_7)@
201 vsub.s32 q5, q5, q15
221 vshrn.s32 d10, q5, #6
[all …]
ihevc_inter_pred_filters_luma_vert_w16inp.s
164 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
166 vmlal.s16 q5,d1,d22 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)@
168 vmlal.s16 q5,d3,d24 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_2)@
170 vmlal.s16 q5,d4,d25 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
172 vmlal.s16 q5,d5,d26 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
174 vmlal.s16 q5,d6,d27 @mul_res2 = vmlal_u8(mul_res2, src_tmp3, coeffabs_5)@
175 vmlal.s16 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
176 vmlal.s16 q5,d16,d29 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_7)@
191 vqshrn.s32 d10, q5, #6
210 vqrshrun.s16 d10,q5,#6 @sto_res = vqmovun_s16(sto_res_tmp)@
[all …]
ihevc_inter_pred_filters_luma_vert.s
176 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
179 vmlsl.u8 q5,d1,d22 @mul_res2 = vmlsl_u8(mul_res2, src_tmp2, coeffabs_0)@
182 vmlsl.u8 q5,d3,d24 @mul_res2 = vmlsl_u8(mul_res2, src_tmp4, coeffabs_2)@
185 vmlal.u8 q5,d4,d25 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
187 vmlal.u8 q5,d5,d26 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
189 vmlsl.u8 q5,d6,d27 @mul_res2 = vmlsl_u8(mul_res2, src_tmp3, coeffabs_5)@
192 vmlal.u8 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
195 vmlsl.u8 q5,d16,d29 @mul_res2 = vmlsl_u8(mul_res2, src_tmp1, coeffabs_7)@
213 vqrshrun.s16 d10,q5,#6 @sto_res = vqmovun_s16(sto_res_tmp)@
267 vmull.u8 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
[all …]
ihevc_itrans_recon_4x4.s
158 vaddl.s16 q5,d0,d2 @pi2_src[0] + pi2_src[2]
160 vshl.s32 q5,q5,#6 @e[0] = 64*(pi2_src[0] + pi2_src[2])
163 vadd.s32 q7,q5,q3 @((e[0] + o[0] )
166 vsub.s32 q10,q5,q3 @((e[0] - o[0])
188 vaddl.s16 q5,d0,d2 @pi2_src[0] + pi2_src[2]
190 vshl.s32 q5,q5,#6 @e[0] = 64*(pi2_src[0] + pi2_src[2])
194 vadd.s32 q7,q5,q3 @((e[0] + o[0] )
197 vsub.s32 q10,q5,q3 @((e[0] - o[0])
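
The libhevc vertical filters above take 16-bit intermediates, apply the eight taps with widening s16 multiply-accumulates into 32 bits, and narrow back by 6 bits (vqshrn.s32 d10, q5, #6). A sketch of one 4-lane column, with coeffs[] standing in for the broadcast d22–d29 registers (names illustrative):

    #include <arm_neon.h>

    static int16x4_t hevc_vert_8tap(const int16x4_t src[8],
                                    const int16x4_t coeffs[8])
    {
        int32x4_t acc = vmull_s16(src[0], coeffs[0]);
        for (int i = 1; i < 8; i++)
            acc = vmlal_s16(acc, src[i], coeffs[i]);
        return vqshrn_n_s32(acc, 6);  /* saturating narrow, >> 6 */
    }
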
/external/boringssl/linux-arm/crypto/aes/
bsaes-armv7.S
1147 vld1.8 {q4,q5}, [r0]!
1169 veor q5, q5, q14
1174 vst1.8 {q5}, [r1]!
1202 vld1.8 {q5}, [r0]!
1314 vmov q5,q0 @ and input are preserved
1318 vmov q15, q5 @ q5 holds input
1413 vadd.u32 q5, q2, q10
1416 vadd.u32 q10, q5, q10 @ next counter
1455 veor q5, q15
1464 vst1.8 {q5}, [r1]!
[all …]
aesv8-armx.S
313 veor q5,q8,q7
365 veor q8,q8,q5
408 veor q8,q8,q5
457 veor q5,q2,q7
498 veor q5,q5,q1
503 vst1.8 {q5},[r1]!
544 veor q5,q6,q7
553 veor q5,q5,q1
556 vst1.8 {q5},[r1]!
561 veor q5,q5,q10
[all …]
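
In the CTR path of bsaes-armv7.S, q5 holds one counter block (vadd.u32 q5, q2, q10), is encrypted, XORed with the input (veor q5, q15) and stored (vst1.8 {q5}, [r1]!). That final XOR-and-store step, sketched with intrinsics (key schedule and AES rounds omitted):

    #include <arm_neon.h>
    #include <stdint.h>

    static void ctr_xor_block(uint8_t *out, const uint8_t *in,
                              const uint8_t *keystream)
    {
        uint8x16_t p = vld1q_u8(in);
        uint8x16_t k = vld1q_u8(keystream);
        vst1q_u8(out, veorq_u8(p, k));  /* out = in ^ E_k(counter) */
    }
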
/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_idct32x32_add_neon.asm
180 vrshr.s16 q5, q5, #6
185 vaddw.u8 q5, q5, d5
190 vqmovun.s16 d5, q5
214 vrshr.s16 q5, q5, #6
219 vaddw.u8 q5, q5, d5
224 vqmovun.s16 d5, q5
376 vld1.s16 {q5}, [r3]!
400 vtrn.32 q5, q7
407 vtrn.16 q4, q5
425 vst1.16 {q5}, [r0]!
[all …]
vp9_loopfilter_16_neon.asm
59 vld1.u8 {q5}, [r2@64], r1 ; p1
71 vst1.u8 {q5}, [r2@64], r1 ; store op1
92 ; q5 p1
100 ; q5 op1
108 vabd.u8 q12, q4, q5 ; m2 = abs(p2 - p1)
109 vabd.u8 q13, q5, q6 ; m3 = abs(p1 - p0)
130 vabd.u8 q2, q5, q8 ; a = abs(p1 - q1)
140 veor q5, q5, q10 ; ps1
152 vqsub.s8 q1, q5, q8 ; filter = clamp(ps1-qs1)
189 vqadd.s8 q13, q5, q1 ; u = clamp(ps1 + filter)
[all …]
vp9_iht8x8_add_neon.asm
135 vmull.s16 q5, d26, d2
143 vmlsl.s16 q5, d22, d3
151 vqrshrn.s32 d10, q5, #14 ; >> 14
241 vsub.s16 q13, q4, q5 ; step2[5] = step1[4] - step1[5]
242 vadd.s16 q4, q4, q5 ; step2[4] = step1[4] + step1[5]
273 vadd.s16 q10, q2, q5 ; output[2] = step1[2] + step1[5];
276 vsub.s16 q13, q2, q5 ; output[5] = step1[2] - step1[5];
310 vmull.s16 q5, d22, d30
318 vmlal.s16 q5, d24, d31
326 vadd.s32 q11, q1, q5
[all …]
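
The vp9 idct files above end every pass with the same reconstruction tail: round the residual by 6 bits (vrshr.s16 q5, q5, #6), widen-add the u8 predictor (vaddw.u8), and saturate back down (vqmovun.s16). As intrinsics (sketch):

    #include <arm_neon.h>

    static uint8x8_t idct_recon(int16x8_t residual, uint8x8_t pred)
    {
        int16x8_t r = vrshrq_n_s16(residual, 6);
        uint16x8_t s = vaddw_u8(vreinterpretq_u16_s16(r), pred);
        return vqmovun_s16(vreinterpretq_s16_u16(s));
    }
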
/external/libavc/encoder/arm/
ih264e_evaluate_intra4x4_modes_a9q.s
237 vext.8 q5, q0, q0, #2
315 vext.8 q15, q5, q5, #4
317 vext.8 q15, q5, q5, #3
351 vmov.8 q1, q5
363 vext.8 q15, q5, q5, #3
390 vext.8 q15, q5, q5, #5
418 vrev64.8 q5, q1
428 vext.8 q6, q5, q5, #7
431 vext.8 q6, q5, q5, #3
434 vext.8 q6, q5, q5, #1
ih264e_evaluate_intra_chroma_modes_a9q.s
100 vld1.32 {q5}, [r1]!
102 vuzp.u8 q4, q5 @
129 vzip.u16 q4, q5
131 vadd.u16 q7, q5, q6
134 vqrshrn.u16 d16, q5, #2
141 vzip.u16 q4, q5
142 vqrshrn.u16 d16, q5, #2
149 vzip.u16 q4, q5
161 vld1.32 {q5}, [r12]!
326 vmov q15, q5
[all …]
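
The intra-mode evaluation code leans on vext.8 to slide neighbour arrays by a byte count; with both sources equal (vext.8 q15, q5, q5, #3) it is a 16-byte rotate. The same idiom as an intrinsic:

    #include <arm_neon.h>

    /* Bytes 3..15 of v followed by bytes 0..2: a left rotate by 3. */
    static uint8x16_t rotate_bytes3(uint8x16_t v)
    {
        return vextq_u8(v, v, 3);
    }
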
/external/libmpeg2/common/arm/
impeg2_idct.s
158 vaddw.u8 q5, q15, d1
162 vqmovun.s16 d1, q5
229 vmovl.s16 q5, d3
231 vraddhn.s32 d13, q0, q5
239 vmovl.s16 q5, d3
241 vraddhn.s32 d13, q0, q5
249 vmovl.s16 q5, d3
251 vraddhn.s32 d13, q0, q5
259 vmovl.s16 q5, d3
261 vraddhn.s32 d13, q0, q5
[all …]
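
impeg2_idct.s widens coefficients to 32 bits (vmovl.s16 q5, d3) and folds sums back with vraddhn.s32, a rounding add that keeps the high 16 bits of each lane. As an intrinsic (sketch):

    #include <arm_neon.h>

    /* Per lane: (a + b + (1 << 15)) >> 16, narrowed to s16. */
    static int16x4_t round_add_high(int32x4_t a, int32x4_t b)
    {
        return vraddhn_s32(a, b);
    }
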
