
Searched refs:q9s32 (Results 1 – 8 of 8) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/arm/
variance_neon.c
168 int32x4_t q8s32, q9s32, q10s32; in vpx_variance16x8_neon() local
172 q9s32 = vdupq_n_s32(0); in vpx_variance16x8_neon()
196 q9s32 = vmlal_s16(q9s32, d22s16, d22s16); in vpx_variance16x8_neon()
202 q9s32 = vmlal_s16(q9s32, d24s16, d24s16); in vpx_variance16x8_neon()
208 q9s32 = vmlal_s16(q9s32, d26s16, d26s16); in vpx_variance16x8_neon()
214 q9s32 = vmlal_s16(q9s32, d28s16, d28s16); in vpx_variance16x8_neon()
218 q10s32 = vaddq_s32(q10s32, q9s32); in vpx_variance16x8_neon()
247 int32x4_t q8s32, q9s32, q10s32; in vpx_variance8x16_neon() local
251 q9s32 = vdupq_n_s32(0); in vpx_variance8x16_neon()
273 q9s32 = vmlal_s16(q9s32, d22s16, d22s16); in vpx_variance8x16_neon()
[all …]
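
The variance hits above all follow one pattern: zero a 32-bit accumulator with vdupq_n_s32, then square-and-accumulate 16-bit differences with the widening multiply-accumulate vmlal_s16. A minimal self-contained sketch of that pattern (sum_of_squares_s16 is a hypothetical helper, not a libvpx entry point):

    #include <arm_neon.h>

    /* Sum of squares of 16-bit differences; n must be a multiple of 4. */
    static int32_t sum_of_squares_s16(const int16_t *diff, int n) {
      int32x4_t q9s32 = vdupq_n_s32(0);   /* zeroed accumulator */
      for (int i = 0; i < n; i += 4) {
        int16x4_t d = vld1_s16(diff + i);
        q9s32 = vmlal_s16(q9s32, d, d);   /* q9s32 += d[k] * d[k] */
      }
      /* horizontal add of the four lanes */
      int32x2_t lo = vadd_s32(vget_low_s32(q9s32), vget_high_s32(q9s32));
      lo = vpadd_s32(lo, lo);
      return vget_lane_s32(lo, 0);
    }
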
idct8x8_add_neon.c
99 int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32; in IDCT8x8_1D() local
143 q9s32 = vmull_s16(d26s16, d3s16); in IDCT8x8_1D()
148 q9s32 = vmlal_s16(q9s32, d22s16, d2s16); in IDCT8x8_1D()
153 d12s16 = vqrshrn_n_s32(q9s32, 14); in IDCT8x8_1D()
213 q9s32 = vmull_s16(d28s16, d16s16); in IDCT8x8_1D()
218 q9s32 = vmlsl_s16(q9s32, d26s16, d16s16); in IDCT8x8_1D()
223 d10s16 = vqrshrn_n_s32(q9s32, 14); in IDCT8x8_1D()
366 int32x4_t q9s32, q10s32, q11s32, q12s32; in vpx_idct8x8_12_add_neon() local
427 q9s32 = vmull_s16(d28s16, d16s16); in vpx_idct8x8_12_add_neon()
432 q9s32 = vmlsl_s16(q9s32, d26s16, d16s16); in vpx_idct8x8_12_add_neon()
[all …]
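
Both IDCT8x8_1D hit groups are the same fixed-point rotation: widen-multiply one input by a constant with vmull_s16, fold in the second term with vmlal_s16 (or vmlsl_s16 for the minus leg), then round-narrow by 14 bits with vqrshrn_n_s32, the usual DCT fixed-point convention. A sketch of the plus leg (rotate_add_14bit and its parameters are illustrative, not libvpx names):

    #include <arm_neon.h>

    /* out = round((a*c0 + b*c1) >> 14), saturated to 16 bits. */
    static int16x4_t rotate_add_14bit(int16x4_t a, int16x4_t b,
                                      int16_t c0, int16_t c1) {
      int32x4_t q9s32 = vmull_s16(a, vdup_n_s16(c0));  /* a * c0   */
      q9s32 = vmlal_s16(q9s32, b, vdup_n_s16(c1));     /* + b * c1 */
      return vqrshrn_n_s32(q9s32, 14);
    }
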
idct16x16_add_neon.c
97 int32x4_t q0s32, q1s32, q2s32, q3s32, q5s32, q6s32, q9s32; in vpx_idct16x16_256_add_neon_pass1() local
171 q9s32 = vmull_s16(d26s16, d3s16); in vpx_idct16x16_256_add_neon_pass1()
176 q9s32 = vmlal_s16(q9s32, d22s16, d2s16); in vpx_idct16x16_256_add_neon_pass1()
181 d12s16 = vqrshrn_n_s32(q9s32, 14); in vpx_idct16x16_256_add_neon_pass1()
245 q9s32 = vmull_s16(d28s16, d16s16); in vpx_idct16x16_256_add_neon_pass1()
248 q6s32 = vsubq_s32(q9s32, q11s32); in vpx_idct16x16_256_add_neon_pass1()
250 q9s32 = vaddq_s32(q9s32, q11s32); in vpx_idct16x16_256_add_neon_pass1()
255 d12s16 = vqrshrn_n_s32(q9s32, 14); in vpx_idct16x16_256_add_neon_pass1()
342 int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q8s32, q9s32; in vpx_idct16x16_256_add_neon_pass2() local
518 q9s32 = vmull_s16(d21s16, d30s16); in vpx_idct16x16_256_add_neon_pass2()
[all …]
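
Pass 1 of the 16x16 IDCT adds a twist visible in the hits at 245-255: the two widened product vectors are combined into a sum and a difference (vaddq_s32/vsubq_s32) before narrowing, so one set of multiplies feeds two butterfly outputs. A sketch under that reading (butterfly_narrow_14bit is hypothetical; the products are assumed to come from vmull_s16/vmlal_s16 stages like those above):

    #include <arm_neon.h>

    /* Narrow both butterfly legs of two 32-bit product vectors. */
    static void butterfly_narrow_14bit(int32x4_t p0, int32x4_t p1,
                                       int16x4_t *sum, int16x4_t *diff) {
      int32x4_t q9s32 = vaddq_s32(p0, p1);
      int32x4_t q6s32 = vsubq_s32(p0, p1);
      *sum  = vqrshrn_n_s32(q9s32, 14);
      *diff = vqrshrn_n_s32(q6s32, 14);
    }
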
idct32x32_add_neon.c
134 int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q15s32; in DO_BUTTERFLY() local
147 q9s32 = vmull_s16(dDs16, d30s16); in DO_BUTTERFLY()
152 q9s32 = vsubq_s32(q9s32, q11s32); in DO_BUTTERFLY()
162 vqrshrn_n_s32(q9s32, 14)); in DO_BUTTERFLY()
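
DO_BUTTERFLY is the two-output form of that rotation: each input is multiplied by a cosine/sine constant pair, cross-accumulated with vmlal_s16/vmlsl_s16, and both results are round-narrowed by 14 bits. A sketch of that shape, assuming a standard rotation sign convention (do_butterfly_sketch is a hypothetical wrapper, not the libvpx macro):

    #include <arm_neon.h>

    /* (x, y) = round((a*c - b*s, a*s + b*c) >> 14). */
    static void do_butterfly_sketch(int16x4_t a, int16x4_t b,
                                    int16_t c, int16_t s,
                                    int16x4_t *x, int16x4_t *y) {
      int32x4_t q8s32 = vmull_s16(a, vdup_n_s16(c));
      int32x4_t q9s32 = vmull_s16(b, vdup_n_s16(c));
      q8s32 = vmlsl_s16(q8s32, b, vdup_n_s16(s));   /* a*c - b*s */
      q9s32 = vmlal_s16(q9s32, a, vdup_n_s16(s));   /* b*c + a*s */
      *x = vqrshrn_n_s32(q8s32, 14);
      *y = vqrshrn_n_s32(q9s32, 14);
    }
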
/external/libvpx/libvpx/vp8/encoder/arm/neon/
shortfdct_neon.c
21 int32x4_t q9s32, q10s32, q11s32, q12s32; in vp8_short_fdct4x4_neon() local
27 q9s32 = vdupq_n_s32(14500); in vp8_short_fdct4x4_neon()
64 q9s32 = vmlal_s16(q9s32, d7s16, d16s16); in vp8_short_fdct4x4_neon()
66 q9s32 = vmlal_s16(q9s32, d6s16, d17s16); in vp8_short_fdct4x4_neon()
69 d1s16 = vshrn_n_s32(q9s32, 12); in vp8_short_fdct4x4_neon()
128 int32x4_t q9s32, q10s32, q11s32, q12s32; in vp8_short_fdct8x4_neon() local
134 q9s32 = vdupq_n_s32(14500); in vp8_short_fdct8x4_neon()
169 q11s32 = q9s32; in vp8_short_fdct8x4_neon()
177 q9s32 = vmlal_s16(q9s32, d28s16, d16s16); in vp8_short_fdct8x4_neon()
182 q9s32 = vmlal_s16(q9s32, d26s16, d17s16); in vp8_short_fdct8x4_neon()
[all …]
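
The forward DCT hits differ from the inverse ones in two details visible above: the accumulator is seeded with a rounding bias (vdupq_n_s32(14500)) instead of zero, and the final narrowing is a truncating vshrn_n_s32 by 12 bits rather than a rounding shift by 14. A sketch of one output term, using the 2217/5352/14500 constants from the VP8 scalar fdct reference (fdct_row_sketch itself is hypothetical):

    #include <arm_neon.h>

    /* op = (c1*2217 + d1*5352 + 14500) >> 12, per lane. */
    static int16x4_t fdct_row_sketch(int16x4_t c1, int16x4_t d1) {
      int32x4_t q9s32 = vdupq_n_s32(14500);            /* rounding bias  */
      q9s32 = vmlal_s16(q9s32, c1, vdup_n_s16(2217));  /* + c1 * 2217    */
      q9s32 = vmlal_s16(q9s32, d1, vdup_n_s16(5352));  /* + d1 * 5352    */
      return vshrn_n_s32(q9s32, 12);                   /* truncate >> 12 */
    }
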
vp8_shortwalsh4x4_neon.c
31 int32x4_t q9s32, q10s32, q11s32, q15s32; in vp8_short_walsh4x4_neon() local
88 q9s32 = vaddl_s16(v2tmp1.val[1], v2tmp0.val[1]); in vp8_short_walsh4x4_neon()
92 q0s32 = vaddq_s32(q8s32, q9s32); in vp8_short_walsh4x4_neon()
95 q3s32 = vsubq_s32(q8s32, q9s32); in vp8_short_walsh4x4_neon()
103 q9s32 = vreinterpretq_s32_u32(q9u32); in vp8_short_walsh4x4_neon()
108 q1s32 = vsubq_s32(q1s32, q9s32); in vp8_short_walsh4x4_neon()
113 q9s32 = vaddq_s32(q1s32, q15s32); in vp8_short_walsh4x4_neon()
118 d1s16 = vshrn_n_s32(q9s32, 3); in vp8_short_walsh4x4_neon()
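
The Walsh-Hadamard hits widen before doing any arithmetic: vaddl_s16 adds two 16-bit vectors straight into 32 bits, the butterflies run entirely in 32-bit registers, and vshrn_n_s32 narrows by 3 bits only at the end. A sketch of one such stage (walsh_stage_sketch is a hypothetical free function):

    #include <arm_neon.h>

    /* Widen, butterfly in 32 bits, narrow with a 3-bit shift. */
    static void walsh_stage_sketch(int16x4_t a, int16x4_t b,
                                   int16x4_t c, int16x4_t d,
                                   int16x4_t *sum, int16x4_t *diff) {
      int32x4_t q8s32 = vaddl_s16(a, b);   /* widen + add */
      int32x4_t q9s32 = vaddl_s16(c, d);
      int32x4_t q0s32 = vaddq_s32(q8s32, q9s32);
      int32x4_t q3s32 = vsubq_s32(q8s32, q9s32);
      *sum  = vshrn_n_s32(q0s32, 3);       /* back to s16 */
      *diff = vshrn_n_s32(q3s32, 3);
    }
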
/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_iht8x8_add_neon.c
117 int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32; in IDCT8x8_1D() local
161 q9s32 = vmull_s16(d26s16, d3s16); in IDCT8x8_1D()
166 q9s32 = vmlal_s16(q9s32, d22s16, d2s16); in IDCT8x8_1D()
171 d12s16 = vqrshrn_n_s32(q9s32, 14); in IDCT8x8_1D()
231 q9s32 = vmull_s16(d28s16, d16s16); in IDCT8x8_1D()
236 q9s32 = vmlsl_s16(q9s32, d26s16, d16s16); in IDCT8x8_1D()
241 d10s16 = vqrshrn_n_s32(q9s32, 14); in IDCT8x8_1D()
274 int32x4_t q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32; in IADST8X8_1D() local
363 q9s32 = vmull_s16(d19s16, d31s16); in IADST8X8_1D()
366 q9s32 = vmlsl_s16(q9s32, d29s16, d30s16); in IADST8X8_1D()
[all …]
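
The VP9 hybrid transform reuses the IDCT rotation, but its IADST stages also need the minus leg, visible above as vmull_s16 followed by vmlsl_s16. A sketch, with the constants kept in vector registers as in the snippets (rotate_sub_14bit is an illustrative name):

    #include <arm_neon.h>

    /* out = round((a*c0 - b*c1) >> 14), saturated to 16 bits. */
    static int16x4_t rotate_sub_14bit(int16x4_t a, int16x4_t b,
                                      int16x4_t c0, int16x4_t c1) {
      int32x4_t q9s32 = vmull_s16(a, c0);  /* a * c0   */
      q9s32 = vmlsl_s16(q9s32, b, c1);     /* - b * c1 */
      return vqrshrn_n_s32(q9s32, 14);
    }
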
vp9_iht4x4_add_neon.c
29 int32x4_t q8s32, q9s32; in TRANSPOSE4X4() local
37 q9s32 = vreinterpretq_s32_s16(vcombine_s16(d1x2s16.val[0], d1x2s16.val[1])); in TRANSPOSE4X4()
38 q0x2s32 = vtrnq_s32(q8s32, q9s32); in TRANSPOSE4X4()
115 int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32; in IADST4x4_1D() local
133 q9s32 = vmull_s16(*d5s16, d19s16); in IADST4x4_1D()
139 q11s32 = vsubq_s32(q11s32, q9s32); in IADST4x4_1D()
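
In TRANSPOSE4X4 the s32 vectors are only reinterpretations: a 4x4 matrix of 16-bit values is transposed by swapping 16-bit pairs with vtrn_s16, then viewing the same registers as 32-bit lanes and swapping the 2x2 blocks with vtrnq_s32. A self-contained sketch of the technique, assuming q8 holds rows 0-1 and q9 rows 2-3 (transpose4x4_sketch is a hypothetical helper):

    #include <arm_neon.h>

    /* Transpose a 4x4 int16 matrix held in two q registers, in place. */
    static void transpose4x4_sketch(int16x8_t *q8s16, int16x8_t *q9s16) {
      /* swap 16-bit pairs between adjacent rows */
      int16x4x2_t d0 = vtrn_s16(vget_low_s16(*q8s16), vget_high_s16(*q8s16));
      int16x4x2_t d1 = vtrn_s16(vget_low_s16(*q9s16), vget_high_s16(*q9s16));
      /* reinterpret as 32-bit lanes and swap 2x2 blocks */
      int32x4_t q8s32 = vreinterpretq_s32_s16(vcombine_s16(d0.val[0], d0.val[1]));
      int32x4_t q9s32 = vreinterpretq_s32_s16(vcombine_s16(d1.val[0], d1.val[1]));
      int32x4x2_t t = vtrnq_s32(q8s32, q9s32);
      *q8s16 = vreinterpretq_s16_s32(t.val[0]);  /* columns 0-1 */
      *q9s16 = vreinterpretq_s16_s32(t.val[1]);  /* columns 2-3 */
    }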