Searched refs:q8s32 (Results 1 – 7 of 7) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/arm/
variance_neon.c
168 int32x4_t q8s32, q9s32, q10s32; in vpx_variance16x8_neon() local
171 q8s32 = vdupq_n_s32(0); in vpx_variance16x8_neon()
195 q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16)); in vpx_variance16x8_neon()
201 q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16)); in vpx_variance16x8_neon()
207 q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16)); in vpx_variance16x8_neon()
213 q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16)); in vpx_variance16x8_neon()
219 q0s64 = vpaddlq_s32(q8s32); in vpx_variance16x8_neon()
247 int32x4_t q8s32, q9s32, q10s32; in vpx_variance8x16_neon() local
250 q8s32 = vdupq_n_s32(0); in vpx_variance8x16_neon()
272 q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16)); in vpx_variance8x16_neon()
[all …]
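
The variance matches above all follow one accumulation idiom: pairwise add-and-accumulate 16-bit difference terms into a 32-bit vector (vpadalq_s16), then widen to 64 bits (vpaddlq_s32) for the final reduction. A minimal sketch of that idiom, assuming a standalone helper; sum_diffs and its arguments are hypothetical, not libvpx code:

#include <arm_neon.h>
#include <stdint.h>

/* Hypothetical helper: sum n 16-bit difference terms (n a multiple of 8). */
static int32_t sum_diffs(const int16_t *diff, int n) {
  int32x4_t q8s32 = vdupq_n_s32(0);        /* four running 32-bit sums */
  for (int i = 0; i < n; i += 8) {
    int16x8_t d = vld1q_s16(diff + i);
    q8s32 = vpadalq_s16(q8s32, d);         /* pairwise add, then accumulate */
  }
  int64x2_t q0s64 = vpaddlq_s32(q8s32);    /* widen pairs to 64 bits */
  int64x1_t d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
  return (int32_t)vget_lane_s64(d0s64, 0); /* final horizontal sum */
}

The real functions feed the accumulator from vreinterpretq_s16_u16 of unsigned difference vectors, as the matches show; the sketch loads signed terms directly to stay self-contained.
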
idct8x8_add_neon.c
99 int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32; in IDCT8x8_1D() local
182 q8s32 = vmull_s16(d20s16, d1s16); in IDCT8x8_1D()
187 q8s32 = vmlal_s16(q8s32, d28s16, d0s16); in IDCT8x8_1D()
192 d30s16 = vqrshrn_n_s32(q8s32, 14); in IDCT8x8_1D()
idct16x16_add_neon.c
342 int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q8s32, q9s32; in vpx_idct16x16_256_add_neon_pass2() local
517 q8s32 = vmull_s16(d20s16, d30s16); in vpx_idct16x16_256_add_neon_pass2()
522 q8s32 = vmlal_s16(q8s32, d26s16, d31s16); in vpx_idct16x16_256_add_neon_pass2()
527 d10s16 = vqrshrn_n_s32(q8s32, 14); in vpx_idct16x16_256_add_neon_pass2()
1036 int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q8s32, q9s32; in vpx_idct16x16_10_add_neon_pass2() local
1114 q8s32 = vmull_s16(d6s16, d30s16); in vpx_idct16x16_10_add_neon_pass2()
1119 q8s32 = vmlal_s16(q8s32, d8s16, d31s16); in vpx_idct16x16_10_add_neon_pass2()
1124 d10s16 = vqrshrn_n_s32(q8s32, 14); in vpx_idct16x16_10_add_neon_pass2()
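
The idct8x8 and idct16x16 matches share the same two-step idiom: a widening multiply (vmull_s16), a widening multiply-accumulate (vmlal_s16), then a rounding narrow by 14 bits, the cospi fixed-point scale. A minimal sketch under that reading; butterfly_term and its constants are hypothetical names:

#include <arm_neon.h>
#include <stdint.h>

/* Hypothetical helper: computes (a*c0 + b*c1 + 2^13) >> 14 per lane,
 * saturated to 16 bits. */
static int16x4_t butterfly_term(int16x4_t a, int16x4_t b,
                                int16_t c0, int16_t c1) {
  int32x4_t q8s32 = vmull_s16(a, vdup_n_s16(c0)); /* widening multiply */
  q8s32 = vmlal_s16(q8s32, b, vdup_n_s16(c1));    /* widening multiply-add */
  return vqrshrn_n_s32(q8s32, 14);                /* round, narrow to 16 bits */
}
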
idct32x32_add_neon.c
134 int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q15s32; in DO_BUTTERFLY() local
145 q8s32 = vmull_s16(dCs16, d30s16); in DO_BUTTERFLY()
151 q8s32 = vsubq_s32(q8s32, q10s32); in DO_BUTTERFLY()
161 *qAs16 = vcombine_s16(vqrshrn_n_s32(q8s32, 14), in DO_BUTTERFLY()
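
DO_BUTTERFLY extends the same idiom to a full rotation: two cross products are subtracted and added before the rounding narrow. A sketch under the same assumptions, with illustrative names only:

#include <arm_neon.h>
#include <stdint.h>

/* Hypothetical rotation butterfly: outA = (dC*c - dD*s) >> 14,
 * outB = (dC*s + dD*c) >> 14, rounded and saturated. */
static void do_butterfly(int16x4_t dC, int16x4_t dD, int16_t c, int16_t s,
                         int16x4_t *outA, int16x4_t *outB) {
  int32x4_t q8s32  = vmull_s16(dC, vdup_n_s16(c));
  int32x4_t q10s32 = vmull_s16(dD, vdup_n_s16(s));
  int32x4_t q11s32 = vmull_s16(dC, vdup_n_s16(s));
  int32x4_t q12s32 = vmull_s16(dD, vdup_n_s16(c));
  *outA = vqrshrn_n_s32(vsubq_s32(q8s32, q10s32), 14);
  *outB = vqrshrn_n_s32(vaddq_s32(q11s32, q12s32), 14);
}

The 32x32 version processes eight lanes by running two such accumulator pairs and recombining the halves with vcombine_s16, as the final match above shows.
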
/external/libvpx/libvpx/vp8/encoder/arm/neon/
vp8_shortwalsh4x4_neon.c
30 int32x4_t qEmptys32, q0s32, q1s32, q2s32, q3s32, q8s32; in vp8_short_walsh4x4_neon() local
87 q8s32 = vaddl_s16(v2tmp1.val[0], v2tmp0.val[0]); in vp8_short_walsh4x4_neon()
92 q0s32 = vaddq_s32(q8s32, q9s32); in vp8_short_walsh4x4_neon()
95 q3s32 = vsubq_s32(q8s32, q9s32); in vp8_short_walsh4x4_neon()
102 q8s32 = vreinterpretq_s32_u32(q8u32); in vp8_short_walsh4x4_neon()
107 q0s32 = vsubq_s32(q0s32, q8s32); in vp8_short_walsh4x4_neon()
112 q8s32 = vaddq_s32(q0s32, q15s32); in vp8_short_walsh4x4_neon()
117 d0s16 = vshrn_n_s32(q8s32, 3); in vp8_short_walsh4x4_neon()
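
The Walsh-Hadamard matches combine widening adds (vaddl_s16) with plain 32-bit add/subtract stages and finish with a narrowing shift by 3 (vshrn_n_s32). A minimal sketch of that widen-compute-narrow shape; wht_stage is a hypothetical name and omits the rounding-correction steps visible in the matches:

#include <arm_neon.h>

/* Hypothetical single stage: widen while adding, combine, then >> 3
 * with a narrow back to 16 bits. */
static int16x4_t wht_stage(int16x4_t a, int16x4_t b,
                           int16x4_t c, int16x4_t d) {
  int32x4_t q8s32 = vaddl_s16(a, b);   /* widening add: 16 -> 32 bits */
  int32x4_t q9s32 = vaddl_s16(c, d);
  int32x4_t q0s32 = vaddq_s32(q8s32, q9s32);
  return vshrn_n_s32(q0s32, 3);        /* shift right, narrow to 16 bits */
}
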
/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_iht4x4_add_neon.c
29 int32x4_t q8s32, q9s32; in TRANSPOSE4X4() local
36 q8s32 = vreinterpretq_s32_s16(vcombine_s16(d0x2s16.val[0], d0x2s16.val[1])); in TRANSPOSE4X4()
38 q0x2s32 = vtrnq_s32(q8s32, q9s32); in TRANSPOSE4X4()
115 int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32; in IADST4x4_1D() local
131 q8s32 = vmull_s16(*d4s16, d19s16); in IADST4x4_1D()
136 q10s32 = vaddq_s32(q10s32, q8s32); in IADST4x4_1D()
138 q8s32 = vdupq_n_s32(sinpi_3_9); in IADST4x4_1D()
140 q15s32 = vmulq_s32(q15s32, q8s32); in IADST4x4_1D()
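
This file's matches cover two idioms: the IADST multiply chain (like the butterfly sketches above, but using a full 32-bit vmulq_s32 for the sinpi_3_9 term) and a 4x4 transpose. The transpose is the one not yet sketched: transpose 16-bit pairs, reinterpret as 32-bit lanes, then swap the 2x2 blocks with vtrnq_s32. A self-contained sketch; the function name and in-place interface are assumptions:

#include <arm_neon.h>

/* Hypothetical in-place 4x4 transpose of 16-bit rows. */
static void transpose_4x4(int16x4_t r[4]) {
  int16x4x2_t p01 = vtrn_s16(r[0], r[1]);  /* swap 16-bit pairs */
  int16x4x2_t p23 = vtrn_s16(r[2], r[3]);
  int32x4_t q8s32 =
      vreinterpretq_s32_s16(vcombine_s16(p01.val[0], p01.val[1]));
  int32x4_t q9s32 =
      vreinterpretq_s32_s16(vcombine_s16(p23.val[0], p23.val[1]));
  int32x4x2_t q0x2s32 = vtrnq_s32(q8s32, q9s32); /* swap 2x2 blocks */
  int16x8_t r01 = vreinterpretq_s16_s32(q0x2s32.val[0]);
  int16x8_t r23 = vreinterpretq_s16_s32(q0x2s32.val[1]);
  r[0] = vget_low_s16(r01);
  r[1] = vget_high_s16(r01);
  r[2] = vget_low_s16(r23);
  r[3] = vget_high_s16(r23);
}
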
vp9_iht8x8_add_neon.c
117 int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32; in IDCT8x8_1D() local
200 q8s32 = vmull_s16(d20s16, d1s16); in IDCT8x8_1D()
205 q8s32 = vmlal_s16(q8s32, d28s16, d0s16); in IDCT8x8_1D()
210 d30s16 = vqrshrn_n_s32(q8s32, 14); in IDCT8x8_1D()
273 int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q7s32, q8s32; in IADST8X8_1D() local
312 q8s32 = vmull_s16(d23s16, d31s16); in IADST8X8_1D()
317 q8s32 = vmlsl_s16(q8s32, d25s16, d30s16); in IADST8X8_1D()
329 q15s32 = vaddq_s32(q4s32, q8s32); in IADST8X8_1D()
331 q4s32 = vsubq_s32(q4s32, q8s32); in IADST8X8_1D()
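
IADST8X8_1D adds a multiply-subtract variant (vmlsl_s16) and keeps both the sum and the difference of two 32-bit accumulators, deferring the narrow. A sketch of that pattern, with hypothetical names:

#include <arm_neon.h>
#include <stdint.h>

/* Hypothetical IADST-style step: t = a*c0 - b*c1 (widening), then
 * emit acc + t and acc - t, both still at 32-bit precision. */
static void iadst_step(int16x4_t a, int16x4_t b, int16_t c0, int16_t c1,
                       int32x4_t acc, int32x4_t *sum, int32x4_t *diff) {
  int32x4_t q8s32 = vmull_s16(a, vdup_n_s16(c0));
  q8s32 = vmlsl_s16(q8s32, b, vdup_n_s16(c1)); /* widening multiply-subtract */
  *sum  = vaddq_s32(acc, q8s32);
  *diff = vsubq_s32(acc, q8s32);
}
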