Home
last modified time | relevance | path

Searched refs: q2u8 (Results 1 – 7 of 7) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/arm/
Dvpx_convolve_avg_neon.c30 uint8x16_t q0u8, q1u8, q2u8, q3u8, q8u8, q9u8, q10u8, q11u8; in vpx_convolve_avg_neon() local
39 q2u8 = vld1q_u8(src + 32); in vpx_convolve_avg_neon()
50 q2u8 = vrhaddq_u8(q2u8, q10u8); in vpx_convolve_avg_neon()
55 vst1q_u8(dst + 32, q2u8); in vpx_convolve_avg_neon()
64 q2u8 = vld1q_u8(src); in vpx_convolve_avg_neon()
76 q2u8 = vrhaddq_u8(q2u8, q10u8); in vpx_convolve_avg_neon()
82 vst1q_u8(dst, q2u8); in vpx_convolve_avg_neon()
92 q2u8 = vld1q_u8(d); in vpx_convolve_avg_neon()
97 q0u8 = vrhaddq_u8(q0u8, q2u8); in vpx_convolve_avg_neon()
Dvpx_convolve_copy_neon.c28 uint8x16_t q0u8, q1u8, q2u8, q3u8; in vpx_convolve_copy_neon() local
36 q2u8 = vld1q_u8(src + 32); in vpx_convolve_copy_neon()
42 vst1q_u8(dst + 32, q2u8); in vpx_convolve_copy_neon()
51 q2u8 = vld1q_u8(src); in vpx_convolve_copy_neon()
58 vst1q_u8(dst, q2u8); in vpx_convolve_copy_neon()
Dloopfilter_16_neon.c33 uint8x16_t q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8; in loop_filter_neon_16() local
58 q2u8 = vabdq_u8(q5, q8); in loop_filter_neon_16()
71 q2u8 = vshrq_n_u8(q2u8, 1); in loop_filter_neon_16()
72 q9 = vqaddq_u8(q9, q2u8); in loop_filter_neon_16()
Dvariance_neon.c166 uint8x16_t q0u8, q1u8, q2u8, q3u8; in vpx_variance16x8_neon() local
182 q2u8 = vld1q_u8(ref_ptr); in vpx_variance16x8_neon()
188 q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8)); in vpx_variance16x8_neon()
189 q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8)); in vpx_variance16x8_neon()
309 uint8x16_t q0u8, q1u8, q2u8, q3u8; in vpx_mse16x16_neon() local
324 q2u8 = vld1q_u8(ref_ptr); in vpx_mse16x16_neon()
329 q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8)); in vpx_mse16x16_neon()
330 q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8)); in vpx_mse16x16_neon()
Dintrapred_neon.c727 uint8x16_t q0u8, q1u8, q2u8; in vpx_tm_predictor_32x32_neon() local
734 q2u8 = vld1q_u8(above + 16); in vpx_tm_predictor_32x32_neon()
737 q10u16 = vsubl_u8(vget_low_u8(q2u8), vget_low_u8(q0u8)); in vpx_tm_predictor_32x32_neon()
738 q11u16 = vsubl_u8(vget_high_u8(q2u8), vget_high_u8(q0u8)); in vpx_tm_predictor_32x32_neon()
/external/libvpx/libvpx/vp8/common/arm/neon/
Dbilinearpredict_neon.c33 uint8x16_t q1u8, q2u8; in vp8_bilinear_predict4x4_neon() local
65 q2u8 = vcombine_u8(d4u8, d5u8); in vp8_bilinear_predict4x4_neon()
71 q5u64 = vshrq_n_u64(vreinterpretq_u64_u8(q2u8), 8); in vp8_bilinear_predict4x4_neon()
76 d1u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q2u8)), in vp8_bilinear_predict4x4_neon()
77 vreinterpret_u32_u8(vget_high_u8(q2u8))); in vp8_bilinear_predict4x4_neon()
141 uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8; in vp8_bilinear_predict8x4_neon() local
153 q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; in vp8_bilinear_predict8x4_neon()
162 q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8); in vp8_bilinear_predict8x4_neon()
168 d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1); in vp8_bilinear_predict8x4_neon()
228 uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8; in vp8_bilinear_predict8x8_neon() local
[all …]
Dvp8_loopfilter_neon.c31 uint8x16_t q0u8, q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8; in vp8_loop_filter_neon() local
56 q2u8 = vabdq_u8(q5, q8); in vp8_loop_filter_neon()
69 q2u8 = vshrq_n_u8(q2u8, 1); in vp8_loop_filter_neon()
70 q9 = vqaddq_u8(q9, q2u8); in vp8_loop_filter_neon()