/external/libhevc/common/arm/

D | ihevc_inter_pred_filters_luma_vert.s
    164  vmull.u8 q4,d1,d23    @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    166  vmlsl.u8 q4,d0,d22    @mul_res1 = vmlsl_u8(mul_res1, src_tmp1, coeffabs_0)@
    168  vmlsl.u8 q4,d2,d24    @mul_res1 = vmlsl_u8(mul_res1, src_tmp3, coeffabs_2)@
    170  vmlal.u8 q4,d3,d25    @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@
    172  vmlal.u8 q4,d4,d26    @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@
    174  vmlsl.u8 q4,d5,d27    @mul_res1 = vmlsl_u8(mul_res1, src_tmp2, coeffabs_5)@
    176  vmlal.u8 q4,d6,d28    @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@
    178  vmlsl.u8 q4,d7,d29    @mul_res1 = vmlsl_u8(mul_res1, src_tmp4, coeffabs_7)@
    247  vmull.u8 q4,d1,d23    @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    250  vmlsl.u8 q4,d0,d22    @mul_res1 = vmlsl_u8(mul_res1, src_tmp1, coeffabs_0)@
    [all …]
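The ten hits above are one block of the 8-tap vertical luma filter: a widening multiply (vmull.u8) followed by one widening multiply-accumulate or multiply-subtract (vmlal.u8/vmlsl.u8) per tap into the q4 accumulator, using the absolute tap values held in d22..d29. A minimal C sketch of that arithmetic with NEON intrinsics follows; the function name, the coeff[] layout and the loop form are illustrative assumptions, not code from libhevc, which fully unrolls this and interleaves loads with the arithmetic.

#include <arm_neon.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative sketch: 8 output pixels of one row of the vertical 8-tap filter. */
static void filter_vert_8tap_row(const uint8_t *src, int src_stride,
                                 uint8_t *dst, const int8_t coeff[8])
{
    uint16x8_t acc = vdupq_n_u16(0);                        /* wide accumulator, like q4 */

    for (int k = 0; k < 8; k++) {
        uint8x8_t row = vld1_u8(src + k * src_stride);      /* source rows, like d0..d7  */
        uint8x8_t cab = vdup_n_u8((uint8_t)abs(coeff[k]));  /* |tap|, like d22..d29      */
        acc = (coeff[k] >= 0) ? vmlal_u8(acc, row, cab)     /* positive tap: vmlal.u8    */
                              : vmlsl_u8(acc, row, cab);    /* negative tap: vmlsl.u8    */
    }
    /* Rounding shift by 6 (the HEVC luma taps sum to 64) and saturate back to 8 bits. */
    vst1_u8(dst, vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 6));
}
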
D | ihevc_inter_pred_filters_luma_vert_w16inp.s
    153  vmull.s16 q4,d1,d23    @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    155  vmlal.s16 q4,d0,d22    @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@
    157  vmlal.s16 q4,d2,d24    @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_2)@
    159  vmlal.s16 q4,d3,d25    @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@
    161  vmlal.s16 q4,d4,d26    @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@
    163  vmlal.s16 q4,d5,d27    @mul_res1 = vmlal_u8(mul_res1, src_tmp2, coeffabs_5)@
    164  vmlal.s16 q4,d6,d28    @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@
    165  vmlal.s16 q4,d7,d29    @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_7)@
    225  vmull.s16 q4,d1,d23    @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    227  vmlal.s16 q4,d0,d22    @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@
    [all …]
D | ihevc_inter_pred_luma_vert_w16inp_w16out.s
    163  vmull.s16 q4,d1,d23    @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    165  vmlal.s16 q4,d0,d22    @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@
    167  vmlal.s16 q4,d2,d24    @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_2)@
    169  vmlal.s16 q4,d3,d25    @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_3)@
    171  vmlal.s16 q4,d4,d26    @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_4)@
    173  vmlal.s16 q4,d5,d27    @mul_res1 = vmlal_u8(mul_res1, src_tmp2, coeffabs_5)@
    174  vmlal.s16 q4,d6,d28    @mul_res1 = vmlal_u8(mul_res1, src_tmp3, coeffabs_6)@
    175  vmlal.s16 q4,d7,d29    @mul_res1 = vmlal_u8(mul_res1, src_tmp4, coeffabs_7)@
    238  vmull.s16 q4,d1,d23    @mul_res1 = vmull_u8(src_tmp2, coeffabs_1)@
    240  vmlal.s16 q4,d0,d22    @mul_res1 = vmlal_u8(mul_res1, src_tmp1, coeffabs_0)@
    [all …]
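The two w16inp variants run the same vertical pass on 16-bit intermediate samples: vmull.s16/vmlal.s16 widen into a 32-bit accumulator, and because signed coefficients can be used directly there is no vmlsl step. (The @vmull_u8/@vmlal_u8 comments on those lines appear to be carried over from the 8-bit variant; the instructions themselves operate on signed 16-bit lanes.) A hedged sketch with an assumed function name and coefficient layout:

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative sketch: four output samples of the 16-bit-input vertical pass. */
static int32x4_t filter_vert_8tap_w16inp(const int16_t *src, int src_stride,
                                         const int16_t coeff[8])
{
    int16x4_t row = vld1_s16(src);                          /* first source row      */
    int32x4_t acc = vmull_s16(row, vdup_n_s16(coeff[0]));   /* vmull.s16             */

    for (int k = 1; k < 8; k++) {
        row = vld1_s16(src + k * src_stride);               /* remaining rows        */
        acc = vmlal_s16(acc, row, vdup_n_s16(coeff[k]));    /* vmlal.s16 per tap     */
    }
    return acc;  /* narrowed with a rounding shift before it is stored */
}
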
D | ihevc_intra_pred_filters_neon_intr.c
    2126  uint16x8_t mul_res1, mul_res2, add_res;    in ihevc_intra_pred_luma_mode_3_to_9_neonintr() local
    2155  mul_res1 = vmull_u8(ref_main_idx, dup_const_32_fract);    in ihevc_intra_pred_luma_mode_3_to_9_neonintr()
    2158  add_res = vaddq_u16(mul_res1, mul_res2);    in ihevc_intra_pred_luma_mode_3_to_9_neonintr()
    2212  uint16x8_t mul_res1, mul_res2, add_res;    in ihevc_intra_pred_luma_mode_3_to_9_neonintr() local
    2240  mul_res1 = vmull_u8(vreinterpret_u8_u32(pu1_ref_val1), dup_32_fract);    in ihevc_intra_pred_luma_mode_3_to_9_neonintr()
    2243  add_res = vaddq_u16(mul_res1, mul_res2);    in ihevc_intra_pred_luma_mode_3_to_9_neonintr()
    2334  uint16x8_t mul_res1, mul_res2, add_res;    in ihevc_intra_pred_luma_mode_11_to_17_neonintr() local
    2435  mul_res1 = vmull_u8(ref_main_idx, dup_const_32_fract);    in ihevc_intra_pred_luma_mode_11_to_17_neonintr()
    2438  add_res = vaddq_u16(mul_res1, mul_res2);    in ihevc_intra_pred_luma_mode_11_to_17_neonintr()
    2505  mul_res1 = vmull_u8(vreinterpret_u8_u32(ref_main_idx1), dup_const_32_fract);    in ihevc_intra_pred_luma_mode_11_to_17_neonintr()
    [all …]
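In the intrinsics file, mul_res1 and mul_res2 build the two-tap interpolation used by the angular intra-prediction modes: each output pixel blends two reference pixels weighted by (32 - fract) and fract, then rounds and shifts right by 5, as in the HEVC angular-prediction formula. The sketch below reuses the listing's variable names; the final vrshrn_n_u16() step is inferred from that formula rather than quoted from the file.

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative sketch: blend ref[idx] and ref[idx + 1] with weights (32 - fract) and fract. */
static uint8x8_t angular_interp(uint8x8_t ref_main_idx,    /* ref[idx]     */
                                uint8x8_t ref_main_idx_1,  /* ref[idx + 1] */
                                uint8_t fract)
{
    uint8x8_t dup_const_fract    = vdup_n_u8(fract);
    uint8x8_t dup_const_32_fract = vdup_n_u8((uint8_t)(32 - fract));

    uint16x8_t mul_res1 = vmull_u8(ref_main_idx, dup_const_32_fract);
    uint16x8_t mul_res2 = vmull_u8(ref_main_idx_1, dup_const_fract);
    uint16x8_t add_res  = vaddq_u16(mul_res1, mul_res2);

    /* (sum + 16) >> 5; the weights sum to 32, so the result always fits in 8 bits. */
    return vrshrn_n_u16(add_res, 5);
}
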
D | ihevc_inter_pred_chroma_vert.s
    151  …vmlsl.u8 q3,d5,d0    @vmlsl_u8(mul_res1, vreinterpret_u8_u32(src_tmp1), coeffab…
    153  …vmlal.u8 q3,d4,d2    @vmlal_u8(mul_res1, vreinterpret_u8_u32(src_tmp3), coeffab…
    203  vqrshrun.s16 d4,q2,#6    @vrshrq_n_s16(vreinterpretq_s16_u16(mul_res1),6)
D | ihevc_inter_pred_chroma_vert_w16out.s
    152  …vmlsl.u8 q3,d5,d0    @vmlsl_u8(mul_res1, vreinterpret_u8_u32(src_tmp1), coeffab…
    154  …vmlal.u8 q3,d4,d2    @vmlal_u8(mul_res1, vreinterpret_u8_u32(src_tmp3), coeffab…
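The chroma files apply the same vmlal/vmlsl idiom with a four-tap filter; the hit at line 203 shows the epilogue, where the wide accumulator is narrowed back to pixels. The source comment names vrshrq_n_s16, but the instruction actually used is the saturating unsigned-narrowing form, whose intrinsic spelling is sketched below under an assumed helper name.

#include <arm_neon.h>

/* The 16-bit accumulator (q2/q3 in the listing) can go negative when reinterpreted
 * as signed; vqrshrun rounds the >>6 shift and saturates to the 0..255 output range. */
static uint8x8_t round_and_narrow(uint16x8_t mul_res1)
{
    return vqrshrun_n_s16(vreinterpretq_s16_u16(mul_res1), 6);
}
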
/external/libvpx/config/arm-neon/vpx_dsp/arm/

D | vpx_convolve8_vert_filter_type2_neon.asm.S
    90   vmlal.u8 q4, d1, d23    @mul_res1 = vmull_u8(src_tmp2,
    93   vmlsl.u8 q4, d0, d22    @mul_res1 = vmlsl_u8(mul_res1,
    96   vmlsl.u8 q4, d2, d24    @mul_res1 = vmlsl_u8(mul_res1,
    99   vmlal.u8 q4, d3, d25    @mul_res1 = vmlal_u8(mul_res1,
    102  vmlal.u8 q4, d4, d26    @mul_res1 = vmlal_u8(mul_res1,
    105  vmlsl.u8 q4, d5, d27    @mul_res1 = vmlsl_u8(mul_res1,
    108  vmlal.u8 q4, d6, d28    @mul_res1 = vmlal_u8(mul_res1,
    111  vmlsl.u8 q4, d7, d29    @mul_res1 = vmlsl_u8(mul_res1,
    184  vmlal.u8 q4, d1, d23    @mul_res1 = vmull_u8(src_tmp2,
    187  vmlsl.u8 q4, d0, d22    @mul_res1 = vmlsl_u8(mul_res1,
    [all …]
D | vpx_convolve8_avg_vert_filter_type2_neon.asm.S
    90   vmlal.u8 q4, d1, d23    @mul_res1 = vmull_u8(src_tmp2,
    93   vmlsl.u8 q4, d0, d22    @mul_res1 = vmlsl_u8(mul_res1,
    96   vmlsl.u8 q4, d2, d24    @mul_res1 = vmlsl_u8(mul_res1,
    99   vmlal.u8 q4, d3, d25    @mul_res1 = vmlal_u8(mul_res1,
    102  vmlal.u8 q4, d4, d26    @mul_res1 = vmlal_u8(mul_res1,
    105  vmlsl.u8 q4, d5, d27    @mul_res1 = vmlsl_u8(mul_res1,
    108  vmlal.u8 q4, d6, d28    @mul_res1 = vmlal_u8(mul_res1,
    111  vmlsl.u8 q4, d7, d29    @mul_res1 = vmlsl_u8(mul_res1,
    187  vmlal.u8 q4, d1, d23    @mul_res1 = vmull_u8(src_tmp2,
    190  vmlsl.u8 q4, d0, d22    @mul_res1 = vmlsl_u8(mul_res1,
    [all …]
D | vpx_convolve8_vert_filter_type1_neon.asm.S
    90   vmlsl.u8 q4, d1, d23    @mul_res1 = vmull_u8(src_tmp2,
    93   vmlsl.u8 q4, d0, d22    @mul_res1 = vmlsl_u8(mul_res1,
    96   vmlal.u8 q4, d2, d24    @mul_res1 = vmlsl_u8(mul_res1,
    99   vmlal.u8 q4, d3, d25    @mul_res1 = vmlal_u8(mul_res1,
    102  vmlal.u8 q4, d4, d26    @mul_res1 = vmlal_u8(mul_res1,
    105  vmlal.u8 q4, d5, d27    @mul_res1 = vmlsl_u8(mul_res1,
    108  vmlsl.u8 q4, d6, d28    @mul_res1 = vmlal_u8(mul_res1,
    111  vmlsl.u8 q4, d7, d29    @mul_res1 = vmlsl_u8(mul_res1,
    183  vmlsl.u8 q4, d1, d23    @mul_res1 = vmull_u8(src_tmp2,
    186  vmlsl.u8 q4, d0, d22    @mul_res1 = vmlsl_u8(mul_res1,
    [all …]
D | vpx_convolve8_avg_vert_filter_type1_neon.asm.S
    89   vmlsl.u8 q4, d1, d23    @mul_res1 = vmull_u8(src_tmp2,
    92   vmlsl.u8 q4, d0, d22    @mul_res1 = vmlsl_u8(mul_res1,
    95   vmlal.u8 q4, d2, d24    @mul_res1 = vmlsl_u8(mul_res1,
    98   vmlal.u8 q4, d3, d25    @mul_res1 = vmlal_u8(mul_res1,
    101  vmlal.u8 q4, d4, d26    @mul_res1 = vmlal_u8(mul_res1,
    104  vmlal.u8 q4, d5, d27    @mul_res1 = vmlsl_u8(mul_res1,
    107  vmlsl.u8 q4, d6, d28    @mul_res1 = vmlal_u8(mul_res1,
    110  vmlsl.u8 q4, d7, d29    @mul_res1 = vmlsl_u8(mul_res1,
    186  vmlsl.u8 q4, d1, d23    @mul_res1 = vmull_u8(src_tmp2,
    189  vmlsl.u8 q4, d0, d22    @mul_res1 = vmlsl_u8(mul_res1,
    [all …]
/external/libvpx/libvpx/vpx_dsp/arm/

D | vpx_convolve8_vert_filter_type2_neon.asm
    83   vmlal.u8 q4, d1, d23    ;mul_res1 = vmull_u8(src_tmp2,
    86   vmlsl.u8 q4, d0, d22    ;mul_res1 = vmlsl_u8(mul_res1,
    89   vmlsl.u8 q4, d2, d24    ;mul_res1 = vmlsl_u8(mul_res1,
    92   vmlal.u8 q4, d3, d25    ;mul_res1 = vmlal_u8(mul_res1,
    95   vmlal.u8 q4, d4, d26    ;mul_res1 = vmlal_u8(mul_res1,
    98   vmlsl.u8 q4, d5, d27    ;mul_res1 = vmlsl_u8(mul_res1,
    101  vmlal.u8 q4, d6, d28    ;mul_res1 = vmlal_u8(mul_res1,
    104  vmlsl.u8 q4, d7, d29    ;mul_res1 = vmlsl_u8(mul_res1,
    177  vmlal.u8 q4, d1, d23    ;mul_res1 = vmull_u8(src_tmp2,
    180  vmlsl.u8 q4, d0, d22    ;mul_res1 = vmlsl_u8(mul_res1,
    [all …]
D | vpx_convolve8_avg_vert_filter_type2_neon.asm
    83   vmlal.u8 q4, d1, d23    ;mul_res1 = vmull_u8(src_tmp2,
    86   vmlsl.u8 q4, d0, d22    ;mul_res1 = vmlsl_u8(mul_res1,
    89   vmlsl.u8 q4, d2, d24    ;mul_res1 = vmlsl_u8(mul_res1,
    92   vmlal.u8 q4, d3, d25    ;mul_res1 = vmlal_u8(mul_res1,
    95   vmlal.u8 q4, d4, d26    ;mul_res1 = vmlal_u8(mul_res1,
    98   vmlsl.u8 q4, d5, d27    ;mul_res1 = vmlsl_u8(mul_res1,
    101  vmlal.u8 q4, d6, d28    ;mul_res1 = vmlal_u8(mul_res1,
    104  vmlsl.u8 q4, d7, d29    ;mul_res1 = vmlsl_u8(mul_res1,
    180  vmlal.u8 q4, d1, d23    ;mul_res1 = vmull_u8(src_tmp2,
    183  vmlsl.u8 q4, d0, d22    ;mul_res1 = vmlsl_u8(mul_res1,
    [all …]
D | vpx_convolve8_vert_filter_type1_neon.asm
    83   vmlsl.u8 q4, d1, d23    ;mul_res1 = vmull_u8(src_tmp2,
    86   vmlsl.u8 q4, d0, d22    ;mul_res1 = vmlsl_u8(mul_res1,
    89   vmlal.u8 q4, d2, d24    ;mul_res1 = vmlsl_u8(mul_res1,
    92   vmlal.u8 q4, d3, d25    ;mul_res1 = vmlal_u8(mul_res1,
    95   vmlal.u8 q4, d4, d26    ;mul_res1 = vmlal_u8(mul_res1,
    98   vmlal.u8 q4, d5, d27    ;mul_res1 = vmlsl_u8(mul_res1,
    101  vmlsl.u8 q4, d6, d28    ;mul_res1 = vmlal_u8(mul_res1,
    104  vmlsl.u8 q4, d7, d29    ;mul_res1 = vmlsl_u8(mul_res1,
    176  vmlsl.u8 q4, d1, d23    ;mul_res1 = vmull_u8(src_tmp2,
    179  vmlsl.u8 q4, d0, d22    ;mul_res1 = vmlsl_u8(mul_res1,
    [all …]
D | vpx_convolve8_avg_vert_filter_type1_neon.asm
    82   vmlsl.u8 q4, d1, d23    ;mul_res1 = vmull_u8(src_tmp2,
    85   vmlsl.u8 q4, d0, d22    ;mul_res1 = vmlsl_u8(mul_res1,
    88   vmlal.u8 q4, d2, d24    ;mul_res1 = vmlsl_u8(mul_res1,
    91   vmlal.u8 q4, d3, d25    ;mul_res1 = vmlal_u8(mul_res1,
    94   vmlal.u8 q4, d4, d26    ;mul_res1 = vmlal_u8(mul_res1,
    97   vmlal.u8 q4, d5, d27    ;mul_res1 = vmlsl_u8(mul_res1,
    100  vmlsl.u8 q4, d6, d28    ;mul_res1 = vmlal_u8(mul_res1,
    103  vmlsl.u8 q4, d7, d29    ;mul_res1 = vmlsl_u8(mul_res1,
    179  vmlsl.u8 q4, d1, d23    ;mul_res1 = vmull_u8(src_tmp2,
    182  vmlsl.u8 q4, d0, d22    ;mul_res1 = vmlsl_u8(mul_res1,
    [all …]
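All eight libvpx kernels (the .asm sources under libvpx/vpx_dsp/arm/ and their generated .asm.S copies under config/arm-neon/) vectorise the same vertical 8-tap convolution; the _avg_ variants additionally average the result with the destination. filter_type1 and filter_type2 differ only in which taps are negative, which is why the vmlal/vmlsl pattern differs between the two families, and the inline comments do not always match the instruction on the same line (e.g. vmlal.u8 annotated as vmlsl_u8), apparently because they were copied between variants. For orientation, a scalar sketch of the result they compute; FILTER_BITS is 7 in libvpx, and the function and variable names here are illustrative rather than taken from the library.

#include <stdint.h>

#define FILTER_BITS 7  /* libvpx filter precision; the taps sum to 1 << FILTER_BITS */

static uint8_t clip_pixel(int v) { return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

/* Scalar equivalent of one row of the vertical 8-tap convolution. */
static void convolve8_vert_scalar(const uint8_t *src, int src_stride,
                                  uint8_t *dst, int w, const int16_t filter[8])
{
    for (int x = 0; x < w; x++) {
        int sum = 0;
        for (int k = 0; k < 8; k++)
            sum += filter[k] * src[x + k * src_stride];
        /* Round to nearest, shift back to pixel range, clamp to 8 bits. */
        dst[x] = clip_pixel((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
    }
}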