
Searched refs:vget_low_u16 (Results 1 – 25 of 76) sorted by relevance
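For readers landing on this cross-reference: vget_low_u16 is the Arm NEON intrinsic that returns the low 64-bit half (lanes 0–3) of a 128-bit uint16x8_t as a uint16x4_t, typically so the two halves can be widened or reduced separately, as the matches below show. A minimal standalone sketch (not taken from any matched file):

/* Extracting the low and high halves of a uint16x8_t. */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  uint16_t data[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  uint16x8_t v = vld1q_u16(data);    /* load all eight u16 lanes */
  uint16x4_t lo = vget_low_u16(v);   /* lanes 0..3: {1, 2, 3, 4} */
  uint16x4_t hi = vget_high_u16(v);  /* lanes 4..7: {5, 6, 7, 8} */

  uint16_t out[4];
  vst1_u16(out, lo);
  printf("low:  %u %u %u %u\n", out[0], out[1], out[2], out[3]);
  vst1_u16(out, hi);
  printf("high: %u %u %u %u\n", out[0], out[1], out[2], out[3]);
  return 0;
}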


/external/libjpeg-turbo/simd/arm/aarch64/
jccolext-neon.c
84 uint32x4_t y_ll = vmull_laneq_u16(vget_low_u16(r_l), consts, 0); in jsimd_rgb_ycc_convert_neon()
85 y_ll = vmlal_laneq_u16(y_ll, vget_low_u16(g_l), consts, 1); in jsimd_rgb_ycc_convert_neon()
86 y_ll = vmlal_laneq_u16(y_ll, vget_low_u16(b_l), consts, 2); in jsimd_rgb_ycc_convert_neon()
90 uint32x4_t y_hl = vmull_laneq_u16(vget_low_u16(r_h), consts, 0); in jsimd_rgb_ycc_convert_neon()
91 y_hl = vmlal_laneq_u16(y_hl, vget_low_u16(g_h), consts, 1); in jsimd_rgb_ycc_convert_neon()
92 y_hl = vmlal_laneq_u16(y_hl, vget_low_u16(b_h), consts, 2); in jsimd_rgb_ycc_convert_neon()
99 cb_ll = vmlsl_laneq_u16(cb_ll, vget_low_u16(r_l), consts, 3); in jsimd_rgb_ycc_convert_neon()
100 cb_ll = vmlsl_laneq_u16(cb_ll, vget_low_u16(g_l), consts, 4); in jsimd_rgb_ycc_convert_neon()
101 cb_ll = vmlal_laneq_u16(cb_ll, vget_low_u16(b_l), consts, 5); in jsimd_rgb_ycc_convert_neon()
107 cb_hl = vmlsl_laneq_u16(cb_hl, vget_low_u16(r_h), consts, 3); in jsimd_rgb_ycc_convert_neon()
[all …]
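The matches above show the widening multiply-accumulate-by-lane idiom behind libjpeg-turbo's RGB-to-YCbCr conversion: each vget_low_u16 feeds four u16 lanes into a u16 × u16 → u32 multiply against a coefficient lane. A hedged sketch of that shape, with illustrative names and without libjpeg-turbo's actual coefficients:

#include <arm_neon.h>

/* AArch64-only: the _laneq forms take a 128-bit coefficient vector.
 * r_l, g_l, b_l and the lane layout of `consts` are assumptions. */
static inline uint32x4_t weighted_sum_low(uint16x8_t r_l, uint16x8_t g_l,
                                          uint16x8_t b_l, uint16x8_t consts) {
  /* y = r*consts[0] + g*consts[1] + b*consts[2], on lanes 0..3 only;
   * products widen to u32, so there is no intermediate overflow. */
  uint32x4_t y = vmull_laneq_u16(vget_low_u16(r_l), consts, 0);
  y = vmlal_laneq_u16(y, vget_low_u16(g_l), consts, 1);
  y = vmlal_laneq_u16(y, vget_low_u16(b_l), consts, 2);
  return y;
}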
/external/libgav1/libgav1/src/dsp/arm/
super_res_neon.cc
205 uint32x4_t res_lo = vmull_u16(vget_low_u16(src[1]), vget_low_u16(f[1])); in SuperRes()
206 res_lo = vmlal_u16(res_lo, vget_low_u16(src[3]), vget_low_u16(f[3])); in SuperRes()
207 res_lo = vmlal_u16(res_lo, vget_low_u16(src[4]), vget_low_u16(f[4])); in SuperRes()
208 res_lo = vmlal_u16(res_lo, vget_low_u16(src[6]), vget_low_u16(f[6])); in SuperRes()
210 uint32x4_t temp_lo = vmull_u16(vget_low_u16(src[0]), vget_low_u16(f[0])); in SuperRes()
211 temp_lo = vmlal_u16(temp_lo, vget_low_u16(src[2]), vget_low_u16(f[2])); in SuperRes()
212 temp_lo = vmlal_u16(temp_lo, vget_low_u16(src[5]), vget_low_u16(f[5])); in SuperRes()
213 temp_lo = vmlal_u16(temp_lo, vget_low_u16(src[7]), vget_low_u16(f[7])); in SuperRes()
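SuperRes() uses the plain (non-lane) variants, multiplying two uint16x4_t vectors element-wise with widening and accumulating the products. A sketch of that accumulation pattern; src[] and f[] are stand-ins for the source pixels and filter taps:

#include <arm_neon.h>

static inline uint32x4_t mac4_low(const uint16x8_t src[4],
                                  const uint16x8_t f[4]) {
  uint32x4_t res = vmull_u16(vget_low_u16(src[0]), vget_low_u16(f[0]));
  res = vmlal_u16(res, vget_low_u16(src[1]), vget_low_u16(f[1]));
  res = vmlal_u16(res, vget_low_u16(src[2]), vget_low_u16(f[2]));
  res = vmlal_u16(res, vget_low_u16(src[3]), vget_low_u16(f[3]));
  return res;  /* four u32 partial sums */
}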
intrapred_smooth_neon.cc
104 CalculatePred(vget_low_u16(weighted_top), vget_low_u16(weighted_left), in Smooth4Or8xN_NEON()
105 vget_low_u16(weighted_tr), vget_low_u16(weighted_bl)); in Smooth4Or8xN_NEON()
128 vget_low_u16(weighted_top_low), vget_low_u16(weighted_left_low), in CalculateWeightsAndPred()
129 vget_low_u16(weighted_tr_low), vget_low_u16(weighted_bl)); in CalculateWeightsAndPred()
140 vget_low_u16(weighted_top_high), vget_low_u16(weighted_left_high), in CalculateWeightsAndPred()
141 vget_low_u16(weighted_tr_high), vget_low_u16(weighted_bl)); in CalculateWeightsAndPred()
average_blend_neon.cc
149 vaddl_u16(vget_low_u16(pred0), vget_low_u16(pred1)); in AverageBlend8Row()
203 vst1_u16(dst, vget_low_u16(result)); in AverageBlend_NEON()
cdef_neon.cc
333 uint32x4_t c = vmulq_u32(Square(vget_low_u16(a)), division_table[0]); in Cost0Or4()
335 c = vmlaq_u32(c, Square(vget_low_u16(b)), division_table[2]); in Cost0Or4()
343 uint32x4_t c = Square(vget_low_u16(a)); in SquareAccumulate()
352 uint32x4_t c = vandq_u32(mask, Square(vget_low_u16(a))); in CostOdd()
356 c = vmlaq_u32(c, Square(vget_low_u16(a)), division_table[0]); in CostOdd()
357 c = vmlaq_u32(c, Square(vget_low_u16(b)), division_table[1]); in CostOdd()
/external/libhevc/encoder/arm/
ihevce_ssd_calculator_neon.c
78 ssd_low = vaddl_u16(vget_low_u16(sqabs_low), vget_high_u16(sqabs_low)); in ihevce_4x4_ssd_computer_neon()
79 ssd_high = vaddl_u16(vget_low_u16(sqabs_high), vget_high_u16(sqabs_high)); in ihevce_4x4_ssd_computer_neon()
104 ssd_val = vaddl_u16(vget_low_u16(sqabs), vget_high_u16(sqabs)); in ihevce_1x8_ssd_computer_neon()
130 ssd_low = vaddl_u16(vget_low_u16(sqabs_low), vget_high_u16(sqabs_low)); in ihevce_1x16_ssd_computer_neon()
131 ssd_high = vaddl_u16(vget_low_u16(sqabs_high), vget_high_u16(sqabs_high)); in ihevce_1x16_ssd_computer_neon()
164 ssd_0 = vaddl_u16(vget_low_u16(sqabs_0), vget_high_u16(sqabs_0)); in ihevce_1x32_ssd_computer_neon()
165 ssd_1 = vaddl_u16(vget_low_u16(sqabs_1), vget_high_u16(sqabs_1)); in ihevce_1x32_ssd_computer_neon()
166 ssd_2 = vaddl_u16(vget_low_u16(sqabs_2), vget_high_u16(sqabs_2)); in ihevce_1x32_ssd_computer_neon()
167 ssd_3 = vaddl_u16(vget_low_u16(sqabs_3), vget_high_u16(sqabs_3)); in ihevce_1x32_ssd_computer_neon()
220 ssd_0 = vaddl_u16(vget_low_u16(sqabs_0), vget_high_u16(sqabs_0)); in ihevce_1x64_ssd_computer_neon()
[all …]
ihevce_ssd_and_sad_calculator_neon.c
84 b = vaddl_u16(vget_low_u16(sq_abs_l), vget_high_u16(sq_abs_l)); in ihevce_ssd_and_sad_calculator_neon()
85 d = vaddl_u16(vget_low_u16(sq_abs_h), vget_high_u16(sq_abs_h)); in ihevce_ssd_and_sad_calculator_neon()
108 tmp_a = vaddl_u16(vget_low_u16(sqabs), vget_high_u16(sqabs)); in ihevce_ssd_and_sad_calculator_neon()
150 tmp_a = vaddl_u16(vget_low_u16(sqabs_l), vget_high_u16(sqabs_l)); in ihevce_ssd_and_sad_calculator_neon()
151 tmp_c = vaddl_u16(vget_low_u16(sqabs_h), vget_high_u16(sqabs_h)); in ihevce_ssd_and_sad_calculator_neon()
196 tmp_a = vaddl_u16(vget_low_u16(sqabs_l), vget_high_u16(sqabs_l)); in ihevce_ssd_and_sad_calculator_neon()
197 tmp_c = vaddl_u16(vget_low_u16(sqabs_h), vget_high_u16(sqabs_h)); in ihevce_ssd_and_sad_calculator_neon()
207 tmp_a = vaddl_u16(vget_low_u16(sqabs_l), vget_high_u16(sqabs_l)); in ihevce_ssd_and_sad_calculator_neon()
208 tmp_c = vaddl_u16(vget_low_u16(sqabs_h), vget_high_u16(sqabs_h)); in ihevce_ssd_and_sad_calculator_neon()
259 tmp_a = vaddl_u16(vget_low_u16(sqabs_l), vget_high_u16(sqabs_l)); in ihevce_ssd_and_sad_calculator_neon()
[all …]
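Both SSD kernels above reduce a uint16x8_t of squared absolute differences by adding its low and high halves with widening (vaddl_u16), so eight u16 terms become four u32 partial sums without overflow. A sketch of that idiom plus one possible final fold (the scalar finish here is illustrative, not necessarily libhevc's):

#include <arm_neon.h>

static inline uint32_t sum_sq_u16x8(uint16x8_t sqabs) {
  uint32x4_t ssd = vaddl_u16(vget_low_u16(sqabs), vget_high_u16(sqabs));
  uint64x2_t s2 = vpaddlq_u32(ssd);  /* 4 x u32 -> 2 x u64, pairwise */
  return (uint32_t)(vgetq_lane_u64(s2, 0) + vgetq_lane_u64(s2, 1));
}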
ihevce_coarse_layer_sad_neon.c
202 tmp_a0 = vpadd_u16(vget_low_u16(abs_01), vget_high_u16(abs_01)); in hme_store_4x4_sads_high_speed_neon()
203 tmp_a1 = vpadd_u16(vget_low_u16(abs_23), vget_high_u16(abs_23)); in hme_store_4x4_sads_high_speed_neon()
205 tmp_a0 = vpadd_u16(vget_low_u16(abs_01), vget_high_u16(abs_01)); in hme_store_4x4_sads_high_speed_neon()
222 tmp_a = vpadd_u16(vget_low_u16(abs_01), vget_high_u16(abs_01)); in hme_store_4x4_sads_high_speed_neon()
359 tmp_a.val[0] = vpadd_u16(vget_low_u16(abs_a_01), vget_high_u16(abs_a_01)); in hme_store_4x4_sads_high_quality_neon()
360 tmp_a.val[1] = vpadd_u16(vget_low_u16(abs_a_23), vget_high_u16(abs_a_23)); in hme_store_4x4_sads_high_quality_neon()
362 tmp_a.val[0] = vpadd_u16(vget_low_u16(abs_a_01), vget_high_u16(abs_a_01)); in hme_store_4x4_sads_high_quality_neon()
363 tmp_b0 = vpadd_u16(vget_low_u16(abs_b_01), vget_high_u16(abs_b_01)); in hme_store_4x4_sads_high_quality_neon()
364 tmp_b1 = vpadd_u16(vget_low_u16(abs_b_23), vget_high_u16(abs_b_23)); in hme_store_4x4_sads_high_quality_neon()
366 tmp_a.val[1] = vpadd_u16(vget_low_u16(abs_b_01), vget_high_u16(abs_b_01)); in hme_store_4x4_sads_high_quality_neon()
[all …]
ihevce_me_neon.c
197 tmp_a0 = vpadd_u16(vget_low_u16(abs_01), vget_high_u16(abs_01)); in ihevce_sad4_4x4_neon()
198 tmp_a1 = vpadd_u16(vget_low_u16(abs_23), vget_high_u16(abs_23)); in ihevce_sad4_4x4_neon()
200 tmp_a0 = vpadd_u16(vget_low_u16(abs_01), vget_high_u16(abs_01)); in ihevce_sad4_4x4_neon()
230 tmp_a0 = vpadd_u16(vget_low_u16(abs_0), vget_high_u16(abs_0)); in ihevce_sad4_8x8_neon()
231 tmp_a1 = vpadd_u16(vget_low_u16(abs_1), vget_high_u16(abs_1)); in ihevce_sad4_8x8_neon()
233 tmp_a0 = vpadd_u16(vget_low_u16(abs_2), vget_high_u16(abs_2)); in ihevce_sad4_8x8_neon()
234 tmp_a1 = vpadd_u16(vget_low_u16(abs_3), vget_high_u16(abs_3)); in ihevce_sad4_8x8_neon()
236 tmp_a0 = vpadd_u16(vget_low_u16(abs_0), vget_high_u16(abs_0)); in ihevce_sad4_8x8_neon()
237 tmp_a1 = vpadd_u16(vget_low_u16(abs_1), vget_high_u16(abs_1)); in ihevce_sad4_8x8_neon()
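The SAD kernels above fold with vpadd_u16 instead, which sums adjacent element pairs across its two 64-bit inputs, halving the element count per call. A sketch with illustrative names:

#include <arm_neon.h>

/* Two rounds of pairwise adds reduce two uint16x8_t vectors of
 * absolute differences to four u16 partial SADs. */
static inline uint16x4_t fold_sads(uint16x8_t abs_01, uint16x8_t abs_23) {
  uint16x4_t a0 = vpadd_u16(vget_low_u16(abs_01), vget_high_u16(abs_01));
  uint16x4_t a1 = vpadd_u16(vget_low_u16(abs_23), vget_high_u16(abs_23));
  return vpadd_u16(a0, a1);
}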
/external/libjpeg-turbo/simd/arm/aarch32/
jccolext-neon.c
98 uint32x4_t y_low = vmull_lane_u16(vget_low_u16(r), consts.val[0], 0); in jsimd_rgb_ycc_convert_neon()
99 y_low = vmlal_lane_u16(y_low, vget_low_u16(g), consts.val[0], 1); in jsimd_rgb_ycc_convert_neon()
100 y_low = vmlal_lane_u16(y_low, vget_low_u16(b), consts.val[0], 2); in jsimd_rgb_ycc_convert_neon()
107 cb_low = vmlsl_lane_u16(cb_low, vget_low_u16(r), consts.val[0], 3); in jsimd_rgb_ycc_convert_neon()
108 cb_low = vmlsl_lane_u16(cb_low, vget_low_u16(g), consts.val[1], 0); in jsimd_rgb_ycc_convert_neon()
109 cb_low = vmlal_lane_u16(cb_low, vget_low_u16(b), consts.val[1], 1); in jsimd_rgb_ycc_convert_neon()
117 cr_low = vmlal_lane_u16(cr_low, vget_low_u16(r), consts.val[1], 1); in jsimd_rgb_ycc_convert_neon()
118 cr_low = vmlsl_lane_u16(cr_low, vget_low_u16(g), consts.val[1], 2); in jsimd_rgb_ycc_convert_neon()
119 cr_low = vmlsl_lane_u16(cr_low, vget_low_u16(b), consts.val[1], 3); in jsimd_rgb_ycc_convert_neon()
/external/libjpeg-turbo/simd/arm/
jcgryext-neon.c
77 uint32x4_t y_ll = vmull_n_u16(vget_low_u16(r_l), F_0_298); in jsimd_rgb_gray_convert_neon()
79 uint32x4_t y_hl = vmull_n_u16(vget_low_u16(r_h), F_0_298); in jsimd_rgb_gray_convert_neon()
81 y_ll = vmlal_n_u16(y_ll, vget_low_u16(g_l), F_0_587); in jsimd_rgb_gray_convert_neon()
83 y_hl = vmlal_n_u16(y_hl, vget_low_u16(g_h), F_0_587); in jsimd_rgb_gray_convert_neon()
85 y_ll = vmlal_n_u16(y_ll, vget_low_u16(b_l), F_0_113); in jsimd_rgb_gray_convert_neon()
87 y_hl = vmlal_n_u16(y_hl, vget_low_u16(b_h), F_0_113); in jsimd_rgb_gray_convert_neon()
jquanti-neon.c
140 int32x4_t row0_l = vreinterpretq_s32_u32(vmull_u16(vget_low_u16(abs_row0), in jsimd_quantize_neon()
141 vget_low_u16(recip0))); in jsimd_quantize_neon()
144 int32x4_t row1_l = vreinterpretq_s32_u32(vmull_u16(vget_low_u16(abs_row1), in jsimd_quantize_neon()
145 vget_low_u16(recip1))); in jsimd_quantize_neon()
148 int32x4_t row2_l = vreinterpretq_s32_u32(vmull_u16(vget_low_u16(abs_row2), in jsimd_quantize_neon()
149 vget_low_u16(recip2))); in jsimd_quantize_neon()
152 int32x4_t row3_l = vreinterpretq_s32_u32(vmull_u16(vget_low_u16(abs_row3), in jsimd_quantize_neon()
153 vget_low_u16(recip3))); in jsimd_quantize_neon()
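jquanti-neon.c pairs the widening multiply with a reinterpret: u16 × u16 → u32 products are viewed as s32 so the quantizer's later arithmetic can be signed. A minimal sketch of that step (abs_row0/recip0 are stand-ins):

#include <arm_neon.h>

static inline int32x4_t mul_low_as_s32(uint16x8_t abs_row0,
                                       uint16x8_t recip0) {
  /* bit-for-bit view change, no value conversion */
  return vreinterpretq_s32_u32(vmull_u16(vget_low_u16(abs_row0),
                                         vget_low_u16(recip0)));
}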
/external/webrtc/modules/audio_processing/aecm/
aecm_core_neon.cc
72 far_energy_v = vaddw_u16(far_energy_v, vget_low_u16(spectrum_v)); in WebRtcAecm_CalcLinearEnergiesNeon()
76 vget_low_u16(spectrum_v)); in WebRtcAecm_CalcLinearEnergiesNeon()
87 vget_low_u16(spectrum_v)); in WebRtcAecm_CalcLinearEnergiesNeon()
150 echo_est_v_low = vmull_u16(vget_low_u16(far_spectrum_v), in WebRtcAecm_StoreAdaptiveChannelNeon()
151 vget_low_u16(vreinterpretq_u16_s16(adapt_v))); in WebRtcAecm_StoreAdaptiveChannelNeon()
/external/libvpx/libvpx/vpx_dsp/arm/
sad4d_neon.c
54 a[0] = vpadd_u16(vget_low_u16(abs[0]), vget_high_u16(abs[0])); in sad4x_4d()
55 a[1] = vpadd_u16(vget_low_u16(abs[1]), vget_high_u16(abs[1])); in sad4x_4d()
77 const uint16x4_t a0 = vadd_u16(vget_low_u16(sum[0]), vget_high_u16(sum[0])); in sad_512_pel_final_neon()
78 const uint16x4_t a1 = vadd_u16(vget_low_u16(sum[1]), vget_high_u16(sum[1])); in sad_512_pel_final_neon()
79 const uint16x4_t a2 = vadd_u16(vget_low_u16(sum[2]), vget_high_u16(sum[2])); in sad_512_pel_final_neon()
80 const uint16x4_t a3 = vadd_u16(vget_low_u16(sum[3]), vget_high_u16(sum[3])); in sad_512_pel_final_neon()
90 const uint16x4_t a0 = vpadd_u16(vget_low_u16(sum[0]), vget_high_u16(sum[0])); in sad_1024_pel_final_neon()
91 const uint16x4_t a1 = vpadd_u16(vget_low_u16(sum[1]), vget_high_u16(sum[1])); in sad_1024_pel_final_neon()
92 const uint16x4_t a2 = vpadd_u16(vget_low_u16(sum[2]), vget_high_u16(sum[2])); in sad_1024_pel_final_neon()
93 const uint16x4_t a3 = vpadd_u16(vget_low_u16(sum[3]), vget_high_u16(sum[3])); in sad_1024_pel_final_neon()
highbd_vpx_convolve8_neon.c
167 s0 = vreinterpret_s16_u16(vget_low_u16(t0)); in vpx_highbd_convolve8_horiz_neon()
168 s1 = vreinterpret_s16_u16(vget_low_u16(t1)); in vpx_highbd_convolve8_horiz_neon()
169 s2 = vreinterpret_s16_u16(vget_low_u16(t2)); in vpx_highbd_convolve8_horiz_neon()
170 s3 = vreinterpret_s16_u16(vget_low_u16(t3)); in vpx_highbd_convolve8_horiz_neon()
195 vst1_u16(dst + 0 * dst_stride, vget_low_u16(d01)); in vpx_highbd_convolve8_horiz_neon()
196 vst1_u16(dst + 1 * dst_stride, vget_low_u16(d23)); in vpx_highbd_convolve8_horiz_neon()
250 vst1_u16(dst, vget_low_u16(d0)); in vpx_highbd_convolve8_horiz_neon()
252 vst1_u16(dst, vget_low_u16(d1)); in vpx_highbd_convolve8_horiz_neon()
254 vst1_u16(dst, vget_low_u16(d2)); in vpx_highbd_convolve8_horiz_neon()
256 vst1_u16(dst, vget_low_u16(d3)); in vpx_highbd_convolve8_horiz_neon()
[all …]
vpx_convolve8_neon.c
621 s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src)))); in vpx_convolve8_vert_neon()
623 s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src)))); in vpx_convolve8_vert_neon()
625 s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src)))); in vpx_convolve8_vert_neon()
627 s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src)))); in vpx_convolve8_vert_neon()
629 s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src)))); in vpx_convolve8_vert_neon()
631 s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src)))); in vpx_convolve8_vert_neon()
633 s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src)))); in vpx_convolve8_vert_neon()
637 s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src)))); in vpx_convolve8_vert_neon()
639 s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src)))); in vpx_convolve8_vert_neon()
641 s9 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src)))); in vpx_convolve8_vert_neon()
[all …]
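vpx_convolve8_neon.c repeats a load-and-widen idiom worth spelling out: vmovl_u8 zero-extends eight u8 pixels to u16, vget_low_u16 keeps the first four, and vreinterpret_s16_u16 makes them signed so filter taps with negative values can be applied. A sketch of just that step:

#include <arm_neon.h>

static inline int16x4_t load4_u8_as_s16(const uint8_t *src) {
  /* values 0..255 are unchanged by the signed reinterpretation */
  return vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
}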
highbd_intrapred_neon.c
84 uint16x4_t sum = vadd_u16(vget_low_u16(ref_u16), vget_high_u16(ref_u16)); in dc_sum_8()
104 uint16x4_t sum = vadd_u16(vget_low_u16(p0), vget_high_u16(p0)); in vpx_highbd_dc_predictor_8x8_neon()
148 uint16x4_t sum = vadd_u16(vget_low_u16(p0), vget_high_u16(p0)); in dc_sum_16()
171 uint16x4_t pal1 = vadd_u16(vget_low_u16(pal0), vget_high_u16(pal0)); in vpx_highbd_dc_predictor_16x16_neon()
218 uint16x4_t sum = vadd_u16(vget_low_u16(p2), vget_high_u16(p2)); in dc_sum_32()
249 const uint16x4_t pal1 = vadd_u16(vget_low_u16(pal0), vget_high_u16(pal0)); in vpx_highbd_dc_predictor_32x32_neon()
297 const uint16x4_t avg2_low = vget_low_u16(avg2); in vpx_highbd_d45_predictor_4x4_neon()
463 const uint16x8_t L3210XA012 = vcombine_u16(L3210, vget_low_u16(XA0123___)); in vpx_highbd_d135_predictor_4x4_neon()
468 const uint16x4_t row_0 = vget_low_u16(avg2); in vpx_highbd_d135_predictor_4x4_neon()
490 const uint16x4_t L3210 = vrev64_u16(vget_low_u16(L01234567)); in vpx_highbd_d135_predictor_8x8_neon()
[all …]
/external/libaom/libaom/aom_dsp/arm/
variance_neon.c
186 d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16)); in aom_variance16x8_neon()
192 d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16)); in aom_variance16x8_neon()
198 d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16)); in aom_variance16x8_neon()
204 d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16)); in aom_variance16x8_neon()
260 d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16)); in aom_variance8x16_neon()
266 d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16)); in aom_variance8x16_neon()
320 d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16)); in aom_mse16x16_neon()
325 d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16)); in aom_mse16x16_neon()
330 d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16)); in aom_mse16x16_neon()
335 d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16)); in aom_mse16x16_neon()
sad_neon.c
71 d1 = vpaddl_u16(vget_low_u16(q12)); in aom_sad4x4_neon()
114 vaddl_u16(vget_low_u16(vec_lo), vget_high_u16(vec_lo)); in horizontal_long_add_16x8()
116 vaddl_u16(vget_low_u16(vec_hi), vget_high_u16(vec_hi)); in horizontal_long_add_16x8()
/external/libaom/libaom/av1/common/arm/
transpose_neon.h
365 *a0 = vcombine_u16(vget_low_u16(vreinterpretq_u16_u32(c0.val[0])), in transpose_u16_8x8()
366 vget_low_u16(vreinterpretq_u16_u32(c2.val[0]))); in transpose_u16_8x8()
370 *a2 = vcombine_u16(vget_low_u16(vreinterpretq_u16_u32(c0.val[1])), in transpose_u16_8x8()
371 vget_low_u16(vreinterpretq_u16_u32(c2.val[1]))); in transpose_u16_8x8()
375 *a1 = vcombine_u16(vget_low_u16(vreinterpretq_u16_u32(c1.val[0])), in transpose_u16_8x8()
376 vget_low_u16(vreinterpretq_u16_u32(c3.val[0]))); in transpose_u16_8x8()
380 *a3 = vcombine_u16(vget_low_u16(vreinterpretq_u16_u32(c1.val[1])), in transpose_u16_8x8()
381 vget_low_u16(vreinterpretq_u16_u32(c3.val[1]))); in transpose_u16_8x8()
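transpose_u16_8x8() rebuilds each output row by taking the low u16 half of two 32-bit-viewed intermediates and joining them with vcombine_u16. A sketch of that recombination step in isolation (c0 and c2 stand in for the zip/trn intermediates; this is the pattern, not the whole transpose):

#include <arm_neon.h>

static inline uint16x8_t combine_lows(uint32x4_t c0, uint32x4_t c2) {
  return vcombine_u16(vget_low_u16(vreinterpretq_u16_u32(c0)),
                      vget_low_u16(vreinterpretq_u16_u32(c2)));
}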
selfguided_neon.c
139 d0 = vget_low_u16(s16_4); in calc_ab_internal_common()
140 d1 = vget_low_u16(s16_5); in calc_ab_internal_common()
141 d2 = vget_low_u16(s16_6); in calc_ab_internal_common()
142 d3 = vget_low_u16(s16_7); in calc_ab_internal_common()
228 s0 = vmull_u16(vget_low_u16(s16_0), one_by_n_minus_1_vec); in calc_ab_internal_common()
229 s1 = vmull_u16(vget_low_u16(s16_1), one_by_n_minus_1_vec); in calc_ab_internal_common()
230 s2 = vmull_u16(vget_low_u16(s16_2), one_by_n_minus_1_vec); in calc_ab_internal_common()
231 s3 = vmull_u16(vget_low_u16(s16_3), one_by_n_minus_1_vec); in calc_ab_internal_common()
237 s0 = vmulq_u32(s0, vmovl_u16(vget_low_u16(s16_4))); in calc_ab_internal_common()
238 s1 = vmulq_u32(s1, vmovl_u16(vget_low_u16(s16_5))); in calc_ab_internal_common()
[all …]
/external/webp/src/dsp/
yuv_neon.c
31 const uint16x4_t r_lo = vget_low_u16(r); in ConvertRGBToY_NEON()
33 const uint16x4_t g_lo = vget_low_u16(g); in ConvertRGBToY_NEON()
35 const uint16x4_t b_lo = vget_low_u16(b); in ConvertRGBToY_NEON()
91 const int16x4_t r_lo = vreinterpret_s16_u16(vget_low_u16(r)); \
93 const int16x4_t g_lo = vreinterpret_s16_u16(vget_low_u16(g)); \
95 const int16x4_t b_lo = vreinterpret_s16_u16(vget_low_u16(b)); \
/external/zlib/
adler32_simd.c
289 v_s2 = vmlal_u16(v_s2, vget_low_u16 (v_column_sum_1), in adler32_simd_()
293 v_s2 = vmlal_u16(v_s2, vget_low_u16 (v_column_sum_2), in adler32_simd_()
297 v_s2 = vmlal_u16(v_s2, vget_low_u16 (v_column_sum_3), in adler32_simd_()
301 v_s2 = vmlal_u16(v_s2, vget_low_u16 (v_column_sum_4), in adler32_simd_()
/external/tensorflow/tensorflow/core/kernels/
quantized_instance_norm.cc
75 const uint16x4_t v_high_low = vget_low_u16(v_high_u16); in ColMeanAndVariance()
77 const uint16x4_t v_low_low = vget_low_u16(v_low_u16); in ColMeanAndVariance()
170 vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_high))), in MinAndMax()
172 vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_low)))}; in MinAndMax()
226 vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_high))), in InstanceNorm()
228 vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_low)))}; in InstanceNorm()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
depthwiseconv_uint8.h
205 vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
250 vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
280 vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
311 vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
351 vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
377 vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
447 vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
466 vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
491 vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
551 vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
[all …]
