/external/XNNPACK/src/u8-maxpool/ |
D | 9p8x-minmax-neon-c16.c |
    81   const uint8x16_t vi0 = vld1q_u8(i0); i0 += 16;   in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    82   const uint8x16_t vi1 = vld1q_u8(i1); i1 += 16;   in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    83   const uint8x16_t vi2 = vld1q_u8(i2); i2 += 16;   in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    84   const uint8x16_t vi3 = vld1q_u8(i3); i3 += 16;   in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    85   const uint8x16_t vi4 = vld1q_u8(i4); i4 += 16;   in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    86   const uint8x16_t vi5 = vld1q_u8(i5); i5 += 16;   in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    87   const uint8x16_t vi6 = vld1q_u8(i6); i6 += 16;   in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    88   const uint8x16_t vi7 = vld1q_u8(i7); i7 += 16;   in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    89   const uint8x16_t vi8 = vld1q_u8(i8); i8 += 16;   in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    104  const uint8x16_t vi0 = vld1q_u8(i0);             in xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16()
    [all …]
|
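The XNNPACK max-pool matches above load nine input rows of 16 unsigned bytes each and reduce them with vector maxima. As a rough illustration of that pattern (a hypothetical helper, not the XNNPACK entry point; the row-pointer array and min/max clamp parameters are assumptions):

#include <arm_neon.h>
#include <stdint.h>

/* Hypothetical 16-lane max-pool step: reduce 9 rows to one output row
 * with byte-wise maxima, then clamp to the operator's output range. */
static void maxpool_9x16(const uint8_t *rows[9], uint8_t *out,
                         uint8_t output_min, uint8_t output_max) {
  uint8x16_t vmax = vld1q_u8(rows[0]);
  for (int k = 1; k < 9; ++k) {
    vmax = vmaxq_u8(vmax, vld1q_u8(rows[k]));     /* element-wise max */
  }
  vmax = vmaxq_u8(vmax, vdupq_n_u8(output_min));  /* clamp low */
  vmax = vminq_u8(vmax, vdupq_n_u8(output_max));  /* clamp high */
  vst1q_u8(out, vmax);
}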
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | vpx_convolve_avg_neon.c |
    71   s0 = vld1q_u8(src);                in vpx_convolve_avg_neon()
    73   s1 = vld1q_u8(src);                in vpx_convolve_avg_neon()
    75   d0 = vld1q_u8(dst);                in vpx_convolve_avg_neon()
    76   d1 = vld1q_u8(dst + dst_stride);   in vpx_convolve_avg_neon()
    90   s0 = vld1q_u8(src);                in vpx_convolve_avg_neon()
    91   s1 = vld1q_u8(src + 16);           in vpx_convolve_avg_neon()
    93   s2 = vld1q_u8(src);                in vpx_convolve_avg_neon()
    94   s3 = vld1q_u8(src + 16);           in vpx_convolve_avg_neon()
    96   d0 = vld1q_u8(dst);                in vpx_convolve_avg_neon()
    97   d1 = vld1q_u8(dst + 16);           in vpx_convolve_avg_neon()
    [all …]
|
D | sad_neon.c |
    34   const uint8x16_t second_pred_u8 = vld1q_u8(second_pred);   in vpx_sad4x4_avg_neon()
    65   const uint8x16_t second_pred_u8 = vld1q_u8(second_pred);   in vpx_sad4x8_avg_neon()
    139  const uint8x16_t a_u8 = vld1q_u8(src_ptr);                 in sad16x()
    140  const uint8x16_t b_u8 = vld1q_u8(ref_ptr);                 in sad16x()
    157  const uint8x16_t a_u8 = vld1q_u8(src_ptr);                 in sad16x_avg()
    158  const uint8x16_t b_u8 = vld1q_u8(ref_ptr);                 in sad16x_avg()
    159  const uint8x16_t c_u8 = vld1q_u8(second_pred);             in sad16x_avg()
    197  const uint8x16_t a_lo = vld1q_u8(src_ptr);                 in sad32x()
    198  const uint8x16_t a_hi = vld1q_u8(src_ptr + 16);            in sad32x()
    199  const uint8x16_t b_lo = vld1q_u8(ref_ptr);                 in sad32x()
    [all …]
|
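The sad16x/sad32x matches above load one 16-byte row from the source and reference blocks per iteration. A minimal sketch of the usual accumulation behind such kernels (an illustrative function, not the libvpx implementation; the name, stride parameters, and row count are assumptions):

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative 16-wide sum of absolute differences over h rows. */
static uint32_t sad16xh_sketch(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride, int h) {
  uint16x8_t acc = vdupq_n_u16(0);
  for (int i = 0; i < h; ++i) {
    const uint8x16_t s = vld1q_u8(src);
    const uint8x16_t r = vld1q_u8(ref);
    /* |s - r| per byte, pairwise-added into eight 16-bit lanes. */
    acc = vpadalq_u8(acc, vabdq_u8(s, r));
    src += src_stride;
    ref += ref_stride;
  }
  /* Horizontal reduction of the eight partial sums. */
  const uint32x4_t acc32 = vpaddlq_u16(acc);
  const uint64x2_t acc64 = vpaddlq_u32(acc32);
  return (uint32_t)(vgetq_lane_u64(acc64, 0) + vgetq_lane_u64(acc64, 1));
}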
D | vpx_convolve_copy_neon.c |
    54   s0 = vld1q_u8(src);        in vpx_convolve_copy_neon()
    56   s1 = vld1q_u8(src);        in vpx_convolve_copy_neon()
    68   s0 = vld1q_u8(src);        in vpx_convolve_copy_neon()
    69   s1 = vld1q_u8(src + 16);   in vpx_convolve_copy_neon()
    71   s2 = vld1q_u8(src);        in vpx_convolve_copy_neon()
    72   s3 = vld1q_u8(src + 16);   in vpx_convolve_copy_neon()
    86   s0 = vld1q_u8(src);        in vpx_convolve_copy_neon()
    87   s1 = vld1q_u8(src + 16);   in vpx_convolve_copy_neon()
    88   s2 = vld1q_u8(src + 32);   in vpx_convolve_copy_neon()
    89   s3 = vld1q_u8(src + 48);   in vpx_convolve_copy_neon()
|
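The convolve-copy matches above are plain block copies widened to 16, 32, or 64 bytes per row; the core step is just a load/store pair. A sketch for a 32-byte row (hypothetical helper):

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative 32-byte row copy in the convolve_copy style. */
static void copy_row32(uint8_t *dst, const uint8_t *src) {
  vst1q_u8(dst, vld1q_u8(src));
  vst1q_u8(dst + 16, vld1q_u8(src + 16));
}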
D | subtract_neon.c |
    28   const uint8x16_t s0 = vld1q_u8(&src[c + 0]);     in vpx_subtract_block_neon()
    29   const uint8x16_t s1 = vld1q_u8(&src[c + 16]);    in vpx_subtract_block_neon()
    30   const uint8x16_t p0 = vld1q_u8(&pred[c + 0]);    in vpx_subtract_block_neon()
    31   const uint8x16_t p1 = vld1q_u8(&pred[c + 16]);   in vpx_subtract_block_neon()
    47   const uint8x16_t s = vld1q_u8(&src[0]);          in vpx_subtract_block_neon()
    48   const uint8x16_t p = vld1q_u8(&pred[0]);         in vpx_subtract_block_neon()
|
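vpx_subtract_block_neon loads source and prediction bytes and writes a 16-bit residual. A sketch of one 16-pixel row of that computation (the helper name and signature are assumptions, not the library interface):

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative residual row: diff = src - pred, widened to int16. */
static void subtract_row16(int16_t *diff, const uint8_t *src,
                           const uint8_t *pred) {
  const uint8x16_t s = vld1q_u8(src);
  const uint8x16_t p = vld1q_u8(pred);
  /* Widening subtract, 8 bytes at a time; reinterpreting the wrapped
   * unsigned result as int16 yields the correct signed difference. */
  const uint16x8_t d_lo = vsubl_u8(vget_low_u8(s), vget_low_u8(p));
  const uint16x8_t d_hi = vsubl_u8(vget_high_u8(s), vget_high_u8(p));
  vst1q_s16(diff + 0, vreinterpretq_s16_u16(d_lo));
  vst1q_s16(diff + 8, vreinterpretq_s16_u16(d_hi));
}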
D | vpx_convolve8_neon.h |
    58   *s0 = vld1q_u8(s);   in load_u8_16x8()
    60   *s1 = vld1q_u8(s);   in load_u8_16x8()
    62   *s2 = vld1q_u8(s);   in load_u8_16x8()
    64   *s3 = vld1q_u8(s);   in load_u8_16x8()
    66   *s4 = vld1q_u8(s);   in load_u8_16x8()
    68   *s5 = vld1q_u8(s);   in load_u8_16x8()
    70   *s6 = vld1q_u8(s);   in load_u8_16x8()
    72   *s7 = vld1q_u8(s);   in load_u8_16x8()
|
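The load_u8_16x8 helper above walks eight rows of a 16-byte-wide block, bumping the source pointer by the stride between loads. A sketch of what such a helper typically looks like (the name and parameter names here are assumptions):

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative 8-row, 16-bytes-per-row load helper. */
static inline void load_16x8_sketch(const uint8_t *s, ptrdiff_t stride,
                                    uint8x16_t *s0, uint8x16_t *s1,
                                    uint8x16_t *s2, uint8x16_t *s3,
                                    uint8x16_t *s4, uint8x16_t *s5,
                                    uint8x16_t *s6, uint8x16_t *s7) {
  *s0 = vld1q_u8(s); s += stride;
  *s1 = vld1q_u8(s); s += stride;
  *s2 = vld1q_u8(s); s += stride;
  *s3 = vld1q_u8(s); s += stride;
  *s4 = vld1q_u8(s); s += stride;
  *s5 = vld1q_u8(s); s += stride;
  *s6 = vld1q_u8(s); s += stride;
  *s7 = vld1q_u8(s);
}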
D | avg_pred_neon.c |
    23   const uint8x16_t p = vld1q_u8(pred + x);   in vpx_comp_avg_pred_neon()
    24   const uint8x16_t r = vld1q_u8(ref + x);    in vpx_comp_avg_pred_neon()
    35   const uint8x16_t p = vld1q_u8(pred);       in vpx_comp_avg_pred_neon()
    52   const uint8x16_t p = vld1q_u8(pred);       in vpx_comp_avg_pred_neon()
|
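vpx_comp_avg_pred_neon averages a prediction block with a reference block. The per-vector step is a rounding halving add, sketched here for 16 pixels (hypothetical helper, not the library function):

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative compound average: comp[i] = (pred[i] + ref[i] + 1) >> 1. */
static void comp_avg16(uint8_t *comp, const uint8_t *pred,
                       const uint8_t *ref) {
  const uint8x16_t p = vld1q_u8(pred);
  const uint8x16_t r = vld1q_u8(ref);
  vst1q_u8(comp, vrhaddq_u8(p, r));  /* rounding halving add per byte */
}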
D | idct32x32_1_add_neon.c |
    19   const uint8x16_t a0 = vld1q_u8(*dest);        in idct32x32_1_add_pos_kernel()
    20   const uint8x16_t a1 = vld1q_u8(*dest + 16);   in idct32x32_1_add_pos_kernel()
    30   const uint8x16_t a0 = vld1q_u8(*dest);        in idct32x32_1_add_neg_kernel()
    31   const uint8x16_t a1 = vld1q_u8(*dest + 16);   in idct32x32_1_add_neg_kernel()
|
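The idct32x32_1 kernels above reconstruct a DC-only block: they load 32 destination pixels, add or subtract a single DC value with unsigned saturation, and store the result. A sketch of the positive-DC case (assumed helper; the negative case would use vqsubq_u8 with the DC magnitude):

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative DC-only add over 32 destination pixels. */
static void dc_add32(uint8_t *dest, uint8_t dc) {
  const uint8x16_t vdc = vdupq_n_u8(dc);
  const uint8x16_t a0 = vld1q_u8(dest);
  const uint8x16_t a1 = vld1q_u8(dest + 16);
  vst1q_u8(dest, vqaddq_u8(a0, vdc));        /* saturating byte add */
  vst1q_u8(dest + 16, vqaddq_u8(a1, vdc));
}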
D | avg_neon.c |
    87   const uint8x16_t vec_row1 = vld1q_u8(ref);                    in vpx_int_pro_row_neon()
    88   const uint8x16_t vec_row2 = vld1q_u8(ref + ref_stride);       in vpx_int_pro_row_neon()
    89   const uint8x16_t vec_row3 = vld1q_u8(ref + ref_stride * 2);   in vpx_int_pro_row_neon()
    90   const uint8x16_t vec_row4 = vld1q_u8(ref + ref_stride * 3);   in vpx_int_pro_row_neon()
    91   const uint8x16_t vec_row5 = vld1q_u8(ref + ref_stride * 4);   in vpx_int_pro_row_neon()
    92   const uint8x16_t vec_row6 = vld1q_u8(ref + ref_stride * 5);   in vpx_int_pro_row_neon()
    93   const uint8x16_t vec_row7 = vld1q_u8(ref + ref_stride * 6);   in vpx_int_pro_row_neon()
    94   const uint8x16_t vec_row8 = vld1q_u8(ref + ref_stride * 7);   in vpx_int_pro_row_neon()
    136  const uint8x16_t vec_row = vld1q_u8(ref);                     in vpx_int_pro_col_neon()
|
/external/libaom/libaom/aom_dsp/arm/ |
D | sse_neon.c |
    104  q2 = vld1q_u8(a);        in aom_sse_neon()
    105  q3 = vld1q_u8(b);        in aom_sse_neon()
    115  q2 = vld1q_u8(a);        in aom_sse_neon()
    116  q3 = vld1q_u8(b);        in aom_sse_neon()
    120  q2 = vld1q_u8(a + 16);   in aom_sse_neon()
    121  q3 = vld1q_u8(b + 16);   in aom_sse_neon()
    131  q2 = vld1q_u8(a);        in aom_sse_neon()
    132  q3 = vld1q_u8(b);        in aom_sse_neon()
    136  q2 = vld1q_u8(a + 16);   in aom_sse_neon()
    137  q3 = vld1q_u8(b + 16);   in aom_sse_neon()
    [all …]
|
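aom_sse_neon accumulates squared byte differences between the two buffers it loads above. A sketch of one 16-byte step of that pattern (illustrative helper, not the library function):

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative sum-of-squared-errors step for 16 bytes: square the
 * absolute differences and pairwise-accumulate into 32-bit lanes. */
static uint32x4_t sse_row16(uint32x4_t acc, const uint8_t *a,
                            const uint8_t *b) {
  const uint8x16_t d = vabdq_u8(vld1q_u8(a), vld1q_u8(b));
  const uint8x8_t d_lo = vget_low_u8(d);
  const uint8x8_t d_hi = vget_high_u8(d);
  acc = vpadalq_u16(acc, vmull_u8(d_lo, d_lo));  /* d*d widened to u16 */
  acc = vpadalq_u16(acc, vmull_u8(d_hi, d_hi));
  return acc;
}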
D | sad_neon.c |
    86   q0 = vld1q_u8(src_ptr);                             in aom_sad16x8_neon()
    88   q4 = vld1q_u8(ref_ptr);                             in aom_sad16x8_neon()
    94   q0 = vld1q_u8(src_ptr);                             in aom_sad16x8_neon()
    96   q4 = vld1q_u8(ref_ptr);                             in aom_sad16x8_neon()
    137  const uint8x16_t vec_src_00 = vld1q_u8(src);        in aom_sad64x64_neon()
    138  const uint8x16_t vec_src_16 = vld1q_u8(src + 16);   in aom_sad64x64_neon()
    139  const uint8x16_t vec_src_32 = vld1q_u8(src + 32);   in aom_sad64x64_neon()
    140  const uint8x16_t vec_src_48 = vld1q_u8(src + 48);   in aom_sad64x64_neon()
    141  const uint8x16_t vec_ref_00 = vld1q_u8(ref);        in aom_sad64x64_neon()
    142  const uint8x16_t vec_ref_16 = vld1q_u8(ref + 16);   in aom_sad64x64_neon()
    [all …]
|
D | sad4d_neon.c |
    41   const uint8x16_t vec_ref_00 = vld1q_u8(ref);        in sad_neon_64()
    42   const uint8x16_t vec_ref_16 = vld1q_u8(ref + 16);   in sad_neon_64()
    43   const uint8x16_t vec_ref_32 = vld1q_u8(ref + 32);   in sad_neon_64()
    44   const uint8x16_t vec_ref_48 = vld1q_u8(ref + 48);   in sad_neon_64()
    70   const uint8x16_t vec_ref_00 = vld1q_u8(ref);        in sad_neon_32()
    71   const uint8x16_t vec_ref_16 = vld1q_u8(ref + 16);   in sad_neon_32()
    102  const uint8x16_t vec_src_00 = vld1q_u8(src);        in aom_sad64x64x4d_neon()
    103  const uint8x16_t vec_src_16 = vld1q_u8(src + 16);   in aom_sad64x64x4d_neon()
    104  const uint8x16_t vec_src_32 = vld1q_u8(src + 32);   in aom_sad64x64x4d_neon()
    105  const uint8x16_t vec_src_48 = vld1q_u8(src + 48);   in aom_sad64x64x4d_neon()
    [all …]
|
D | subtract_neon.c |
    27   const uint8x16_t v_src_00 = vld1q_u8(&src[c + 0]);      in aom_subtract_block_neon()
    28   const uint8x16_t v_src_16 = vld1q_u8(&src[c + 16]);     in aom_subtract_block_neon()
    29   const uint8x16_t v_pred_00 = vld1q_u8(&pred[c + 0]);    in aom_subtract_block_neon()
    30   const uint8x16_t v_pred_16 = vld1q_u8(&pred[c + 16]);   in aom_subtract_block_neon()
    50   const uint8x16_t v_src = vld1q_u8(&src[0]);             in aom_subtract_block_neon()
    51   const uint8x16_t v_pred = vld1q_u8(&pred[0]);           in aom_subtract_block_neon()
|
/external/libjpeg-turbo/simd/arm/ |
D | jdsample-neon.c |
    82   uint8x16_t s0 = vld1q_u8(inptr);         in jsimd_h2v1_fancy_upsample_neon()
    83   uint8x16_t s1 = vld1q_u8(inptr + 1);     in jsimd_h2v1_fancy_upsample_neon()
    114  s0 = vld1q_u8(inptr + colctr - 1);       in jsimd_h2v1_fancy_upsample_neon()
    115  s1 = vld1q_u8(inptr + colctr);           in jsimd_h2v1_fancy_upsample_neon()
    241  uint8x16_t s0A = vld1q_u8(inptr0);       in jsimd_h2v2_fancy_upsample_neon()
    242  uint8x16_t s0B = vld1q_u8(inptr1);       in jsimd_h2v2_fancy_upsample_neon()
    243  uint8x16_t s0C = vld1q_u8(inptr2);       in jsimd_h2v2_fancy_upsample_neon()
    256  uint8x16_t s1A = vld1q_u8(inptr0 + 1);   in jsimd_h2v2_fancy_upsample_neon()
    257  uint8x16_t s1B = vld1q_u8(inptr1 + 1);   in jsimd_h2v2_fancy_upsample_neon()
    258  uint8x16_t s1C = vld1q_u8(inptr2 + 1);   in jsimd_h2v2_fancy_upsample_neon()
    [all …]
|
D | jcsample-neon.c |
    85   vld1q_u8(&jsimd_h2_downsample_consts[mask_offset]);                           in jsimd_h2v1_downsample_neon()
    97   uint8x16_t pixels = vld1q_u8(inptr + i * 2 * DCTSIZE);                        in jsimd_h2v1_downsample_neon()
    107  uint8x16_t pixels = vld1q_u8(inptr + (width_in_blocks - 1) * 2 * DCTSIZE);    in jsimd_h2v1_downsample_neon()
    139  vld1q_u8(&jsimd_h2_downsample_consts[mask_offset]);                           in jsimd_h2v2_downsample_neon()
    152  uint8x16_t pixels_r0 = vld1q_u8(inptr0 + i * 2 * DCTSIZE);                    in jsimd_h2v2_downsample_neon()
    153  uint8x16_t pixels_r1 = vld1q_u8(inptr1 + i * 2 * DCTSIZE);                    in jsimd_h2v2_downsample_neon()
    167  vld1q_u8(inptr0 + (width_in_blocks - 1) * 2 * DCTSIZE);                       in jsimd_h2v2_downsample_neon()
    169  vld1q_u8(inptr1 + (width_in_blocks - 1) * 2 * DCTSIZE);                       in jsimd_h2v2_downsample_neon()
|
/external/libhevc/encoder/arm/ |
D | ihevce_sad_compute_neon.c |
    113  const uint8x16_t src = vld1q_u8(pu1_src);            in ihevce_16xn_sad_computer_neon()
    114  const uint8x16_t pred = vld1q_u8(pu1_pred);          in ihevce_16xn_sad_computer_neon()
    143  const uint8x16_t src_0 = vld1q_u8(pu1_src);          in ihevce_32xn_sad_computer_neon()
    144  const uint8x16_t pred_0 = vld1q_u8(pu1_pred);        in ihevce_32xn_sad_computer_neon()
    145  const uint8x16_t src_1 = vld1q_u8(pu1_src + 16);     in ihevce_32xn_sad_computer_neon()
    146  const uint8x16_t pred_1 = vld1q_u8(pu1_pred + 16);   in ihevce_32xn_sad_computer_neon()
    177  const uint8x16_t src_0 = vld1q_u8(pu1_src);          in ihevce_64xn_sad_computer_neon()
    178  const uint8x16_t pred_0 = vld1q_u8(pu1_pred);        in ihevce_64xn_sad_computer_neon()
    179  const uint8x16_t src_1 = vld1q_u8(pu1_src + 16);     in ihevce_64xn_sad_computer_neon()
    180  const uint8x16_t pred_1 = vld1q_u8(pu1_pred + 16);   in ihevce_64xn_sad_computer_neon()
    [all …]
|
D | ihevce_ssd_calculator_neon.c |
    118  src = vld1q_u8(pu1_src);            in ihevce_1x16_ssd_computer_neon()
    119  pred = vld1q_u8(pu1_pred);          in ihevce_1x16_ssd_computer_neon()
    145  src_0 = vld1q_u8(pu1_src);          in ihevce_1x32_ssd_computer_neon()
    146  pred_0 = vld1q_u8(pu1_pred);        in ihevce_1x32_ssd_computer_neon()
    147  src_1 = vld1q_u8(pu1_src + 16);     in ihevce_1x32_ssd_computer_neon()
    148  pred_1 = vld1q_u8(pu1_pred + 16);   in ihevce_1x32_ssd_computer_neon()
    187  src_0 = vld1q_u8(pu1_src);          in ihevce_1x64_ssd_computer_neon()
    188  pred_0 = vld1q_u8(pu1_pred);        in ihevce_1x64_ssd_computer_neon()
    189  src_1 = vld1q_u8(pu1_src + 16);     in ihevce_1x64_ssd_computer_neon()
    190  pred_1 = vld1q_u8(pu1_pred + 16);   in ihevce_1x64_ssd_computer_neon()
    [all …]
|
D | ihevce_ssd_and_sad_calculator_neon.c |
    138  const uint8x16_t src = vld1q_u8(pu1_src);             in ihevce_ssd_and_sad_calculator_neon()
    139  const uint8x16_t pred = vld1q_u8(pu1_recon);          in ihevce_ssd_and_sad_calculator_neon()
    185  const uint8x16_t src_0 = vld1q_u8(pu1_src);           in ihevce_ssd_and_sad_calculator_neon()
    186  const uint8x16_t pred_0 = vld1q_u8(pu1_recon);        in ihevce_ssd_and_sad_calculator_neon()
    187  const uint8x16_t src_1 = vld1q_u8(pu1_src + 16);      in ihevce_ssd_and_sad_calculator_neon()
    188  const uint8x16_t pred_1 = vld1q_u8(pu1_recon + 16);   in ihevce_ssd_and_sad_calculator_neon()
    244  const uint8x16_t src_0 = vld1q_u8(pu1_src);           in ihevce_ssd_and_sad_calculator_neon()
    245  const uint8x16_t pred_0 = vld1q_u8(pu1_recon);        in ihevce_ssd_and_sad_calculator_neon()
    246  const uint8x16_t src_1 = vld1q_u8(pu1_src + 16);      in ihevce_ssd_and_sad_calculator_neon()
    247  const uint8x16_t pred_1 = vld1q_u8(pu1_recon + 16);   in ihevce_ssd_and_sad_calculator_neon()
    [all …]
|
D | ihevce_copy_neon.c |
    109  src_0 = vld1q_u8(src_il);             in ihevce_chroma_interleave_2d_copy_neon()
    110  dst_0 = vld1q_u8(dst_il);             in ihevce_chroma_interleave_2d_copy_neon()
    147  uint8x16_t src = vld1q_u8(pu1_src);   in copy_2d_neon()
    161  src_0 = vld1q_u8(pu1_src);            in copy_2d_neon()
    163  src_1 = vld1q_u8(pu1_src + 16);       in copy_2d_neon()
    184  src_0 = vld1q_u8(src_il);             in copy_2d_neon()
    186  src_1 = vld1q_u8(src_il + 16);        in copy_2d_neon()
    188  src_2 = vld1q_u8(src_il + 32);        in copy_2d_neon()
    190  src_3 = vld1q_u8(src_il + 48);        in copy_2d_neon()
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | intra_edge_neon.cc |
    69   uint8x16_t src_0 = vld1q_u8(dst_buffer);                 in IntraEdgeFilter_NEON()
    76   const uint8x16_t src_1 = vld1q_u8(dst_buffer + i);       in IntraEdgeFilter_NEON()
    77   const uint8x16_t src_2 = vld1q_u8(dst_buffer + i + 1);   in IntraEdgeFilter_NEON()
    90   src_0 = vld1q_u8(dst_buffer + i + 15);                   in IntraEdgeFilter_NEON()
    99   const uint8x16_t src_1 = vld1q_u8(dst_buffer + i);       in IntraEdgeFilter_NEON()
    100  const uint8x16_t src_2 = vld1q_u8(dst_buffer + i + 1);   in IntraEdgeFilter_NEON()
    147  uint8x16_t src_0 = vld1q_u8(dst_buffer - 1);             in IntraEdgeFilter_NEON()
    148  uint8x16_t src_1 = vld1q_u8(dst_buffer);                 in IntraEdgeFilter_NEON()
    154  const uint8x16_t src_2 = vld1q_u8(dst_buffer + i);       in IntraEdgeFilter_NEON()
    155  const uint8x16_t src_3 = vld1q_u8(dst_buffer + i + 1);   in IntraEdgeFilter_NEON()
    [all …]
|
D | intrapred_smooth_neon.cc |
    162  top_v[0] = vld1q_u8(top);                                     in Smooth16PlusxN_NEON()
    164  top_v[1] = vld1q_u8(top + 16);                                in Smooth16PlusxN_NEON()
    166  top_v[2] = vld1q_u8(top + 32);                                in Smooth16PlusxN_NEON()
    167  top_v[3] = vld1q_u8(top + 48);                                in Smooth16PlusxN_NEON()
    178  weights_x_v[0] = vld1q_u8(kSmoothWeights + width - 4);        in Smooth16PlusxN_NEON()
    180  weights_x_v[1] = vld1q_u8(kSmoothWeights + width + 16 - 4);   in Smooth16PlusxN_NEON()
    182  weights_x_v[2] = vld1q_u8(kSmoothWeights + width + 32 - 4);   in Smooth16PlusxN_NEON()
    183  weights_x_v[3] = vld1q_u8(kSmoothWeights + width + 48 - 4);   in Smooth16PlusxN_NEON()
    292  top_v[0] = vld1q_u8(top);                                     in SmoothVertical16PlusxN_NEON()
    294  top_v[1] = vld1q_u8(top + 16);                                in SmoothVertical16PlusxN_NEON()
    [all …]
|
/external/libhevc/common/arm/ |
D | ihevc_sao_edge_offset_class1.s |
    135  …VLD1.8 D8,[r9]!    @pu1_top_row = vld1q_u8(pu1_src_top_cpy || pu1_src - src_s…
    136  …VLD1.8 D9,[r9]!    @pu1_top_row = vld1q_u8(pu1_src_top_cpy || pu1_src - src_s…
    137  VLD1.8 D10,[r0]!    @pu1_cur_row = vld1q_u8(pu1_src)
    138  VLD1.8 D11,[r0]!    @pu1_cur_row = vld1q_u8(pu1_src)
    140  VLD1.8 D30,[r12]!   @vld1q_u8(pu1_src[(ht - 1) * src_strd])
    141  VLD1.8 D31,[r12]!   @vld1q_u8(pu1_src[(ht - 1) * src_strd])
    152  VLD1.8 D18,[r10]!   @pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    153  VLD1.8 D19,[r10]    @pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    158  VLD1.8 D30,[r6]!    @II pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    159  VLD1.8 D31,[r6]     @II pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    [all …]
|
D | ihevc_sao_edge_offset_class1_chroma.s |
    139  …VLD1.8 D28,[r11]!   @pu1_top_row = vld1q_u8(pu1_src_top_cpy || pu1_src - src_s…
    140  …VLD1.8 D29,[r11]!   @pu1_top_row = vld1q_u8(pu1_src_top_cpy || pu1_src - src_s…
    141  VLD1.8 D10,[r0]!     @pu1_cur_row = vld1q_u8(pu1_src)
    142  VLD1.8 D11,[r0]!     @pu1_cur_row = vld1q_u8(pu1_src)
    144  VLD1.8 D30,[r12]!    @vld1q_u8(pu1_src[(ht - 1) * src_strd])
    145  VLD1.8 D31,[r12]!    @vld1q_u8(pu1_src[(ht - 1) * src_strd])
    156  VLD1.8 D18,[r10]!    @pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    157  VLD1.8 D19,[r10]     @pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    162  VLD1.8 D30,[r6]!     @II pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    163  VLD1.8 D31,[r6]      @II pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
    [all …]
|
/external/libaom/libaom/av1/common/arm/ |
D | mem_neon.h |
    59   *s0 = vld1q_u8(s);                       in load_u8_8x16()
    61   *s1 = vld1q_u8(s);                       in load_u8_8x16()
    63   *s2 = vld1q_u8(s);                       in load_u8_8x16()
    65   *s3 = vld1q_u8(s);                       in load_u8_8x16()
    323  if (stride == 4) return vld1q_u8(buf);   in load_unaligned_u8q()
    433  *s0 = vld1q_u8(s);                       in load_u8_16x8()
    435  *s1 = vld1q_u8(s);                       in load_u8_16x8()
    437  *s2 = vld1q_u8(s);                       in load_u8_16x8()
    439  *s3 = vld1q_u8(s);                       in load_u8_16x8()
    441  *s4 = vld1q_u8(s);                       in load_u8_16x8()
    [all …]
|
/external/libvpx/libvpx/vp8/common/arm/neon/ |
D | bilinearpredict_neon.c |
    142  q1u8 = vld1q_u8(src_ptr);   in vp8_bilinear_predict8x4_neon()
    144  q2u8 = vld1q_u8(src_ptr);   in vp8_bilinear_predict8x4_neon()
    146  q3u8 = vld1q_u8(src_ptr);   in vp8_bilinear_predict8x4_neon()
    148  q4u8 = vld1q_u8(src_ptr);   in vp8_bilinear_predict8x4_neon()
    150  q5u8 = vld1q_u8(src_ptr);   in vp8_bilinear_predict8x4_neon()
    248  q1u8 = vld1q_u8(src_ptr);   in vp8_bilinear_predict8x8_neon()
    250  q2u8 = vld1q_u8(src_ptr);   in vp8_bilinear_predict8x8_neon()
    252  q3u8 = vld1q_u8(src_ptr);   in vp8_bilinear_predict8x8_neon()
    254  q4u8 = vld1q_u8(src_ptr);   in vp8_bilinear_predict8x8_neon()
    281  q1u8 = vld1q_u8(src_ptr);   in vp8_bilinear_predict8x8_neon()
    [all …]
|