/external/clang/test/CodeGen/

D | aarch64-neon-tbl.c
    510  uint8x8_t test_vtbl1_u8(uint8x8_t a, uint8x8_t b) {  in test_vtbl1_u8()
    517  uint8x8_t test_vqtbl1_u8(uint8x16_t a, uint8x8_t b) {  in test_vqtbl1_u8()
    539  uint8x8_t test_vtbl2_u8(uint8x8x2_t a, uint8x8_t b) {  in test_vtbl2_u8()
    560  uint8x8_t test_vqtbl2_u8(uint8x16x2_t a, uint8x8_t b) {  in test_vqtbl2_u8()
    586  uint8x8_t test_vtbl3_u8(uint8x8x3_t a, uint8x8_t b) {  in test_vtbl3_u8()
    610  uint8x8_t test_vqtbl3_u8(uint8x16x3_t a, uint8x8_t b) {  in test_vqtbl3_u8()
    639  uint8x8_t test_vtbl4_u8(uint8x8x4_t a, uint8x8_t b) {  in test_vtbl4_u8()
    666  uint8x8_t test_vqtbl4_u8(uint8x16x4_t a, uint8x8_t b) {  in test_vqtbl4_u8()
    759  uint8x8_t test_vtbx1_u8(uint8x8_t a, uint8x8_t b, uint8x8_t c) {  in test_vtbx1_u8()
    781  uint8x8_t test_vtbx2_u8(uint8x8_t a, uint8x8x2_t b, uint8x8_t c) {  in test_vtbx2_u8()
    [all …]

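These Clang CodeGen tests pin down the IR emitted for the NEON table-lookup family. As a reminder of the semantics they exercise (a minimal standalone sketch, not the test file's contents): vtbl1_u8 selects bytes from a table vector by per-lane index, producing 0 for out-of-range indices, while the vtbx variants keep the destination byte instead.

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        const uint8_t tbl_bytes[8] = {10, 11, 12, 13, 14, 15, 16, 17};
        const uint8_t idx_bytes[8] = {7, 6, 5, 4, 3, 2, 1, 200};  /* 200 is out of range */
        const uint8x8_t tbl = vld1_u8(tbl_bytes);
        const uint8x8_t idx = vld1_u8(idx_bytes);

        /* vtbl1_u8: per-lane table lookup; out-of-range indices yield 0. */
        const uint8x8_t r = vtbl1_u8(tbl, idx);

        uint8_t out[8];
        vst1_u8(out, r);
        for (int i = 0; i < 8; ++i) printf("%u ", out[i]);  /* 17 16 15 14 13 12 11 0 */
        printf("\n");
        return 0;
    }
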
D | aarch64-neon-shifts.c
    6   uint8x8_t test_shift_vshr(uint8x8_t a) {  in test_shift_vshr()
    18  uint8x8_t test_shift_vshr_umax(uint8x8_t a) {  in test_shift_vshr_umax()
    24  uint8x8_t test_shift_vsra(uint8x8_t a, uint8x8_t b) {  in test_shift_vsra()
    38  uint8x8_t test_shift_vsra_umax(uint8x8_t a, uint8x8_t b) {  in test_shift_vsra_umax()

D | arm-neon-shifts.c
    9   uint8x8_t test_shift_vshr(uint8x8_t a) {  in test_shift_vshr()
    21  uint8x8_t test_shift_vshr_umax(uint8x8_t a) {  in test_shift_vshr_umax()
    27  uint8x8_t test_shift_vsra(uint8x8_t a, uint8x8_t b) {  in test_shift_vsra()
    41  uint8x8_t test_shift_vsra_umax(uint8x8_t a, uint8x8_t b) {  in test_shift_vsra_umax()

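Both shift test files cover the same two operations: vshr_n_u8 is an immediate unsigned right shift, and vsra_n_u8 is shift-right-and-accumulate. A hedged sketch of what they check (hypothetical function names; the shift amount 3 is illustrative):

    #include <arm_neon.h>

    /* Immediate unsigned right shift: each lane becomes a[i] >> 3. */
    uint8x8_t shift_like_vshr(uint8x8_t a) {
        return vshr_n_u8(a, 3);
    }

    /* Shift-right-and-accumulate: each lane becomes a[i] + (b[i] >> 3). */
    uint8x8_t shift_like_vsra(uint8x8_t a, uint8x8_t b) {
        return vsra_n_u8(a, b, 3);
    }
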
D | arm_neon_intrinsics.c
    49    uint8x8_t test_vaba_u8(uint8x8_t a, uint8x8_t b, uint8x8_t c) {  in test_vaba_u8()
    202   uint16x8_t test_vabal_u8(uint16x8_t a, uint8x8_t b, uint8x8_t c) {  in test_vabal_u8()
    277   uint8x8_t test_vabd_u8(uint8x8_t a, uint8x8_t b) {  in test_vabd_u8()
    444   uint16x8_t test_vabdl_u8(uint8x8_t a, uint8x8_t b) {  in test_vabdl_u8()
    588   uint8x8_t test_vadd_u8(uint8x8_t a, uint8x8_t b) {  in test_vadd_u8()
    725   uint8x8_t test_vaddhn_u16(uint16x8_t a, uint16x8_t b) {  in test_vaddhn_u16()
    796   uint16x8_t test_vaddl_u8(uint8x8_t a, uint8x8_t b) {  in test_vaddl_u8()
    859   uint16x8_t test_vaddw_u8(uint16x8_t a, uint8x8_t b) {  in test_vaddw_u8()
    915   uint8x8_t test_vand_u8(uint8x8_t a, uint8x8_t b) {  in test_vand_u8()
    1033  uint8x8_t test_vbic_u8(uint8x8_t a, uint8x8_t b) {  in test_vbic_u8()
    [all …]

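The listing spans the absolute-difference family, whose members differ only in widening and accumulation. A standalone sketch of the relationship (illustrative helpers, not the test bodies):

    #include <arm_neon.h>

    /* |b - c| per lane; result stays 8-bit. */
    uint8x8_t abs_diff(uint8x8_t b, uint8x8_t c) {
        return vabd_u8(b, c);
    }

    /* Widening variant: |b - c| per lane, zero-extended to 16 bits. */
    uint16x8_t abs_diff_long(uint8x8_t b, uint8x8_t c) {
        return vabdl_u8(b, c);
    }

    /* Accumulating variant: a + |b - c| per lane (as in test_vaba_u8). */
    uint8x8_t abs_diff_accumulate(uint8x8_t a, uint8x8_t b, uint8x8_t c) {
        return vaba_u8(a, b, c);
    }
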
D | aarch64-neon-3v.c
    66   uint8x8_t test_vand_u8(uint8x8_t a, uint8x8_t b) {  in test_vand_u8()
    178  uint8x8_t test_vorr_u8(uint8x8_t a, uint8x8_t b) {  in test_vorr_u8()
    290  uint8x8_t test_veor_u8(uint8x8_t a, uint8x8_t b) {  in test_veor_u8()
    411  uint8x8_t test_vbic_u8(uint8x8_t a, uint8x8_t b) {  in test_vbic_u8()
    539  uint8x8_t test_vorn_u8(uint8x8_t a, uint8x8_t b) {  in test_vorn_u8()

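These are the five two-operand bitwise forms. Summarised as a sketch (one-line wrappers; the per-lane semantics are in the comments):

    #include <arm_neon.h>

    uint8x8_t and_u8(uint8x8_t a, uint8x8_t b) { return vand_u8(a, b); }  /* a & b  */
    uint8x8_t orr_u8(uint8x8_t a, uint8x8_t b) { return vorr_u8(a, b); }  /* a | b  */
    uint8x8_t eor_u8(uint8x8_t a, uint8x8_t b) { return veor_u8(a, b); }  /* a ^ b  */
    uint8x8_t bic_u8(uint8x8_t a, uint8x8_t b) { return vbic_u8(a, b); }  /* a & ~b */
    uint8x8_t orn_u8(uint8x8_t a, uint8x8_t b) { return vorn_u8(a, b); }  /* a | ~b */
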
/external/libvpx/libvpx/vpx_dsp/arm/

D | deblock_neon.c
    20  static uint8x8_t average_k_out(const uint8x8_t a2, const uint8x8_t a1,  in average_k_out()
    21                                 const uint8x8_t v0, const uint8x8_t b1,  in average_k_out()
    22                                 const uint8x8_t b2) {  in average_k_out()
    23    const uint8x8_t k1 = vrhadd_u8(a2, a1);  in average_k_out()
    24    const uint8x8_t k2 = vrhadd_u8(b2, b1);  in average_k_out()
    25    const uint8x8_t k3 = vrhadd_u8(k1, k2);  in average_k_out()
    29  static uint8x8_t generate_mask(const uint8x8_t a2, const uint8x8_t a1,  in generate_mask()
    30                                 const uint8x8_t v0, const uint8x8_t b1,  in generate_mask()
    31                                 const uint8x8_t b2, const uint8x8_t filter) {  in generate_mask()
    32    const uint8x8_t a2_v0 = vabd_u8(a2, v0);  in generate_mask()
    [all …]

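average_k_out() builds a rounding average of four neighbours from three vrhadd_u8 steps. A condensed sketch of that idea (hypothetical helper; note that the v0 parameter in the listing does not appear in the averaging itself):

    #include <arm_neon.h>

    /* Rounding average of four neighbours in three vrhadd_u8 steps,
     * mirroring the average_k_out() lines above. */
    static uint8x8_t average4_round(const uint8x8_t a2, const uint8x8_t a1,
                                    const uint8x8_t b1, const uint8x8_t b2) {
        const uint8x8_t k1 = vrhadd_u8(a2, a1);  /* (a2 + a1 + 1) >> 1 */
        const uint8x8_t k2 = vrhadd_u8(b2, b1);  /* (b2 + b1 + 1) >> 1 */
        return vrhadd_u8(k1, k2);                /* rounding average of the averages */
    }
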
D | intrapred_neon.c
    21  const uint8x8_t ref_u8 = vld1_u8(ref);  in dc_sum_4()
    27                               const uint8x8_t dc) {  in dc_store_4x4()
    28  const uint8x8_t dc_dup = vdup_lane_u8(dc, 0);  in dc_store_4x4()
    37  const uint8x8_t a = vld1_u8(above);  in vpx_dc_predictor_4x4_neon()
    38  const uint8x8_t l = vld1_u8(left);  in vpx_dc_predictor_4x4_neon()
    41  uint8x8_t dc;  in vpx_dc_predictor_4x4_neon()
    51  const uint8x8_t dc = vreinterpret_u8_u16(vrshr_n_u16(sum, 2));  in vpx_dc_left_predictor_4x4_neon()
    59  const uint8x8_t dc = vreinterpret_u8_u16(vrshr_n_u16(sum, 2));  in vpx_dc_top_predictor_4x4_neon()
    66  const uint8x8_t dc = vdup_n_u8(0x80);  in vpx_dc_128_predictor_4x4_neon()
    76  const uint8x8_t ref_u8 = vld1_u8(ref);  in dc_sum_8()
    [all …]

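The DC predictors sum the border pixels with pairwise widening adds, round-shift, and broadcast. A sketch of the top-only 4x4 case under those assumptions (hypothetical helper; like the listing's dc_sum_4, it loads 8 bytes and uses 4, so the border must be safely readable):

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    static void dc_top_4x4_sketch(uint8_t *dst, ptrdiff_t stride,
                                  const uint8_t *above) {
        const uint8x8_t ref = vld1_u8(above);      /* loads 8 bytes, uses 4 */
        const uint16x4_t p0 = vpaddl_u8(ref);      /* pairwise widening add */
        const uint16x4_t sum = vpadd_u16(p0, p0);  /* lane 0 = above[0..3] sum */
        /* dc = (sum + 2) >> 2; lane 0 of the byte view holds the value,
         * which vdup_lane_u8 broadcasts to all 8 lanes. */
        const uint8x8_t dc =
            vdup_lane_u8(vreinterpret_u8_u16(vrshr_n_u16(sum, 2)), 0);
        for (int i = 0; i < 4; ++i, dst += stride) {
            vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(dc), 0);
        }
    }
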
D | transpose_neon.h
    67   static INLINE void transpose_u8_4x4(uint8x8_t *a0, uint8x8_t *a1) {  in transpose_u8_4x4()
    191  static INLINE void transpose_u8_4x8(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,  in transpose_u8_4x8()
    192                                      uint8x8_t *a3, const uint8x8_t a4,  in transpose_u8_4x8()
    193                                      const uint8x8_t a5, const uint8x8_t a6,  in transpose_u8_4x8()
    194                                      const uint8x8_t a7) {  in transpose_u8_4x8()
    396  static INLINE void transpose_u8_8x4(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,  in transpose_u8_8x4()
    397                                      uint8x8_t *a3) {  in transpose_u8_8x4()
    517  static INLINE void transpose_u8_8x8(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,  in transpose_u8_8x8()
    518                                      uint8x8_t *a3, uint8x8_t *a4, uint8x8_t *a5,  in transpose_u8_8x8()
    519                                      uint8x8_t *a6, uint8x8_t *a7) {  in transpose_u8_8x8()
    [all …]

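All of these transposes are built from interleave butterflies: vtrn_u8 swaps 2x2 blocks of bytes, and the full 8x8 version chains vtrn_u8/vtrn_u16/vtrn_u32 stages. The elementary step, as a sketch:

    #include <arm_neon.h>

    /* First butterfly stage: interleave even/odd lanes of two rows,
     * transposing every 2x2 block of bytes in place. */
    static void transpose_2x2_blocks(uint8x8_t *r0, uint8x8_t *r1) {
        const uint8x8x2_t t = vtrn_u8(*r0, *r1);
        *r0 = t.val[0];  /* r0[0], r1[0], r0[2], r1[2], ... */
        *r1 = t.val[1];  /* r0[1], r1[1], r0[3], r1[3], ... */
    }
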
D | vpx_convolve8_neon.h
    20   uint8x8_t *const s0, uint8x8_t *const s1,  in load_u8_8x4()
    21   uint8x8_t *const s2, uint8x8_t *const s3) {  in load_u8_8x4()
    32   uint8x8_t *const s0, uint8x8_t *const s1,  in load_u8_8x8()
    33   uint8x8_t *const s2, uint8x8_t *const s3,  in load_u8_8x8()
    34   uint8x8_t *const s4, uint8x8_t *const s5,  in load_u8_8x8()
    35   uint8x8_t *const s6, uint8x8_t *const s7) {  in load_u8_8x8()
    97   static INLINE uint8x8_t convolve8_8(const int16x8_t s0, const int16x8_t s1,  in convolve8_8()
    119  static INLINE uint8x8_t scale_filter_8(const uint8x8_t *const s,  in scale_filter_8()

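The load_u8_8x4/load_u8_8x8 helpers fan a strided byte buffer out into registers for the convolution kernels. A plausible shape for the 4-row variant, inferred from the signature above (a sketch, not the header's exact body):

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Load four 8-byte rows, stride p apart, into separate registers. */
    static inline void load_u8_8x4_sketch(const uint8_t *s, ptrdiff_t p,
                                          uint8x8_t *const s0, uint8x8_t *const s1,
                                          uint8x8_t *const s2, uint8x8_t *const s3) {
        *s0 = vld1_u8(s);  s += p;
        *s1 = vld1_u8(s);  s += p;
        *s2 = vld1_u8(s);  s += p;
        *s3 = vld1_u8(s);
    }
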
D | subpel_variance_neon.c
    32  const uint8x8_t f0 = vdup_n_u8(filter[0]);  in var_filter_block2d_bil_w4()
    33  const uint8x8_t f1 = vdup_n_u8(filter[1]);  in var_filter_block2d_bil_w4()
    36  const uint8x8_t src_0 = load_unaligned_u8(src_ptr, src_pixels_per_line);  in var_filter_block2d_bil_w4()
    37  const uint8x8_t src_1 =  in var_filter_block2d_bil_w4()
    41  const uint8x8_t out = vrshrn_n_u16(b, FILTER_BITS);  in var_filter_block2d_bil_w4()
    55  const uint8x8_t f0 = vdup_n_u8(filter[0]);  in var_filter_block2d_bil_w8()
    56  const uint8x8_t f1 = vdup_n_u8(filter[1]);  in var_filter_block2d_bil_w8()
    59  const uint8x8_t src_0 = vld1_u8(&src_ptr[0]);  in var_filter_block2d_bil_w8()
    60  const uint8x8_t src_1 = vld1_u8(&src_ptr[pixel_step]);  in var_filter_block2d_bil_w8()
    63  const uint8x8_t out = vrshrn_n_u16(b, FILTER_BITS);  in var_filter_block2d_bil_w8()
    [all …]

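Each width variant applies the same two-tap bilinear step. A sketch of the arithmetic implied by the f0/f1/vrshrn_n_u16 lines (FILTER_BITS = 7 is an assumption matching the usual libvpx convention of filter taps summing to 128):

    #include <arm_neon.h>

    #define FILTER_BITS 7  /* assumption: taps f0 + f1 == 128 */

    /* out[i] = round((src_0[i] * f0 + src_1[i] * f1) >> FILTER_BITS) */
    static uint8x8_t bilinear_8(const uint8x8_t src_0, const uint8x8_t src_1,
                                const uint8x8_t f0, const uint8x8_t f1) {
        const uint16x8_t a = vmull_u8(src_0, f0);     /* widen and multiply */
        const uint16x8_t b = vmlal_u8(a, src_1, f1);  /* multiply-accumulate */
        return vrshrn_n_u16(b, FILTER_BITS);          /* round, narrow to u8 */
    }
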
D | idct8x8_1_add_neon.c
    16  static INLINE uint8x8_t create_dcd(const int16_t dc) {  in create_dcd()
    22                                     const uint8x8_t res) {  in idct8x8_1_add_pos_kernel()
    23  const uint8x8_t a = vld1_u8(*dest);  in idct8x8_1_add_pos_kernel()
    24  const uint8x8_t b = vqadd_u8(a, res);  in idct8x8_1_add_pos_kernel()
    30                                     const uint8x8_t res) {  in idct8x8_1_add_neg_kernel()
    31  const uint8x8_t a = vld1_u8(*dest);  in idct8x8_1_add_neg_kernel()
    32  const uint8x8_t b = vqsub_u8(a, res);  in idct8x8_1_add_neg_kernel()
    45  const uint8x8_t dc = create_dcd(a1);  in vpx_idct8x8_1_add_neon()
    55  const uint8x8_t dc = create_dcd(-a1);  in vpx_idct8x8_1_add_neon()

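The separate pos/neg kernels exist because the DC term can be negative while pixels are unsigned: the add must saturate at 255 and the subtract at 0. A condensed sketch:

    #include <arm_neon.h>

    /* Add a broadcast |DC| magnitude to a row of pixels with unsigned
     * saturation, choosing vqadd or vqsub by the sign of the DC term. */
    static uint8x8_t add_dc_row(const uint8x8_t pixels, const uint8x8_t dc,
                                int dc_is_negative) {
        return dc_is_negative ? vqsub_u8(pixels, dc)   /* clamps at 0 */
                              : vqadd_u8(pixels, dc);  /* clamps at 255 */
    }
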
D | loopfilter_neon.c
    49   static INLINE uint32_t calc_flat_status_8(uint8x8_t flat) {  in calc_flat_status_8()
    63   const uint8x8_t flat_4bit =  in calc_flat_status_16()
    173  static INLINE void filter_update_8(const uint8x8_t sub0, const uint8x8_t sub1,  in filter_update_8()
    174                                     const uint8x8_t add0, const uint8x8_t add1,  in filter_update_8()
    197  static INLINE uint8x8_t calc_7_tap_filter_8_kernel(const uint8x8_t sub0,  in calc_7_tap_filter_8_kernel()
    198                                                     const uint8x8_t sub1,  in calc_7_tap_filter_8_kernel()
    199                                                     const uint8x8_t add0,  in calc_7_tap_filter_8_kernel()
    200                                                     const uint8x8_t add1,  in calc_7_tap_filter_8_kernel()
    213  static INLINE uint8x8_t apply_15_tap_filter_8_kernel(  in apply_15_tap_filter_8_kernel()
    214      const uint8x8_t flat, const uint8x8_t sub0, const uint8x8_t sub1,  in apply_15_tap_filter_8_kernel()
    [all …]

/external/libaom/libaom/aom_dsp/arm/

D | loopfilter_neon.c
    21  static INLINE uint8x8_t lpf_mask(uint8x8_t p3q3, uint8x8_t p2q2, uint8x8_t p1q1,  in lpf_mask()
    22                                   uint8x8_t p0q0, const uint8_t blimit,  in lpf_mask()
    28  uint8x8_t mask_8x8, temp_8x8;  in lpf_mask()
    29  const uint8x8_t limit_8x8 = vdup_n_u8(limit);  in lpf_mask()
    55  static INLINE uint8x8_t lpf_mask2(uint8x8_t p1q1, uint8x8_t p0q0,  in lpf_mask2()
    61  const uint8x8_t limit_8x8 = vdup_n_u8(limit);  in lpf_mask2()
    62  uint8x8_t mask_8x8, temp_8x8;  in lpf_mask2()
    85  static INLINE uint8x8_t lpf_flat_mask4(uint8x8_t p3q3, uint8x8_t p2q2,  in lpf_flat_mask4()
    86                                         uint8x8_t p1q1, uint8x8_t p0q0) {  in lpf_flat_mask4()
    87  const uint8x8_t thresh_8x8 = vdup_n_u8(1);  // for bd==8 threshold is always 1  in lpf_flat_mask4()
    [all …]

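The lpf_mask helpers boil down to "filter only where neighbouring differences stay within limit". A reduced sketch of that comparison pattern (illustrative only; the real masks fold in more taps and a blimit term):

    #include <arm_neon.h>

    /* All-ones lanes where |p1 - p0| (and |q1 - q0|, packed in the same
     * register) is within the filtering limit; zero lanes elsewhere. */
    static uint8x8_t simple_lpf_mask(uint8x8_t p1q1, uint8x8_t p0q0,
                                     const uint8_t limit) {
        const uint8x8_t limit_8x8 = vdup_n_u8(limit);
        const uint8x8_t diff = vabd_u8(p1q1, p0q0);  /* absolute difference */
        return vcle_u8(diff, limit_8x8);             /* 0xFF where diff <= limit */
    }
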
D | subpel_variance_neon.c
    30  const uint8x8_t f0 = vmov_n_u8(filter[0]);  in var_filter_block2d_bil_w8()
    31  const uint8x8_t f1 = vmov_n_u8(filter[1]);  in var_filter_block2d_bil_w8()
    34  const uint8x8_t src_0 = vld1_u8(&src_ptr[0]);  in var_filter_block2d_bil_w8()
    35  const uint8x8_t src_1 = vld1_u8(&src_ptr[pixel_step]);  in var_filter_block2d_bil_w8()
    38  const uint8x8_t out = vrshrn_n_u16(b, FILTER_BITS);  in var_filter_block2d_bil_w8()
    53  const uint8x8_t f0 = vmov_n_u8(filter[0]);  in var_filter_block2d_bil_w16()
    54  const uint8x8_t f1 = vmov_n_u8(filter[1]);  in var_filter_block2d_bil_w16()
    62  const uint8x8_t out_lo = vrshrn_n_u16(b, FILTER_BITS);  in var_filter_block2d_bil_w16()
    65  const uint8x8_t out_hi = vrshrn_n_u16(d, FILTER_BITS);  in var_filter_block2d_bil_w16()

D | intrapred_neon.c
    27   uint8x8_t dc0;  in dc_4x4()
    30   const uint8x8_t A = vld1_u8(above);  // top row  in dc_4x4()
    37   const uint8x8_t L = vld1_u8(left);  // left border  in dc_4x4()
    55   const uint8x8_t dc = vdup_lane_u8(dc0, 0);  in dc_4x4()
    95   uint8x8_t dc0;  in dc_8x8()
    98   const uint8x8_t A = vld1_u8(above);  // top row  in dc_8x8()
    106  const uint8x8_t L = vld1_u8(left);  // left border  in dc_8x8()
    125  const uint8x8_t dc = vdup_lane_u8(dc0, 0);  in dc_8x8()
    166  uint8x8_t dc0;  in dc_16x16()
    242  uint8x8_t dc0;  in dc_32x32()
    [all …]

/external/webp/src/dsp/

D | upsampling_neon.c
    32  const uint8x8_t a = vld1_u8(r1 + 0); \
    33  const uint8x8_t b = vld1_u8(r1 + 1); \
    34  const uint8x8_t c = vld1_u8(r2 + 0); \
    35  const uint8x8_t d = vld1_u8(r2 + 1); \
    45  const uint8x8_t diag2 = vshrn_n_u16(al, 3); \
    46  const uint8x8_t diag1 = vshrn_n_u16(bl, 3); \
    48  const uint8x8_t A = vrhadd_u8(a, diag1); \
    49  const uint8x8_t B = vrhadd_u8(b, diag2); \
    50  const uint8x8_t C = vrhadd_u8(c, diag2); \
    51  const uint8x8_t D = vrhadd_u8(d, diag1); \
    [all …]

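These macro lines are WebP's "fancy" 2x chroma upsampler: 16-bit weighted sums of the 2x2 neighbourhood (al, bl) are narrowed by 3 bits, then blended into each corner with a rounding average. A reduced sketch of that final stage (computing al is elided; it is assumed to already hold the weighted neighbourhood sum):

    #include <arm_neon.h>

    /* Narrow a 16-bit weighted sum by 3 bits, then take a rounding
     * average against the original sample, as in A = vrhadd_u8(a, diag1). */
    static uint8x8_t upsample_corner(const uint8x8_t a, const uint16x8_t al) {
        const uint8x8_t diag = vshrn_n_u16(al, 3);  /* (al >> 3), narrowed to u8 */
        return vrhadd_u8(a, diag);                  /* (a + diag + 1) >> 1 */
    }
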
D | lossless_neon.c
    77   const uint8x8_t shuffle = vld1_u8(kRGBAShuffle);  in ConvertBGRAToRGBA_NEON()
    79   const uint8x8_t pixels = vld1_u8((uint8_t*)src);  in ConvertBGRAToRGBA_NEON()
    95   const uint8x8_t shuffle0 = vld1_u8(kBGRShuffle[0]);  in ConvertBGRAToBGR_NEON()
    96   const uint8x8_t shuffle1 = vld1_u8(kBGRShuffle[1]);  in ConvertBGRAToBGR_NEON()
    97   const uint8x8_t shuffle2 = vld1_u8(kBGRShuffle[2]);  in ConvertBGRAToBGR_NEON()
    122  const uint8x8_t shuffle0 = vld1_u8(kRGBShuffle[0]);  in ConvertBGRAToRGB_NEON()
    123  const uint8x8_t shuffle1 = vld1_u8(kRGBShuffle[1]);  in ConvertBGRAToRGB_NEON()
    124  const uint8x8_t shuffle2 = vld1_u8(kRGBShuffle[2]);  in ConvertBGRAToRGB_NEON()
    154  static WEBP_INLINE uint8x8_t Average2_u8_NEON(uint32_t a0, uint32_t a1) {  in Average2_u8_NEON()
    155  const uint8x8_t A0 = LOAD_U32_AS_U8(a0);  in Average2_u8_NEON()
    [all …]

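Here vtbl1_u8 serves as a byte shuffle: a constant index vector reorders the channels of two RGBA pixels per iteration. A sketch of the idea (the index table shown is an assumption consistent with the kRGBAShuffle name, swapping B and R within each 4-byte pixel):

    #include <arm_neon.h>

    /* Reorder B,G,R,A -> R,G,B,A for two pixels using a table lookup. */
    static uint8x8_t bgra_to_rgba_2px(const uint8x8_t pixels) {
        static const uint8_t kShuffle[8] = {2, 1, 0, 3, 6, 5, 4, 7};  /* assumed */
        const uint8x8_t shuffle = vld1_u8(kShuffle);
        return vtbl1_u8(pixels, shuffle);
    }
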
/external/libaom/libaom/av1/common/arm/

D | blend_a64_vmask_neon.c
    27   uint8x8_t tmp0, tmp1;  in aom_blend_a64_vmask_neon()
    42   const uint8x8_t m = vdup_n_u8((uint8_t)mask[i]);  in aom_blend_a64_vmask_neon()
    43   const uint8x8_t max_minus_m = vdup_n_u8(64 - (uint8_t)mask[i]);  in aom_blend_a64_vmask_neon()
    68   const uint8x8_t m = vdup_n_u8((uint8_t)mask[i]);  in aom_blend_a64_vmask_neon()
    69   const uint8x8_t max_minus_m = vdup_n_u8(64 - (uint8_t)mask[i]);  in aom_blend_a64_vmask_neon()
    87   const uint8x8_t m = vmovn_u16(vcombine_u16(m1, m2));  in aom_blend_a64_vmask_neon()
    90   const uint8x8_t max_minus_m =  in aom_blend_a64_vmask_neon()
    114  const uint8x8_t m1 = vdup_n_u8(mask[i]);  in aom_blend_a64_vmask_neon()
    115  const uint8x8_t m2 = vdup_n_u8(mask[i + 1]);  in aom_blend_a64_vmask_neon()
    118  const uint8x8_t m = vreinterpret_u8_u16(m_trn.val[0]);  in aom_blend_a64_vmask_neon()
    [all …]

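All of the m / max_minus_m pairs feed the same 6-bit alpha blend, out = (m*a + (64-m)*b + 32) >> 6 with mask values m in [0, 64]. A standalone sketch of the per-8-lane kernel:

    #include <arm_neon.h>

    /* out[i] = round((m[i]*a[i] + (64 - m[i])*b[i]) >> 6) */
    static uint8x8_t blend_a64_8(const uint8x8_t m, const uint8x8_t max_minus_m,
                                 const uint8x8_t a, const uint8x8_t b) {
        uint16x8_t blend = vmull_u8(m, a);        /* m * a, widened to 16 bits */
        blend = vmlal_u8(blend, max_minus_m, b);  /* + (64 - m) * b */
        return vrshrn_n_u16(blend, 6);            /* round and narrow */
    }
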
D | mem_neon.h
    17   static INLINE void store_row2_u8_8x8(uint8_t *s, int p, const uint8x8_t s0,  in store_row2_u8_8x8()
    18                                        const uint8x8_t s1) {  in store_row2_u8_8x8()
    34   uint8x8_t *const s0, uint8x8_t *const s1,  in load_u8_8x8()
    35   uint8x8_t *const s2, uint8x8_t *const s3,  in load_u8_8x8()
    36   uint8x8_t *const s4, uint8x8_t *const s5,  in load_u8_8x8()
    37   uint8x8_t *const s6, uint8x8_t *const s7) {  in load_u8_8x8()
    68   uint8x8_t *const s0, uint8x8_t *const s1,  in load_u8_8x4()
    69   uint8x8_t *const s2, uint8x8_t *const s3) {  in load_u8_8x4()
    146  static INLINE void store_u8_8x8(uint8_t *s, ptrdiff_t p, const uint8x8_t s0,  in store_u8_8x8()
    147                                  const uint8x8_t s1, const uint8x8_t s2,  in store_u8_8x8()
    [all …]

D | transpose_neon.h
    16   static INLINE void transpose_u8_8x8(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,  in transpose_u8_8x8()
    17                                       uint8x8_t *a3, uint8x8_t *a4, uint8x8_t *a5,  in transpose_u8_8x8()
    18                                       uint8x8_t *a6, uint8x8_t *a7) {  in transpose_u8_8x8()
    70   static INLINE void transpose_u8_8x4(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,  in transpose_u8_8x4()
    71                                       uint8x8_t *a3) {  in transpose_u8_8x4()
    103  static INLINE void transpose_u8_4x4(uint8x8_t *a0, uint8x8_t *a1) {  in transpose_u8_4x4()
    132  static INLINE void transpose_u8_4x8(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,  in transpose_u8_4x8()
    133                                      uint8x8_t *a3, const uint8x8_t a4,  in transpose_u8_4x8()
    134                                      const uint8x8_t a5, const uint8x8_t a6,  in transpose_u8_4x8()
    135                                      const uint8x8_t a7) {  in transpose_u8_4x8()

D | blend_a64_hmask_neon.c
    34   uint8x8_t tmp0, tmp1;  in aom_blend_a64_hmask_neon()
    39   const uint8x8_t vdup_64 = vdup_n_u8((uint8_t)64);  in aom_blend_a64_hmask_neon()
    71   const uint8x8_t m = vld1_u8(mask);  in aom_blend_a64_hmask_neon()
    72   const uint8x8_t max_minus_m = vsub_u8(vdup_64, m);  in aom_blend_a64_hmask_neon()
    86   const uint8x8_t m = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)mask));  in aom_blend_a64_hmask_neon()
    87   const uint8x8_t max_minus_m = vsub_u8(vdup_64, m);  in aom_blend_a64_hmask_neon()
    110  const uint8x8_t m = vreinterpret_u8_u16(vld1_dup_u16((uint16_t *)mask));  in aom_blend_a64_hmask_neon()
    111  const uint8x8_t max_minus_m = vsub_u8(vdup_64, m);  in aom_blend_a64_hmask_neon()

/external/skqp/src/opts/

D | SkBlitRow_opts.h
    47   static inline uint8x8_t SkMulDiv255Round_neon8(uint8x8_t x, uint8x8_t y) {  in SkMulDiv255Round_neon8()
    63   uint8x8_t nalphas = vmvn_u8(src.val[3]);  in SkPMSrcOver_neon8()
    74   static inline uint8x8_t SkPMSrcOver_neon2(uint8x8_t dst, uint8x8_t src) {  in SkPMSrcOver_neon2()
    75   const uint8x8_t alpha_indices = vcreate_u8(0x0707070703030303);  in SkPMSrcOver_neon2()
    76   uint8x8_t nalphas = vmvn_u8(vtbl1_u8(src, alpha_indices));  in SkPMSrcOver_neon2()
    201  uint8x8_t alphas = src_col.val[3];  in blit_row_s32a_opaque()
    223  uint8x8_t src2 = vld1_u8(reinterpret_cast<const uint8_t*>(src));  in blit_row_s32a_opaque()
    224  uint8x8_t dst2 = vld1_u8(reinterpret_cast<const uint8_t*>(dst));  in blit_row_s32a_opaque()
    229  uint8x8_t result = SkPMSrcOver_neon2(vcreate_u8(*dst), vcreate_u8(*src));  in blit_row_s32a_opaque()

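SkMulDiv255Round_neon8 computes round(x*y/255) per byte, the workhorse of premultiplied src-over blending. One formulation consistent with the name (a sketch; the upstream body may differ in detail) uses the classic add-high-byte rounding trick:

    #include <arm_neon.h>

    /* round(x * y / 255) per lane, via (p + 128 + ((p + 128) >> 8)) >> 8. */
    static inline uint8x8_t mul_div_255_round(uint8x8_t x, uint8x8_t y) {
        const uint16x8_t prod = vmull_u8(x, y);
        /* vrshrq_n_u16: (prod + 128) >> 8;
         * vraddhn_u16:  high byte of (a + b), with rounding. */
        return vraddhn_u16(prod, vrshrq_n_u16(prod, 8));
    }
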
/external/skia/src/opts/

D | SkBlitRow_opts.h
    47   static inline uint8x8_t SkMulDiv255Round_neon8(uint8x8_t x, uint8x8_t y) {  in SkMulDiv255Round_neon8()
    63   uint8x8_t nalphas = vmvn_u8(src.val[3]);  in SkPMSrcOver_neon8()
    74   static inline uint8x8_t SkPMSrcOver_neon2(uint8x8_t dst, uint8x8_t src) {  in SkPMSrcOver_neon2()
    75   const uint8x8_t alpha_indices = vcreate_u8(0x0707070703030303);  in SkPMSrcOver_neon2()
    76   uint8x8_t nalphas = vmvn_u8(vtbl1_u8(src, alpha_indices));  in SkPMSrcOver_neon2()
    201  uint8x8_t alphas = src_col.val[3];  in blit_row_s32a_opaque()
    223  uint8x8_t src2 = vld1_u8(reinterpret_cast<const uint8_t*>(src));  in blit_row_s32a_opaque()
    224  uint8x8_t dst2 = vld1_u8(reinterpret_cast<const uint8_t*>(dst));  in blit_row_s32a_opaque()
    229  uint8x8_t result = SkPMSrcOver_neon2(vcreate_u8(*dst), vcreate_u8(*src));  in blit_row_s32a_opaque()

/external/libhevc/common/arm/

D | ihevc_intra_pred_filters_neon_intr.c
    162  uint8x8_t src;  in ihevc_intra_pred_luma_ref_substitution_neonintr()
    214  uint8x8_t dup_pu1_dst1;  in ihevc_intra_pred_luma_ref_substitution_neonintr()
    250  uint8x8_t dup_pu1_dst2;  in ihevc_intra_pred_luma_ref_substitution_neonintr()
    269  uint8x8_t dup_pu1_dst3;  in ihevc_intra_pred_luma_ref_substitution_neonintr()
    478  uint8x8_t src_val_0, src_val_2;  in ihevc_intra_pred_ref_filtering_neonintr()
    479  uint8x8_t src_val_1, shift_res;  in ihevc_intra_pred_ref_filtering_neonintr()
    480  uint8x8_t dup_const_2;  in ihevc_intra_pred_ref_filtering_neonintr()
    542  uint8x8_t const_col_i_val;  in ihevc_intra_pred_ref_filtering_neonintr()
    547  uint8x8_t res_val_1;  in ihevc_intra_pred_ref_filtering_neonintr()
    548  uint8x8_t res_val_2;  in ihevc_intra_pred_ref_filtering_neonintr()
    [all …]

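The shift_res / dup_const_2 names point at HEVC's [1 2 1]/4 intra reference-sample smoothing. A sketch of that filter for one 8-lane chunk (hypothetical helper name):

    #include <arm_neon.h>

    /* res = (left + 2*centre + right + 2) >> 2, per lane. */
    static uint8x8_t smooth_ref_121(const uint8x8_t left, const uint8x8_t centre,
                                    const uint8x8_t right) {
        uint16x8_t sum = vaddl_u8(left, right);     /* left + right, widened */
        sum = vmlal_u8(sum, centre, vdup_n_u8(2));  /* + 2 * centre */
        return vrshrn_n_u16(sum, 2);                /* round, shift by 2 */
    }
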
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/

D | depthwiseconv_uint8.h
    59   const uint8x8_t input_u8 = vld1_u8(input_ptr);
    89   const uint8x8_t filter_u8 = vld1_u8(filter_ptr);
    102  uint8x8_t input_u8[2];
    135  const uint8x8_t input_u8 = vld1_u8(input_ptr);
    157  const uint8x8_t filter_u8 = vld1_u8(filter_ptr);
    170  const uint8x8_t input_u8 = vld1_u8(input_ptr);
    197  uint8x8_t input_u8 = vdup_n_u8(0);
    229  const uint8x8_t filter_u8 = vld1_u8(filter_ptr + 8 * i);
    242  uint8x8_t input_u8 = vdup_n_u8(0);
    274  uint8x8_t input_u8 = vdup_n_u8(0);
    [all …]

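These loads feed TFLite's quantized depthwise-convolution kernels, where uint8 inputs are widened to int16 and offset by the zero point before the multiply-accumulate stage. A sketch of that standard pattern (input_offset is the assumed zero-point term):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Widen 8 quantized bytes to int16 and fold in the input offset. */
    static int16x8_t load_and_offset(const uint8_t *input_ptr,
                                     int16_t input_offset) {
        const uint8x8_t input_u8 = vld1_u8(input_ptr);
        const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8));
        return vaddq_s16(input_s16, vdupq_n_s16(input_offset));
    }
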