Searched refs:vzip_u8 (Results 1 – 15 of 15) sorted by relevance
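
For reference, vzip_u8 is the NEON interleave intrinsic: it takes two uint8x8_t vectors and returns a uint8x8x2_t whose val[0] holds the first eight interleaved bytes and val[1] the last eight. An illustrative stand-alone sketch, not taken from any of the files below:

    #include <arm_neon.h>

    /* vzip_u8 interleaves two 8-byte vectors lane by lane. */
    static inline void show_vzip_u8(uint8x8_t a, uint8x8_t b,
                                    uint8x8_t *lo, uint8x8_t *hi)
    {
      const uint8x8x2_t z = vzip_u8(a, b);
      *lo = z.val[0];   /* a0, b0, a1, b1, a2, b2, a3, b3 */
      *hi = z.val[1];   /* a4, b4, a5, b5, a6, b6, a7, b7 */
    }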

/external/libjpeg-turbo/simd/arm/
jdmrgext-neon.c
129 uint8x8x2_t r = vzip_u8(vqmovun_s16(r_even), vqmovun_s16(r_odd)); in jsimd_h2v1_merged_upsample_neon()
130 uint8x8x2_t g = vzip_u8(vqmovun_s16(g_even), vqmovun_s16(g_odd)); in jsimd_h2v1_merged_upsample_neon()
131 uint8x8x2_t b = vzip_u8(vqmovun_s16(b_even), vqmovun_s16(b_odd)); in jsimd_h2v1_merged_upsample_neon()
208 uint8x8x2_t r = vzip_u8(vqmovun_s16(r_even), vqmovun_s16(r_odd)); in jsimd_h2v1_merged_upsample_neon()
209 uint8x8x2_t g = vzip_u8(vqmovun_s16(g_even), vqmovun_s16(g_odd)); in jsimd_h2v1_merged_upsample_neon()
210 uint8x8x2_t b = vzip_u8(vqmovun_s16(b_even), vqmovun_s16(b_odd)); in jsimd_h2v1_merged_upsample_neon()
406 uint8x8x2_t r0 = vzip_u8(vqmovun_s16(r0_even), vqmovun_s16(r0_odd)); in jsimd_h2v2_merged_upsample_neon()
407 uint8x8x2_t r1 = vzip_u8(vqmovun_s16(r1_even), vqmovun_s16(r1_odd)); in jsimd_h2v2_merged_upsample_neon()
408 uint8x8x2_t g0 = vzip_u8(vqmovun_s16(g0_even), vqmovun_s16(g0_odd)); in jsimd_h2v2_merged_upsample_neon()
409 uint8x8x2_t g1 = vzip_u8(vqmovun_s16(g1_even), vqmovun_s16(g1_odd)); in jsimd_h2v2_merged_upsample_neon()
[all …]
jidctint-neon.c
681 uint8x8x2_t cols_01_23 = vzip_u8(cols_02_u8, cols_13_u8); in jsimd_idct_islow_pass2_regular()
682 uint8x8x2_t cols_45_67 = vzip_u8(cols_46_u8, cols_57_u8); in jsimd_idct_islow_pass2_regular()
784 uint8x8x2_t cols_01_23 = vzip_u8(cols_02_u8, cols_13_u8); in jsimd_idct_islow_pass2_sparse()
785 uint8x8x2_t cols_45_67 = vzip_u8(cols_46_u8, cols_57_u8); in jsimd_idct_islow_pass2_sparse()
jidctred-neon.c
470 uint8x8x2_t output_0123 = vzip_u8(vqmovun_s16(output_cols_02), in jsimd_idct_4x4_neon()
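
The libjpeg-turbo matches all follow one idiom: pixel values are computed in two int16x8_t vectors holding alternate (even/odd or paired) columns, each half is saturate-narrowed with vqmovun_s16, and vzip_u8 re-interleaves the bytes back into natural column order. A minimal sketch of that idiom with hypothetical names; this is not the libjpeg-turbo code itself:

    #include <arm_neon.h>

    /* Narrow even- and odd-column results to bytes and re-interleave them
     * into column order; the 16 combined bytes form one output row segment. */
    static inline uint8x16_t interleave_even_odd(int16x8_t even, int16x8_t odd)
    {
      const uint8x8_t e = vqmovun_s16(even);        /* saturate to [0, 255] */
      const uint8x8_t o = vqmovun_s16(odd);
      const uint8x8x2_t z = vzip_u8(e, o);          /* e0, o0, e1, o1, ... */
      return vcombine_u8(z.val[0], z.val[1]);
    }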
/external/XNNPACK/src/x8-zip/
xm-neon.c
42 const uint8x8x2_t vxy = vzip_u8(vx, vy); in xnn_x8_zip_xm_ukernel__neon()
43 const uint8x8x2_t vzw = vzip_u8(vz, vw); in xnn_x8_zip_xm_ukernel__neon()
85 const uint8x8x2_t vxy = vzip_u8(vreinterpret_u8_u64(vx), vreinterpret_u8_u64(vy)); in xnn_x8_zip_xm_ukernel__neon()
86 const uint8x8x2_t vzw = vzip_u8(vreinterpret_u8_u64(vz), vreinterpret_u8_u64(vw)); in xnn_x8_zip_xm_ukernel__neon()
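
The XNNPACK x8-zip kernel interleaves byte streams: vzip_u8 pairs x with y and z with w. A sketch of a four-stream zip built on that first stage, assuming (this part is an assumption, not shown above) that the byte pairs are then interleaved again as 16-bit lanes with vzip_u16; this is not the xnn_x8_zip_xm_ukernel__neon source:

    #include <arm_neon.h>

    /* Interleave four byte streams into x0,y0,z0,w0,x1,y1,z1,w1,... */
    static inline void zip4_u8x8(const uint8_t *x, const uint8_t *y,
                                 const uint8_t *z, const uint8_t *w,
                                 uint8_t *out)
    {
      const uint8x8x2_t vxy = vzip_u8(vld1_u8(x), vld1_u8(y)); /* x0,y0,x1,y1,... */
      const uint8x8x2_t vzw = vzip_u8(vld1_u8(z), vld1_u8(w)); /* z0,w0,z1,w1,... */
      const uint16x4x2_t lo = vzip_u16(vreinterpret_u16_u8(vxy.val[0]),
                                       vreinterpret_u16_u8(vzw.val[0]));
      const uint16x4x2_t hi = vzip_u16(vreinterpret_u16_u8(vxy.val[1]),
                                       vreinterpret_u16_u8(vzw.val[1]));
      vst1_u8(out,      vreinterpret_u8_u16(lo.val[0]));
      vst1_u8(out +  8, vreinterpret_u8_u16(lo.val[1]));
      vst1_u8(out + 16, vreinterpret_u8_u16(hi.val[0]));
      vst1_u8(out + 24, vreinterpret_u8_u16(hi.val[1]));
    }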
/external/libaom/libaom/aom_dsp/arm/
sse_neon.c
67 tmp = vzip_u8(d0, d1); in aom_sse_neon()
68 tmp2 = vzip_u8(d2, d3); in aom_sse_neon()
79 tmp = vzip_u8(d0, d1); in aom_sse_neon()
80 tmp2 = vzip_u8(d2, d3); in aom_sse_neon()
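
In the libaom SSE (sum of squared error) kernel, vzip_u8 packs two narrow rows into a single 8-lane vector before the squared-difference accumulation. The lanes come out interleaved rather than concatenated, but a sum of squares is order-insensitive as long as source and reference are zipped identically. A hedged sketch of that idea; the 4x2 shape and helper name are assumptions, not the aom_sse_neon source:

    #include <arm_neon.h>

    /* Sum of squared differences over two 4-pixel rows; s0/s1 and r0/r1
     * each carry 4 valid bytes in their low half. */
    static inline uint32_t sse_4x2(uint8x8_t s0, uint8x8_t s1,
                                   uint8x8_t r0, uint8x8_t r1)
    {
      const uint8x8_t s = vzip_u8(s0, s1).val[0];  /* s0[0],s1[0],s0[1],s1[1],... */
      const uint8x8_t r = vzip_u8(r0, r1).val[0];  /* same lane order as s */
      const uint8x8_t d = vabd_u8(s, r);           /* |s - r| per lane */
      const uint16x8_t sq = vmull_u8(d, d);        /* squared differences */
      const uint32x4_t sum4 = vpaddlq_u16(sq);     /* pairwise widen + add */
      const uint64x2_t sum2 = vpaddlq_u32(sum4);
      return (uint32_t)(vgetq_lane_u64(sum2, 0) + vgetq_lane_u64(sum2, 1));
    }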
/external/webp/src/dsp/
upsampling_neon.c
115 #define ZIP_U8(lo, hi) vzip_u8((lo), (hi))
117 #define ZIP_U8(lo, hi) vzip_u8((hi), (lo))
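
The two ZIP_U8 definitions (lines 115 and 117) are presumably the two arms of a preprocessor conditional: swapping the operands of vzip_u8 decides which input's bytes land in the even positions of the interleaved output, which matters when the target pixel byte order depends on the build configuration. Illustrative only:

    #include <arm_neon.h>

    /* vzip_u8(lo, hi) -> lo0, hi0, lo1, hi1, ...
     * vzip_u8(hi, lo) -> hi0, lo0, hi1, lo1, ...
     * i.e. the first operand always supplies the even output bytes. */
    static inline uint8x8x2_t zip_u8_order(uint8x8_t lo, uint8x8_t hi, int hi_first)
    {
      return hi_first ? vzip_u8(hi, lo) : vzip_u8(lo, hi);
    }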
/external/libaom/libaom/aom_dsp/simd/
v64_intrinsics_arm.h
374 uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x)); in v64_ziplo_8()
384 uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x)); in v64_ziphi_8()
v128_intrinsics_arm.h
436 uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x)); in v128_zip_8()
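
Both intrinsics headers build the abstraction's byte zips directly on vzip_u8: reinterpret the 64-bit lane type to bytes, zip once, and keep val[0] for the "lo" half or val[1] for the "hi" half. Note the reversed operand order in the snippets (y before x), so the second argument supplies the even output lanes. A sketch under the assumption that the helpers are declared as (v64 x, v64 y):

    #include <arm_neon.h>

    typedef int64x1_t v64;  /* 64-bit lane type used by the abstraction */

    static inline v64 ziplo_8(v64 x, v64 y) {
      const uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
      return vreinterpret_s64_u8(r.val[0]);  /* y0, x0, y1, x1, ... */
    }

    static inline v64 ziphi_8(v64 x, v64 y) {
      const uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
      return vreinterpret_s64_u8(r.val[1]);  /* y4, x4, y5, x5, ... */
    }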
/external/clang/test/CodeGen/
aarch64-neon-perm.c
1452 return vzip_u8(a, b); in test_vzip_u8()
arm_neon_intrinsics.c
23717 return vzip_u8(a, b); in test_vzip_u8()
/external/libgav1/libgav1/src/dsp/arm/
common_neon.h
337 return vzip_u8(a, b).val[0]; in InterleaveLow8()
convolve_neon.cc
269 const uint8x8x2_t input = vzip_u8(input0, input1); in FilterHorizontalWidth2()
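
libgav1 wraps the low half of the zip in a small helper and uses the full pair when filtering 2-pixel-wide columns. A self-contained version of the helper shown at common_neon.h line 337 (the surrounding declarations are assumed):

    #include <arm_neon.h>

    /* First eight interleaved bytes of a and b: a0, b0, a1, b1, a2, b2, a3, b3. */
    static inline uint8x8_t InterleaveLow8(const uint8x8_t a, const uint8x8_t b) {
      return vzip_u8(a, b).val[0];
    }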
/external/llvm-project/clang/test/CodeGen/
aarch64-neon-perm.c
1354 return vzip_u8(a, b); in test_vzip_u8()
arm_neon_intrinsics.c
20959 return vzip_u8(a, b); in test_vzip_u8()
/external/neon_2_sse/
NEON_2_SSE.h
2240 _NEON2SSESTORAGE uint8x8x2_t vzip_u8(uint8x8_t a, uint8x8_t b); // VZIP.8 d0,d0
15810 _NEON2SSESTORAGE uint8x8x2_t vzip_u8(uint8x8_t a, uint8x8_t b); // VZIP.8 d0,d0
15811 #define vzip_u8 vzip_s8 macro
15823 #define vzip_p8 vzip_u8
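
In the NEON-to-SSE translation header, vzip_u8 is declared and then simply #defined to vzip_s8 (and vzip_p8 to vzip_u8): a zip is a pure byte shuffle, so the signed, unsigned, and polynomial variants are bit-identical. On SSE2 the natural equivalent is the byte unpack; a minimal sketch, not the header's actual implementation:

    #include <emmintrin.h>  /* SSE2 */

    /* Emulate an 8x8-bit zip: with a's and b's eight bytes in the low halves
     * of two __m128i values, the unpack yields a0, b0, a1, b1, ..., a7, b7.
     * The low and high 8 bytes of the result play the role of the NEON
     * uint8x8x2_t's val[0] and val[1]. */
    static inline __m128i zip_u8_sse2(__m128i a, __m128i b)
    {
      return _mm_unpacklo_epi8(a, b);
    }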