
Searched for refs:uint16x4_t (Results 1 – 25 of 53), sorted by relevance
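uint16x4_t is the ARM NEON vector type that packs four unsigned 16-bit lanes into a single 64-bit D register; the hits below are mostly Clang codegen tests and SIMD kernels that operate on it. As a minimal sketch of the kind of intrinsic usage these results exercise (illustrative values, not code from any listed file; assumes <arm_neon.h> and a NEON-capable target):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    uint16x4_t a = vdup_n_u16(3);                       /* {3, 3, 3, 3} */
    uint16x4_t b = vcreate_u16(0x0004000300020001ULL);  /* {1, 2, 3, 4} in little-endian lane order */
    uint16x4_t sum = vadd_u16(a, b);                    /* lane-wise add, as in the test_vadd_u16 hits below */
    printf("%u\n", (unsigned)vget_lane_u16(sum, 0));    /* prints 4 */
    return 0;
}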


/external/clang/test/CodeGen/
aarch64-neon-3v.c
68 uint16x4_t test_vand_u16(uint16x4_t a, uint16x4_t b) { in test_vand_u16()
164 uint16x4_t test_vorr_u16(uint16x4_t a, uint16x4_t b) { in test_vorr_u16()
260 uint16x4_t test_veor_u16(uint16x4_t a, uint16x4_t b) { in test_veor_u16()
356 uint16x4_t test_vbic_u16(uint16x4_t a, uint16x4_t b) { in test_vbic_u16()
452 uint16x4_t test_vorn_u16(uint16x4_t a, uint16x4_t b) { in test_vorn_u16()
arm_neon_intrinsics.c
38 uint16x4_t test_vaba_u16(uint16x4_t a, uint16x4_t b, uint16x4_t c) { in test_vaba_u16()
111 uint32x4_t test_vabal_u16(uint32x4_t a, uint16x4_t b, uint16x4_t c) { in test_vabal_u16()
148 uint16x4_t test_vabd_u16(uint16x4_t a, uint16x4_t b) { in test_vabd_u16()
233 uint32x4_t test_vabdl_u16(uint16x4_t a, uint16x4_t b) { in test_vabdl_u16()
331 uint16x4_t test_vadd_u16(uint16x4_t a, uint16x4_t b) { in test_vadd_u16()
428 uint16x4_t test_vaddhn_u32(uint32x4_t a, uint32x4_t b) { in test_vaddhn_u32()
465 uint32x4_t test_vaddl_u16(uint16x4_t a, uint16x4_t b) { in test_vaddl_u16()
502 uint32x4_t test_vaddw_u16(uint32x4_t a, uint16x4_t b) { in test_vaddw_u16()
545 uint16x4_t test_vand_u16(uint16x4_t a, uint16x4_t b) { in test_vand_u16()
642 uint16x4_t test_vbic_u16(uint16x4_t a, uint16x4_t b) { in test_vbic_u16()
[all …]
aarch64-neon-2velem.c
128 uint16x4_t test_vmul_lane_u16(uint16x4_t a, uint16x4_t v) { in test_vmul_lane_u16()
134 uint16x8_t test_vmulq_lane_u16(uint16x8_t a, uint16x4_t v) { in test_vmulq_lane_u16()
176 uint16x4_t test_vmul_laneq_u16(uint16x4_t a, uint16x8_t v) { in test_vmul_laneq_u16()
500 uint32x4_t test_vmull_lane_u16(uint16x4_t a, uint16x4_t v) { in test_vmull_lane_u16()
524 uint32x4_t test_vmull_high_lane_u16(uint16x8_t a, uint16x4_t v) { in test_vmull_high_lane_u16()
548 uint32x4_t test_vmull_laneq_u16(uint16x4_t a, uint16x8_t v) { in test_vmull_laneq_u16()
935 uint16x4_t test_vmul_lane_u16_0(uint16x4_t a, uint16x4_t v) { in test_vmul_lane_u16_0()
941 uint16x8_t test_vmulq_lane_u16_0(uint16x8_t a, uint16x4_t v) { in test_vmulq_lane_u16_0()
983 uint16x4_t test_vmul_laneq_u16_0(uint16x4_t a, uint16x8_t v) { in test_vmul_laneq_u16_0()
1271 uint32x4_t test_vmull_lane_u16_0(uint16x4_t a, uint16x4_t v) { in test_vmull_lane_u16_0()
[all …]
arm64_vecCmpBr.c
9 unsigned anyZero64(uint16x4_t a) { in anyZero64()
35 unsigned anyNonZero64(uint16x4_t a) { in anyNonZero64()
61 unsigned allZero64(uint16x4_t a) { in allZero64()
87 unsigned allNonZero64(uint16x4_t a) { in allNonZero64()
aarch64-neon-perm.c
62 uint16x4_t test_vuzp1_u16(uint16x4_t a, uint16x4_t b) { in test_vuzp1_u16()
188 uint16x4_t test_vuzp2_u16(uint16x4_t a, uint16x4_t b) { in test_vuzp2_u16()
314 uint16x4_t test_vzip1_u16(uint16x4_t a, uint16x4_t b) { in test_vzip1_u16()
440 uint16x4_t test_vzip2_u16(uint16x4_t a, uint16x4_t b) { in test_vzip2_u16()
566 uint16x4_t test_vtrn1_u16(uint16x4_t a, uint16x4_t b) { in test_vtrn1_u16()
692 uint16x4_t test_vtrn2_u16(uint16x4_t a, uint16x4_t b) { in test_vtrn2_u16()
789 uint16x4x2_t test_vuzp_u16(uint16x4_t a, uint16x4_t b) { in test_vuzp_u16()
899 uint16x4x2_t test_vzip_u16(uint16x4_t a, uint16x4_t b) { in test_vzip_u16()
1009 uint16x4x2_t test_vtrn_u16(uint16x4_t a, uint16x4_t b) { in test_vtrn_u16()
arm64_vshift.c
59 uint16x4_t test_vqshl_n_u16(uint16x4_t in) { in test_vqshl_n_u16()
156 uint16x4_t test_vrshr_n_u16(uint16x4_t in) { in test_vrshr_n_u16()
310 uint16x4_t test_vrsra_n_u16(uint16x4_t acc, uint16x4_t in) { in test_vrsra_n_u16()
aarch64-neon-misc.c
17 uint16x4_t test_vceqz_s16(int16x4_t a) { in test_vceqz_s16()
77 uint16x4_t test_vceqz_u16(uint16x4_t a) { in test_vceqz_u16()
143 uint16x4_t test_vceqz_p16(poly16x4_t a) { in test_vceqz_p16()
173 uint16x4_t test_vcgez_s16(int16x4_t a) { in test_vcgez_s16()
245 uint16x4_t test_vclez_s16(int16x4_t a) { in test_vclez_s16()
317 uint16x4_t test_vcgtz_s16(int16x4_t a) { in test_vcgtz_s16()
389 uint16x4_t test_vcltz_s16(int16x4_t a) { in test_vcltz_s16()
509 uint16x4_t test_vrev32_u16(uint16x4_t a) { in test_vrev32_u16()
587 uint16x4_t test_vrev64_u16(uint16x4_t a) { in test_vrev64_u16()
687 uint16x4_t test_vpaddl_u8(uint8x8_t a) { in test_vpaddl_u8()
[all …]
aarch64-neon-intrinsics.c
45 uint16x4_t test_vadd_u16(uint16x4_t v1, uint16x4_t v2) { in test_vadd_u16()
157 uint16x4_t test_vsub_u16(uint16x4_t v1, uint16x4_t v2) { in test_vsub_u16()
266 uint16x4_t test_vmul_u16(uint16x4_t v1, uint16x4_t v2) { in test_vmul_u16()
369 uint16x4_t test_vmla_u16(uint16x4_t v1, uint16x4_t v2, uint16x4_t v3) { in test_vmla_u16()
459 uint16x4_t test_vmls_u16(uint16x4_t v1, uint16x4_t v2, uint16x4_t v3) { in test_vmls_u16()
593 uint16x4_t test_vaba_u16(uint16x4_t v1, uint16x4_t v2, uint16x4_t v3) { in test_vaba_u16()
665 uint16x4_t test_vabd_u16(uint16x4_t v1, uint16x4_t v2) { in test_vabd_u16()
738 int8x8_t test_vbsl_s16(uint16x4_t v1, int16x4_t v2, int16x4_t v3) { in test_vbsl_s16()
762 uint16x4_t test_vbsl_u16(uint16x4_t v1, uint16x4_t v2, uint16x4_t v3) { in test_vbsl_u16()
798 poly16x4_t test_vbsl_p16(uint16x4_t v1, poly16x4_t v2, poly16x4_t v3) { in test_vbsl_p16()
[all …]
arm64_neon_high_half.c
266 uint16x8_t test_vrshrn_high_n_u32(uint16x4_t lowpart, uint32x4_t input) { in test_vrshrn_high_n_u32()
296 uint16x8_t test_vshrn_high_n_u32(uint16x4_t lowpart, uint32x4_t input) { in test_vshrn_high_n_u32()
311 uint16x8_t test_vqshrun_high_n_s32(uint16x4_t lowpart, int32x4_t input) { in test_vqshrun_high_n_s32()
326 uint16x8_t test_vqrshrun_high_n_s32(uint16x4_t lowpart, int32x4_t input) { in test_vqrshrun_high_n_s32()
356 uint16x8_t test_vqshrn_high_n_u32(uint16x4_t lowpart, uint32x4_t input) { in test_vqshrn_high_n_u32()
386 uint16x8_t test_vqrshrn_high_n_u32(uint16x4_t lowpart, uint32x4_t input) { in test_vqrshrn_high_n_u32()
416 uint16x8_t test_vaddhn_high_u32(uint16x4_t lowpart, uint32x4_t lhs, uint32x4_t rhs) { in test_vaddhn_high_u32()
446 uint16x8_t test_vraddhn_high_u32(uint16x4_t lowpart, uint32x4_t lhs, uint32x4_t rhs) { in test_vraddhn_high_u32()
476 uint16x8_t test_vmovn_high_u32(uint16x4_t lowpart, uint32x4_t wide) { in test_vmovn_high_u32()
506 uint16x8_t test_vqmovn_high_u32(uint16x4_t lowpart, int32x4_t wide) { in test_vqmovn_high_u32()
[all …]
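The arm64_neon_high_half.c hits above take an existing uint16x4_t low half and fill the upper half of a 128-bit register with a narrowed 32-bit result in one step. A sketch of that pattern (AArch64-only intrinsic; not the test code itself):

#include <arm_neon.h>

static uint16x8_t narrow_into_high(uint16x4_t lowpart, uint32x4_t wide) {
    /* lanes 0..3 = lowpart, lanes 4..7 = each 32-bit lane shifted right by 16 and narrowed */
    return vshrn_high_n_u32(lowpart, wide, 16);
}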
aarch64-neon-across.c
27 uint32_t test_vaddlv_u16(uint16x4_t a) { in test_vaddlv_u16()
87 uint16_t test_vmaxv_u16(uint16x4_t a) { in test_vmaxv_u16()
147 uint16_t test_vminv_u16(uint16x4_t a) { in test_vminv_u16()
207 uint16_t test_vaddv_u16(uint16x4_t a) { in test_vaddv_u16()
arm64_vsli.c
44 uint16x4_t test_vsli_n_u16(uint16x4_t a1, uint16x4_t a2) { in test_vsli_n_u16()
arm64_vsri.c
45 uint16x4_t test_vsri_n_u16(uint16x4_t a1, uint16x4_t a2) { in test_vsri_n_u16()
aarch64-neon-extract.c
62 uint16x4_t test_vext_u16(uint16x4_t a, uint16x4_t b) { in test_vext_u16()
arm64_vqmov.c
32 uint16x8_t test_vqmovn_high_u32(uint16x4_t Vdlow, uint32x4_t Vn) in test_vqmovn_high_u32()
64 uint16x8_t test_vqmovun_high_s32(uint16x4_t Vdlow, uint32x4_t Vn) in test_vqmovun_high_s32()
aarch64-neon-vcombine.c
38 uint16x8_t test_vcombine_u16(uint16x4_t low, uint16x4_t high) { in test_vcombine_u16()
arm-neon-vget.c
29 uint16x4_t low_u16(uint16x8_t a) { in low_u16()
85 uint16x4_t high_u16(uint16x8_t a) { in high_u16()
aarch64-neon-vget.c
14 uint16_t test_vget_lane_u16(uint16x4_t a) { in test_vget_lane_u16()
187 uint16x4_t test_vset_lane_u16(uint16_t a, uint16x4_t b) { in test_vset_lane_u16()
aarch64-neon-vget-hilo.c
39 uint16x4_t test_vget_high_u16(uint16x8_t a) { in test_vget_high_u16()
123 uint16x4_t test_vget_low_u16(uint16x8_t a) { in test_vget_low_u16()
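Several of the vget/vcombine tests above split a 128-bit uint16x8_t into two uint16x4_t halves and put them back together. A hedged sketch of that round trip (round_trip is a made-up name, not a function from those files):

#include <arm_neon.h>

static uint16x8_t round_trip(uint16x8_t v) {
    uint16x4_t low  = vget_low_u16(v);   /* lanes 0..3 */
    uint16x4_t high = vget_high_u16(v);  /* lanes 4..7 */
    return vcombine_u16(low, high);      /* reassembled unchanged */
}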
/external/libvpx/libvpx/vpx_dsp/arm/
intrapred_neon.c
30 const uint16x4_t p0 = vpaddl_u8(A); // cascading summation of the top in dc_4x4()
31 const uint16x4_t p1 = vpadd_u16(p0, p0); in dc_4x4()
37 const uint16x4_t p0 = vpaddl_u8(L); // cascading summation of the left in dc_4x4()
38 const uint16x4_t p1 = vpadd_u16(p0, p0); in dc_4x4()
99 const uint16x4_t p0 = vpaddl_u8(A); // cascading summation of the top in dc_8x8()
100 const uint16x4_t p1 = vpadd_u16(p0, p0); in dc_8x8()
101 const uint16x4_t p2 = vpadd_u16(p1, p1); in dc_8x8()
107 const uint16x4_t p0 = vpaddl_u8(L); // cascading summation of the left in dc_8x8()
108 const uint16x4_t p1 = vpadd_u16(p0, p0); in dc_8x8()
109 const uint16x4_t p2 = vpadd_u16(p1, p1); in dc_8x8()
[all …]
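The intrapred_neon.c hits use a "cascading summation": vpaddl_u8 widens and pairwise-adds eight bytes into four uint16 lanes, and repeated vpadd_u16 folds those lanes until each one holds the full sum. A standalone sketch of the pattern (not the libvpx code itself):

#include <arm_neon.h>

static uint16_t sum8_u8(uint8x8_t row) {
    uint16x4_t p0 = vpaddl_u8(row);     /* 8 x u8 -> 4 x u16 partial sums */
    uint16x4_t p1 = vpadd_u16(p0, p0);  /* fold to 2 distinct sums, duplicated */
    uint16x4_t p2 = vpadd_u16(p1, p1);  /* every lane now holds the total (max 8*255 fits in u16) */
    return vget_lane_u16(p2, 0);
}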
/external/webp/src/dsp/
rescaler_neon.c
78 const uint16x4_t C0 = vmovn_u32(B0); in RescalerExportRowExpand()
79 const uint16x4_t C1 = vmovn_u32(B1); in RescalerExportRowExpand()
99 const uint16x4_t E0 = vmovn_u32(D0); in RescalerExportRowExpand()
100 const uint16x4_t E1 = vmovn_u32(D1); in RescalerExportRowExpand()
140 const uint16x4_t D0 = vmovn_u32(C0); in RescalerExportRowShrink()
141 const uint16x4_t D1 = vmovn_u32(C1); in RescalerExportRowShrink()
158 const uint16x4_t B0 = vmovn_u32(A0); in RescalerExportRowShrink()
159 const uint16x4_t B1 = vmovn_u32(A1); in RescalerExportRowShrink()
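The rescaler_neon.c hits narrow 32-bit accumulators back down to uint16x4_t with vmovn_u32. A sketch of that narrowing step, assuming the values already fit in 16 bits (vmovn_u32 truncates; vqmovn_u32 would saturate instead):

#include <arm_neon.h>

static uint16x8_t narrow_pair(uint32x4_t a0, uint32x4_t a1) {
    uint16x4_t c0 = vmovn_u32(a0);   /* keep the low 16 bits of each 32-bit lane */
    uint16x4_t c1 = vmovn_u32(a1);
    return vcombine_u16(c0, c1);     /* pack both halves into one 128-bit value */
}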
/external/skia/src/opts/
SkBlitMask_opts_arm_neon.cpp
184 uint16x4_t vtmp_lo = vmovn_u32(vdst32_lo) & vget_low_u16(vmaskq_ng16); in SkRGB16BlitterBlitV_neon()
185 uint16x4_t vtmp_hi = vshrn_n_u32(vdst32_lo, 16) & vget_low_u16(vmaskq_g16); in SkRGB16BlitterBlitV_neon()
186 uint16x4_t vdst16_lo = vorr_u16(vtmp_lo, vtmp_hi); in SkRGB16BlitterBlitV_neon()
189 uint16x4_t vdst16_hi = vorr_u16(vtmp_lo, vtmp_hi); in SkRGB16BlitterBlitV_neon()
SkBitmapProcState_filter_neon.h
28 uint16x4_t vx, vconst16_16, v16_x, tmp; in Filter_32_opaque_neon()
63 uint16x4_t vx, vconst16_16, v16_x, tmp, vscale; in Filter_32_alpha_neon()
SkNx_neon.h
230 SkNx(const uint16x4_t& vec) : fVec(vec) {} in SkNx()
237 fVec = (uint16x4_t) { a,b,c,d }; in SkNx()
253 union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
261 uint16x4_t fVec;
373 uint16x4_t _16 = vqmovn_u32(_32);
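SkNx_neon.h (hits above) builds a uint16x4_t from scalars with a brace initializer and reads lanes back out through a union. A standalone sketch of those two idioms; the function name here is illustrative, not Skia's:

#include <arm_neon.h>
#include <stdint.h>

static uint16_t third_lane(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
    uint16x4_t v = (uint16x4_t){ a, b, c, d };           /* GCC/Clang vector literal */
    union { uint16x4_t v; uint16_t us[4]; } pun = { v };
    return pun.us[2];                                    /* same value as vget_lane_u16(v, 2) */
}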
/external/clang/test/CodeGenCXX/
aarch64-mangle-neon-vectors.cpp
24 typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t; typedef
48 void f5(uint16x4_t) {} in f5() argument
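The aarch64-mangle-neon-vectors.cpp hit defines the NEON vector types directly with Clang's neon_vector_type attribute instead of including <arm_neon.h>, so the test can check how uint16x4_t is name-mangled. A trimmed sketch of the same idea (Clang-specific, ARM/AArch64 targets only; the alias name here is illustrative):

#include <stdint.h>

typedef __attribute__((neon_vector_type(4))) uint16_t my_uint16x4_t;

void f5(my_uint16x4_t v) { (void)v; }  /* mirrors the f5 overload in the listed test */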
/external/libhevc/common/arm/
ihevc_weighted_pred_neon_intr.c
121 uint16x4_t sto_res_tmp2; in ihevc_weighted_pred_uni_neonintr()
254 uint16x4_t sto_res_tmp2; in ihevc_weighted_pred_chroma_uni_neonintr()
421 uint16x4_t sto_res_tmp2; in ihevc_weighted_pred_bi_neonintr()
590 uint16x4_t sto_res_tmp2; in ihevc_weighted_pred_chroma_bi_neonintr()
752 uint16x4_t sto_res_tmp2; in ihevc_weighted_pred_bi_default_neonintr()
901 uint16x4_t sto_res_tmp2; in ihevc_weighted_pred_chroma_bi_default_neonintr()
