/external/skia/src/opts/ |
D | SkTextureCompressor_opts.h |
    53  static inline uint64x2_t shift_swap(const uint64x2_t &x, const uint64x2_t &mask) {  in shift_swap()
    54      uint64x2_t t = vandq_u64(mask, veorq_u64(x, vshrq_n_u64(x, shift)));  in shift_swap()
    58  static inline uint64x2_t pack_indices(const uint64x2_t &x) {  in pack_indices()
    61      static const uint64x2_t kMask1 = { 0x3FC0003FC00000ULL, 0x3FC0003FC00000ULL };  in pack_indices()
    62      uint64x2_t ret = shift_swap<10>(x, kMask1);  in pack_indices()
    65      static const uint64x2_t kMask2 = { (0x3FULL << 52), (0x3FULL << 52) };  in pack_indices()
    66      static const uint64x2_t kMask3 = { (0x3FULL << 28), (0x3FULL << 28) };  in pack_indices()
    67      const uint64x2_t x1 = vandq_u64(vshlq_n_u64(ret, 52), kMask2);  in pack_indices()
    68      const uint64x2_t x2 = vandq_u64(vshlq_n_u64(ret, 20), kMask3);  in pack_indices()
    73      static const uint64x2_t kMask4 = { 0xFC0000ULL, 0xFC0000ULL };  in pack_indices()
    [all …]
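The Skia excerpt cuts off before shift_swap() returns. For context, a minimal C sketch of the delta-swap idiom it opens with; the return line is the standard reconstruction of that idiom, not Skia's verbatim code, and the shift is fixed at 10 (as in the shift_swap<10> call) because Skia templates it while vshrq_n/vshlq_n need a compile-time constant:

    #include <arm_neon.h>

    /* Delta swap: exchange the bit groups selected by `mask` with the
     * groups 10 positions above them. */
    static inline uint64x2_t shift_swap_10(uint64x2_t x, uint64x2_t mask) {
        uint64x2_t t = vandq_u64(mask, veorq_u64(x, vshrq_n_u64(x, 10)));
        return veorq_u64(veorq_u64(x, t), vshlq_n_u64(t, 10));  /* x ^ t ^ (t << 10) */
    }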
|
/external/clang/test/CodeGen/ |
D | arm64_vCMP.c |
    49  uint64x2_t test_vceqq_u64(uint64x2_t a1, uint64x2_t a2) {  in test_vceqq_u64()
    55  uint64x2_t test_vcgeq_s64(int64x2_t a1, int64x2_t a2) {  in test_vcgeq_s64()
    61  uint64x2_t test_vcgeq_u64(uint64x2_t a1, uint64x2_t a2) {  in test_vcgeq_u64()
    67  uint64x2_t test_vcgtq_s64(int64x2_t a1, int64x2_t a2) {  in test_vcgtq_s64()
    73  uint64x2_t test_vcgtq_u64(uint64x2_t a1, uint64x2_t a2) {  in test_vcgtq_u64()
    79  uint64x2_t test_vcleq_s64(int64x2_t a1, int64x2_t a2) {  in test_vcleq_s64()
    85  uint64x2_t test_vcleq_u64(uint64x2_t a1, uint64x2_t a2) {  in test_vcleq_u64()
    91  uint64x2_t test_vcltq_s64(int64x2_t a1, int64x2_t a2) {  in test_vcltq_s64()
    97  uint64x2_t test_vcltq_u64(uint64x2_t a1, uint64x2_t a2) {  in test_vcltq_u64()
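These tests exercise the full-width 64-bit lane compares, which exist only on AArch64. A minimal sketch of their semantics, assuming an AArch64 toolchain: each lane yields all-ones for true and zero for false, so the result doubles as a select mask.

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        const uint64_t av[2] = { 1, 5 };
        const uint64_t bv[2] = { 1, 9 };
        uint64x2_t a = vld1q_u64(av);
        uint64x2_t b = vld1q_u64(bv);
        uint64x2_t eq = vceqq_u64(a, b);  /* { ~0ULL, 0 } */
        uint64x2_t lt = vcltq_u64(a, b);  /* { 0, ~0ULL } */
        printf("eq0=%llx lt1=%llx\n",
               (unsigned long long)vgetq_lane_u64(eq, 0),
               (unsigned long long)vgetq_lane_u64(lt, 1));
        return 0;
    }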
|
D | aarch64-neon-3v.c |
    98  uint64x2_t test_vandq_u64(uint64x2_t a, uint64x2_t b) {  in test_vandq_u64()
    194  uint64x2_t test_vorrq_u64(uint64x2_t a, uint64x2_t b) {  in test_vorrq_u64()
    290  uint64x2_t test_veorq_u64(uint64x2_t a, uint64x2_t b) {  in test_veorq_u64()
    386  uint64x2_t test_vbicq_u64(uint64x2_t a, uint64x2_t b) {  in test_vbicq_u64()
    482  uint64x2_t test_vornq_u64(uint64x2_t a, uint64x2_t b) {  in test_vornq_u64()
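Of the five three-operand bitwise ops covered here, vbicq ("bit clear", a & ~b) and vornq ("or not", a | ~b) are the two without a direct C operator. An illustrative sketch; the helper names are mine:

    #include <arm_neon.h>

    /* Clear the bits of `unwanted` out of `bits`: bits & ~unwanted. */
    static uint64x2_t clear_bits(uint64x2_t bits, uint64x2_t unwanted) {
        return vbicq_u64(bits, unwanted);
    }

    /* Set every bit except those that `keep` forces clear: bits | ~keep. */
    static uint64x2_t or_not(uint64x2_t bits, uint64x2_t keep) {
        return vornq_u64(bits, keep);
    }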
|
D | arm64_vtst.c |
    6  uint64x2_t test_vtstq_s64(int64x2_t a1, int64x2_t a2) {  in test_vtstq_s64()
    15  uint64x2_t test_vtstq_u64(uint64x2_t a1, uint64x2_t a2) {  in test_vtstq_u64()
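vtst is a per-lane "test bits": a lane becomes all-ones when (a & b) is nonzero in that lane. A minimal sketch (the 64-bit form is AArch64-only; the helper name is illustrative):

    #include <arm_neon.h>

    /* Flag lanes of `words` that have any bit of `flags` set. */
    static uint64x2_t lanes_with_flags(uint64x2_t words, uint64x2_t flags) {
        return vtstq_u64(words, flags);  /* (words & flags) != 0 ? ~0ULL : 0 per lane */
    }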
|
D | arm64_neon_high_half.c |
    31  uint64x2_t test_vaddw_high_u32(uint64x2_t lhs, uint32x4_t rhs) {  in test_vaddw_high_u32()
    61  uint64x2_t test_vsubw_high_u32(uint64x2_t lhs, uint32x4_t rhs) {  in test_vsubw_high_u32()
    91  uint64x2_t test_vabdl_high_u32(uint32x4_t lhs, uint32x4_t rhs) {  in test_vabdl_high_u32()
    121  uint64x2_t test_vabal_high_u32(uint64x2_t accum, uint32x4_t lhs, uint32x4_t rhs) {  in test_vabal_high_u32()
    241  uint64x2_t test_vsubl_high_u32(uint32x4_t lhs, uint32x4_t rhs) {  in test_vsubl_high_u32()
    271  uint32x4_t test_vrshrn_high_n_u64(uint32x2_t lowpart, uint64x2_t input) {  in test_vrshrn_high_n_u64()
    301  uint32x4_t test_vshrn_high_n_u64(uint32x2_t lowpart, uint64x2_t input) {  in test_vshrn_high_n_u64()
    361  uint32x4_t test_vqshrn_high_n_u64(uint32x2_t lowpart, uint64x2_t input) {  in test_vqshrn_high_n_u64()
    391  uint32x4_t test_vqrshrn_high_n_u64(uint32x2_t lowpart, uint64x2_t input) {  in test_vqrshrn_high_n_u64()
    421  uint32x4_t test_vaddhn_high_u64(uint32x2_t lowpart, uint64x2_t lhs, uint64x2_t rhs) {  in test_vaddhn_high_u64()
    [all …]
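The _high variants consume the upper 64 bits of a 128-bit source directly, avoiding an explicit vget_high. A sketch of the widen/narrow round trip under that pattern, assuming AArch64 (helper names are mine):

    #include <arm_neon.h>

    /* Widen all four u32 lanes of v into two u64 accumulators. */
    static void accumulate_u32x4(uint64x2_t *lo, uint64x2_t *hi, uint32x4_t v) {
        *lo = vaddw_u32(*lo, vget_low_u32(v));  /* lanes 0-1 */
        *hi = vaddw_high_u32(*hi, v);           /* lanes 2-3, no vget_high needed */
    }

    /* Narrow two u64x2 back into one u32x4: the base form fills the low
     * half, then the _high form writes the upper half of the same result. */
    static uint32x4_t narrow_shift32(uint64x2_t lo, uint64x2_t hi) {
        return vshrn_high_n_u64(vshrn_n_u64(lo, 32), hi, 32);
    }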
|
D | arm64_vca.c |
    33  uint64x2_t test_vcagtq_f64(float64x2_t a1, float64x2_t a2) {  in test_vcagtq_f64()
    40  uint64x2_t test_vcaltq_f64(float64x2_t a1, float64x2_t a2) {  in test_vcaltq_f64()
    47  uint64x2_t test_vcageq_f64(float64x2_t a1, float64x2_t a2) {  in test_vcageq_f64()
    54  uint64x2_t test_vcaleq_f64(float64x2_t a1, float64x2_t a2) {  in test_vcaleq_f64()
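The vca* family compares magnitudes, so a sign-agnostic "is a farther from zero than b" needs no explicit fabs. A minimal sketch, assuming AArch64 (the helper name is illustrative):

    #include <arm_neon.h>

    static uint64x2_t larger_magnitude(float64x2_t a, float64x2_t b) {
        return vcagtq_f64(a, b);  /* per lane: |a| > |b| ? ~0ULL : 0 */
    }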
|
D | aarch64-neon-misc.c |
    65  uint64x2_t test_vceqzq_s64(int64x2_t a) {  in test_vceqzq_s64()
    107  uint64x2_t test_vceqzq_u64(uint64x2_t a) {  in test_vceqzq_u64()
    155  uint64x2_t test_vceqzq_f64(float64x2_t a) {  in test_vceqzq_f64()
    161  uint64x2_t test_vceqzq_p64(poly64x2_t a) {  in test_vceqzq_p64()
    209  uint64x2_t test_vcgezq_s64(int64x2_t a) {  in test_vcgezq_s64()
    233  uint64x2_t test_vcgezq_f64(float64x2_t a) {  in test_vcgezq_f64()
    281  uint64x2_t test_vclezq_s64(int64x2_t a) {  in test_vclezq_s64()
    305  uint64x2_t test_vclezq_f64(float64x2_t a) {  in test_vclezq_f64()
    353  uint64x2_t test_vcgtzq_s64(int64x2_t a) {  in test_vcgtzq_s64()
    377  uint64x2_t test_vcgtzq_f64(float64x2_t a) {  in test_vcgtzq_f64()
    [all …]
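The one-operand vc*z forms compare against an implicit zero, saving the vdupq_n of an explicit zero vector. A sketch that inverts vceqzq to flag nonzero lanes, assuming AArch64; note there is no vmvnq_u64, so the inversion goes through veorq (the helper name is mine):

    #include <arm_neon.h>

    static uint64x2_t nonzero_lanes(uint64x2_t v) {
        return veorq_u64(vceqzq_u64(v), vdupq_n_u64(~0ULL));
    }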
|
D | arm64_vshift.c |
    95  uint64x2_t test_vqshlq_n_u64(uint64x2_t in) {  in test_vqshlq_n_u64()
    192  uint64x2_t test_vrshrq_n_u64(uint64x2_t in) {  in test_vrshrq_n_u64()
    352  uint64x2_t test_vrsraq_n_u64(uint64x2_t acc, uint64x2_t in) {  in test_vrsraq_n_u64()
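A sketch chaining the three shift flavours these tests cover: saturating left shift, rounding right shift, and rounding shift-right-and-accumulate. The immediates are chosen arbitrarily for illustration:

    #include <arm_neon.h>

    static uint64x2_t shift_demo(uint64x2_t acc, uint64x2_t in) {
        uint64x2_t sat = vqshlq_n_u64(in, 4);   /* clamps to UINT64_MAX on overflow */
        uint64x2_t rnd = vrshrq_n_u64(sat, 8);  /* adds 1 << 7 before shifting */
        return vrsraq_n_u64(acc, rnd, 2);       /* acc + rounded(rnd >> 2) */
    }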
|
D | arm-neon-misc.c |
    16  uint64x2_t q = vld1q_u64(src);  in t1()
    24  void t2(uint64_t *src1, uint8_t *src2, uint64x2_t *dst) {  in t2()
    26  uint64x2_t q = vld1q_u64(src1);  in t2()
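The load pattern from t1()/t2(), restated as a self-contained sketch: vld1q_u64 reads two 64-bit words from memory and vst1q_u64 writes them back, with no 16-byte alignment requirement on either pointer.

    #include <arm_neon.h>
    #include <stdint.h>

    void copy_two_words(const uint64_t *src, uint64_t *dst) {
        uint64x2_t q = vld1q_u64(src);
        vst1q_u64(dst, q);
    }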
|
D | aarch64-neon-intrinsics.c |
    117  uint64x2_t test_vaddq_u64(uint64x2_t v1, uint64x2_t v2) {  in test_vaddq_u64()
    229  uint64x2_t test_vsubq_u64(uint64x2_t v1, uint64x2_t v2) {  in test_vsubq_u64()
    822  int64x2_t test_vbslq_s64(uint64x2_t v1, int64x2_t v2, int64x2_t v3) {  in test_vbslq_s64()
    846  uint64x2_t test_vbslq_u64(uint64x2_t v1, uint64x2_t v2, uint64x2_t v3) {  in test_vbslq_u64()
    870  float64x2_t test_vbslq_f64(uint64x2_t v1, float64x2_t v2, float64x2_t v3) {  in test_vbslq_f64()
    930  uint64x2_t test_vcageq_f64(float64x2_t v1, float64x2_t v2) {  in test_vcageq_f64()
    954  uint64x2_t test_vcagtq_f64(float64x2_t v1, float64x2_t v2) {  in test_vcagtq_f64()
    980  uint64x2_t test_vcaleq_f64(float64x2_t v1, float64x2_t v2) {  in test_vcaleq_f64()
    1007  uint64x2_t test_vcaltq_f64(float64x2_t v1, float64x2_t v2) {  in test_vcaltq_f64()
    1086  uint64x2_t test_vtstq_s64(int64x2_t v1, int64x2_t v2) {  in test_vtstq_s64()
    [all …]
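Note how vbslq takes its uint64x2_t mask first regardless of the element type selected: it computes (mask & b) | (~mask & c) bit by bit, so fed with a compare result it becomes a branch-free per-lane select. A sketch, assuming AArch64 for the 64-bit compare (the helper name is mine):

    #include <arm_neon.h>

    static uint64x2_t max_per_lane(uint64x2_t a, uint64x2_t b) {
        return vbslq_u64(vcgtq_u64(a, b), a, b);
    }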
|
D | aarch64-neon-perm.c |
    86  uint64x2_t test_vuzp1q_u64(uint64x2_t a, uint64x2_t b) {  in test_vuzp1q_u64()
    212  uint64x2_t test_vuzp2q_u64(uint64x2_t a, uint64x2_t b) {  in test_vuzp2q_u64()
    338  uint64x2_t test_vzip1q_u64(uint64x2_t a, uint64x2_t b) {  in test_vzip1q_u64()
    464  uint64x2_t test_vzip2q_u64(uint64x2_t a, uint64x2_t b) {  in test_vzip2q_u64()
    590  uint64x2_t test_vtrn1q_u64(uint64x2_t a, uint64x2_t b) {  in test_vtrn1q_u64()
    716  uint64x2_t test_vtrn2q_u64(uint64x2_t a, uint64x2_t b) {  in test_vtrn2q_u64()
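On two-lane vectors the six permutes collapse to just two shuffles: vzip1q/vuzp1q/vtrn1q all produce {a0, b0}, and the *2q forms all produce {a1, b1}. A sketch, assuming AArch64 (helper names are mine):

    #include <arm_neon.h>

    static uint64x2_t low_lanes(uint64x2_t a, uint64x2_t b)  { return vzip1q_u64(a, b); }  /* {a0, b0} */
    static uint64x2_t high_lanes(uint64x2_t a, uint64x2_t b) { return vzip2q_u64(a, b); }  /* {a1, b1} */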
|
D | arm_neon_intrinsics.c |
    117  uint64x2_t test_vabal_u32(uint64x2_t a, uint32x2_t b, uint32x2_t c) {  in test_vabal_u32()
    239  uint64x2_t test_vabdl_u32(uint32x2_t a, uint32x2_t b) {  in test_vabdl_u32()
    397  uint64x2_t test_vaddq_u64(uint64x2_t a, uint64x2_t b) {  in test_vaddq_u64()
    434  uint32x2_t test_vaddhn_u64(uint64x2_t a, uint64x2_t b) {  in test_vaddhn_u64()
    471  uint64x2_t test_vaddl_u32(uint32x2_t a, uint32x2_t b) {  in test_vaddl_u32()
    508  uint64x2_t test_vaddw_u32(uint64x2_t a, uint32x2_t b) {  in test_vaddw_u32()
    605  uint64x2_t test_vandq_u64(uint64x2_t a, uint64x2_t b) {  in test_vandq_u64()
    702  uint64x2_t test_vbicq_u64(uint64x2_t a, uint64x2_t b) {  in test_vbicq_u64()
    793  int64x2_t test_vbslq_s64(uint64x2_t a, int64x2_t b, int64x2_t c) {  in test_vbslq_s64()
    817  uint64x2_t test_vbslq_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) {  in test_vbslq_u64()
    [all …]
|
D | arm64_vcopy.c |
    52  uint64x2_t test_vcopyq_laneq_u64(uint64x2_t a1, uint64x2_t a2) {  in test_vcopyq_laneq_u64()
|
D | arm64_vsli.c |
    128  uint64x2_t test_vsliq_n_u64(uint64x2_t a1, uint64x2_t a2) {  in test_vsliq_n_u64()
|
D | arm64_vsri.c |
    129  uint64x2_t test_vsriq_n_u64(uint64x2_t a1, uint64x2_t a2) {  in test_vsriq_n_u64()
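vsli/vsri (the two entries above) shift one operand and insert it into the other, preserving the destination bits the shift would otherwise zero, which packs bitfields without a separate mask-and-or. An illustrative sketch; the name and field split are mine:

    #include <arm_neon.h>

    /* Keep the top 16 bits of hi16; fill the low 48 with lo48 >> 16. */
    static uint64x2_t pack_16_48(uint64x2_t hi16, uint64x2_t lo48) {
        return vsriq_n_u64(hi16, lo48, 16);
    }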
|
D | debug-info.c |
    59  typedef uint64_t uint64x2_t __attribute__((ext_vector_type(2)));  typedef
    60  uint64x2_t extvectbar[4];
|
D | aarch64-neon-extract.c |
    97  uint64x2_t test_vextq_u64(uint64x2_t a, uint64x2_t b) {  in test_vextq_u64()
|
D | arm64_vqmov.c |
    48  uint32x4_t test_vqmovn_high_u64(uint32x2_t Vdlow, uint64x2_t Vn)  in test_vqmovn_high_u64()
    72  uint32x4_t test_vqmovun_high_s64(uint32x2_t Vdlow, uint64x2_t Vn)  in test_vqmovun_high_s64()
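The same high-half trick applied to saturating narrows: vqmovn_u64 fills the low half of the result, then vqmovn_high_u64 narrows directly into the upper half. A sketch, assuming AArch64 (the helper name is mine):

    #include <arm_neon.h>

    static uint32x4_t narrow_sat(uint64x2_t lo, uint64x2_t hi) {
        return vqmovn_high_u64(vqmovn_u64(lo), hi);
    }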
|
D | arm-neon-vget.c |
    49  uint64x1_t low_u64(uint64x2_t a) {  in low_u64()
    105  uint64x1_t high_u64(uint64x2_t a) {  in high_u64()
|
D | aarch64-neon-vget.c |
    172  uint64_t test_vgetq_lane_u64(uint64x2_t a) {  in test_vgetq_lane_u64()
    343  uint64x2_t test_vsetq_lane_u64(uint64_t a, uint64x2_t b) {  in test_vsetq_lane_u64()
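Lane indices for these intrinsics must be compile-time constants (0 or 1 for a uint64x2_t), and vsetq_lane_u64 returns the updated vector rather than mutating in place. A sketch that swaps the lanes through the scalar path, purely to show the lane API; a real swap compiles better as vextq_u64(v, v, 1):

    #include <arm_neon.h>

    static uint64x2_t swap_lanes(uint64x2_t v) {
        uint64_t lo = vgetq_lane_u64(v, 0);
        uint64_t hi = vgetq_lane_u64(v, 1);
        v = vsetq_lane_u64(hi, v, 0);
        return vsetq_lane_u64(lo, v, 1);
    }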
|
D | aarch64-neon-vget-hilo.c |
    51  uint64x1_t test_vget_high_u64(uint64x2_t a) {  in test_vget_high_u64()
    135  uint64x1_t test_vget_low_u64(uint64x2_t a) {  in test_vget_low_u64()
|
/external/clang/test/CodeGenCXX/ |
D | mangle-neon-vectors.cpp |
    20  typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;  typedef
    45  void f4(uint64x2_t v) { }  in f4()
|
D | aarch64-mangle-neon-vectors.cpp |
    28  typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;  typedef
    82  void f22(uint64x2_t) {}  in f22() argument
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | sad_neon.c |
    25  uint64x2_t q3;  in vpx_sad8x16_neon()
    90  uint64x2_t q3;  in vpx_sad16x8_neon()
    126  const uint64x2_t b = vpaddlq_u32(a);  in horizontal_long_add_16x8()
    133  const uint64x2_t b = vpaddlq_u32(a);  in horizontal_add_16x8()
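The horizontal_add helpers above widen before summing so the 32-bit partial sums cannot overflow. A self-contained sketch of the same pattern (the helper name is mine):

    #include <arm_neon.h>
    #include <stdint.h>

    static uint64_t horizontal_add_u32x4(uint32x4_t a) {
        const uint64x2_t b = vpaddlq_u32(a);  /* pairwise widen: 4x u32 -> 2x u64 */
        return vgetq_lane_u64(b, 0) + vgetq_lane_u64(b, 1);
    }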
|
/external/webp/src/dsp/ |
D | rescaler_neon.c |
    49  const uint64x2_t C0 = vmull_n_u32(vget_low_u32(A0), A);  in Interpolate()
    50  const uint64x2_t C1 = vmull_n_u32(vget_high_u32(A0), A);  in Interpolate()
    51  const uint64x2_t D0 = vmlal_n_u32(C0, vget_low_u32(B0), B);  in Interpolate()
    52  const uint64x2_t D1 = vmlal_n_u32(C1, vget_high_u32(B0), B);  in Interpolate()
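Interpolate() computes A0*A + B0*B in 64-bit lanes: vmull_n_u32 is the widening multiply-by-scalar and vmlal_n_u32 fuses the second multiply with the accumulate, so the sum never overflows 32 bits. A sketch reassembling the four lines shown (the function signature is mine):

    #include <arm_neon.h>
    #include <stdint.h>

    static void weighted_sum(uint32x4_t A0, uint32x4_t B0, uint32_t A, uint32_t B,
                             uint64x2_t *D0, uint64x2_t *D1) {
        const uint64x2_t C0 = vmull_n_u32(vget_low_u32(A0), A);
        const uint64x2_t C1 = vmull_n_u32(vget_high_u32(A0), A);
        *D0 = vmlal_n_u32(C0, vget_low_u32(B0), B);
        *D1 = vmlal_n_u32(C1, vget_high_u32(B0), B);
    }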
|