Searched refs:vshl_u64 (Results 1 – 8 of 8) sorted by relevance
/external/XNNPACK/src/x8-zip/ |
D | xm-neon.c |  81 const uint64x1_t vx = vshl_u64(vreinterpret_u64_u8(vld1_u8(x)), vshift); in xnn_x8_zip_xm_ukernel__neon()
                 82 const uint64x1_t vy = vshl_u64(vreinterpret_u64_u8(vld1_u8(y)), vshift); in xnn_x8_zip_xm_ukernel__neon()
                 83 const uint64x1_t vz = vshl_u64(vreinterpret_u64_u8(vld1_u8(z)), vshift); in xnn_x8_zip_xm_ukernel__neon()
                 84 const uint64x1_t vw = vshl_u64(vreinterpret_u64_u8(vld1_u8(w)), vshift); w += 8; in xnn_x8_zip_xm_ukernel__neon()
|
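Note: the xm-neon.c hits above rely on vshl_u64's defining quirk: the per-lane
shift count is *signed*, so one intrinsic covers both directions (positive
counts shift left, negative counts shift right). A minimal standalone sketch of
that semantics, using the same vld1_u8/vreinterpret load pattern as the kernel
(the byte values and shift counts here are illustrative, not XNNPACK's):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        const uint8_t bytes[8] = {1, 2, 3, 4, 5, 6, 7, 8};
        /* Reinterpret 8 loaded bytes as a single 64-bit lane, as in xm-neon.c. */
        const uint64x1_t v = vreinterpret_u64_u8(vld1_u8(bytes));

        const int64x1_t vleft  = vdup_n_s64(8);   /* positive count: shift left 8 bits  */
        const int64x1_t vright = vdup_n_s64(-8);  /* negative count: shift right 8 bits */

        printf("left : %016llx\n",
               (unsigned long long) vget_lane_u64(vshl_u64(v, vleft), 0));
        printf("right: %016llx\n",
               (unsigned long long) vget_lane_u64(vshl_u64(v, vright), 0));
        return 0;
    }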
/external/llvm-project/libc/AOR_v20.02/networking/arm/ |
D | chksum_simd.c |  41 vmask = vshl_u64(vmask, vshiftl); in __chksum_arm_simd()
                    121 vmask = vshl_u64(vmask, vshiftr);/* Shift right */ in __chksum_arm_simd()
|
/external/arm-optimized-routines/networking/arm/ |
D | chksum_simd.c |  40 vmask = vshl_u64(vmask, vshiftl); in __chksum_arm_simd()
                    120 vmask = vshl_u64(vmask, vshiftr);/* Shift right */ in __chksum_arm_simd()
|
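Note: both chksum_simd.c copies exploit the signed count in both directions:
vshiftl holds a positive count and vshiftr a negative one, so the same
vshl_u64 call builds a "drop the low bytes" and a "drop the high bytes" mask
(hence the /* Shift right */ comment). A hedged sketch of that masking idiom;
the variable names and head/tail orientation are illustrative, not copied from
__chksum_arm_simd:

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        const int unaligned_bytes = 3;  /* e.g. buffer starts 3 bytes into a word */

        /* Positive count: shift the all-ones mask left, zeroing low bytes. */
        uint64x1_t vmask = vdup_n_u64(~UINT64_C(0));
        const int64x1_t vshiftl = vdup_n_s64(8 * unaligned_bytes);
        vmask = vshl_u64(vmask, vshiftl);
        printf("mask after left shift : %016llx\n",
               (unsigned long long) vget_lane_u64(vmask, 0));

        /* Negative count: the same intrinsic shifts right, zeroing high bytes. */
        vmask = vdup_n_u64(~UINT64_C(0));
        const int64x1_t vshiftr = vdup_n_s64(-8 * unaligned_bytes);
        vmask = vshl_u64(vmask, vshiftr);
        printf("mask after right shift: %016llx\n",
               (unsigned long long) vget_lane_u64(vmask, 0));
        return 0;
    }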
/external/llvm-project/clang/test/CodeGen/ |
D | aarch64-neon-intrinsics.c | 3198 return vshl_u64(a, b); in test_vshl_u64()
|
D | arm_neon_intrinsics.c | 14437 return vshl_u64(a, b); in test_vshl_u64()
|
/external/clang/test/CodeGen/ |
D | aarch64-neon-intrinsics.c | 3549 return vshl_u64(a, b); in test_vshl_u64()
|
D | arm_neon_intrinsics.c | 16890 return vshl_u64(a, b); in test_vshl_u64()
|
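Note: the four clang codegen hits above all have the same shape: a one-line
wrapper whose body is the bare intrinsic call, with FileCheck lines (omitted
here) asserting the lowering (ushl on AArch64; VSHL.U64 on 32-bit ARM, per the
comment in NEON_2_SSE.h below). The signature follows from the intrinsic
prototype:

    #include <arm_neon.h>

    uint64x1_t test_vshl_u64(uint64x1_t a, int64x1_t b) {
        return vshl_u64(a, b);
    }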
/external/neon_2_sse/ |
D | NEON_2_SSE.h |  962 _NEON2SSESTORAGE uint64x1_t vshl_u64(uint64x1_t a, int64x1_t b); // VSHL.U64 d0,d0,d0
                   7023 _NEON2SSESTORAGE uint64x1_t vshl_u64(uint64x1_t a, int64x1_t b); // VSHL.U64 d0,d0,d0
                   7024 _NEON2SSE_INLINE uint64x1_t vshl_u64(uint64x1_t a, int64x1_t b) //if we use the SERIAL_SHIFT macro … in vshl_u64() function
|
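Note: x86 has no 64-bit variable shift with a signed per-lane count, so the
NEON_2_SSE definition at line 7024 falls back to element-wise scalar code (the
truncated "SERIAL_SHIFT macro" comment). A hedged sketch of what such a serial
fallback computes; this is illustrative C, not the header's actual macro:

    #include <stdint.h>

    static uint64_t vshl_u64_serial(uint64_t a, int64_t b) {
        /* Only the least significant byte of the shift lane is used,
         * and it is interpreted as a signed count. */
        const int8_t shift = (int8_t)(b & 0xff);
        if (shift >= 64 || shift <= -64) return 0;  /* fully shifted out */
        return (shift >= 0) ? (a << shift) : (a >> -shift);
    }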