/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AOM_DSP_SIMD_V128_INTRINSICS_ARM_H_
#define AOM_AOM_DSP_SIMD_V128_INTRINSICS_ARM_H_

#include <arm_neon.h>

#include "aom_dsp/simd/v64_intrinsics_arm.h"

typedef int64x2_t v128;

SIMD_INLINE uint32_t v128_low_u32(v128 a) {
  return v64_low_u32(vget_low_s64(a));
}

SIMD_INLINE v64 v128_low_v64(v128 a) { return vget_low_s64(a); }

SIMD_INLINE v64 v128_high_v64(v128 a) { return vget_high_s64(a); }

SIMD_INLINE v128 v128_from_v64(v64 a, v64 b) { return vcombine_s64(b, a); }

SIMD_INLINE v128 v128_from_64(uint64_t a, uint64_t b) {
  return vcombine_s64((int64x1_t)b, (int64x1_t)a);
}

SIMD_INLINE v128 v128_from_32(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
  return vcombine_s64(v64_from_32(c, d), v64_from_32(a, b));
}

SIMD_INLINE v128 v128_load_aligned(const void *p) {
  return vreinterpretq_s64_u8(vld1q_u8((const uint8_t *)p));
}

SIMD_INLINE v128 v128_load_unaligned(const void *p) {
  return v128_load_aligned(p);
}

SIMD_INLINE void v128_store_aligned(void *p, v128 r) {
  vst1q_u8((uint8_t *)p, vreinterpretq_u8_s64(r));
}

SIMD_INLINE void v128_store_unaligned(void *p, v128 r) {
  vst1q_u8((uint8_t *)p, vreinterpretq_u8_s64(r));
}

SIMD_INLINE v128 v128_align(v128 a, v128 b, unsigned int c) {
  // The following functions require an immediate.
  // Some compilers will check this during optimisation, others won't.
#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
  return c ? vreinterpretq_s64_s8(
                 vextq_s8(vreinterpretq_s8_s64(b), vreinterpretq_s8_s64(a), c))
           : b;
#else
  return c < 8 ? v128_from_v64(v64_align(v128_low_v64(a), v128_high_v64(b), c),
                               v64_align(v128_high_v64(b), v128_low_v64(b), c))
               : v128_from_v64(
                     v64_align(v128_high_v64(a), v128_low_v64(a), c - 8),
                     v64_align(v128_low_v64(a), v128_high_v64(b), c - 8));
#endif
}
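
/* Hedged usage sketch (illustrative only, not part of the upstream API):
   because of the immediate requirement noted above, v128_align() should be
   called with a compile-time constant byte count. The helper name
   example_align_by_4 is made up for this sketch. */
SIMD_INLINE v128 example_align_by_4(v128 a, v128 b) {
  // Concatenates a:b and extracts 16 bytes starting 4 bytes into b.
  return v128_align(a, b, 4);
}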

SIMD_INLINE v128 v128_zero() { return vreinterpretq_s64_u8(vdupq_n_u8(0)); }

SIMD_INLINE v128 v128_ones() { return vreinterpretq_s64_u8(vdupq_n_u8(-1)); }

SIMD_INLINE v128 v128_dup_8(uint8_t x) {
  return vreinterpretq_s64_u8(vdupq_n_u8(x));
}

SIMD_INLINE v128 v128_dup_16(uint16_t x) {
  return vreinterpretq_s64_u16(vdupq_n_u16(x));
}

SIMD_INLINE v128 v128_dup_32(uint32_t x) {
  return vreinterpretq_s64_u32(vdupq_n_u32(x));
}

SIMD_INLINE v128 v128_dup_64(uint64_t x) {
  return vreinterpretq_s64_u64(vdupq_n_u64(x));
}

SIMD_INLINE int64_t v128_dotp_su8(v128 a, v128 b) {
  int16x8_t t1 = vmulq_s16(
      vmovl_s8(vreinterpret_s8_s64(vget_low_s64(a))),
      vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(vget_low_s64(b)))));
  int16x8_t t2 = vmulq_s16(
      vmovl_s8(vreinterpret_s8_s64(vget_high_s64(a))),
      vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(vget_high_s64(b)))));
#if defined(__aarch64__)
  return vaddlvq_s16(t1) + vaddlvq_s16(t2);
#else
  int64x2_t t = vpaddlq_s32(vaddq_s32(vpaddlq_s16(t1), vpaddlq_s16(t2)));
  return (int64_t)vget_high_s64(t) + (int64_t)vget_low_s64(t);
#endif
}

SIMD_INLINE int64_t v128_dotp_s16(v128 a, v128 b) {
  return v64_dotp_s16(vget_high_s64(a), vget_high_s64(b)) +
         v64_dotp_s16(vget_low_s64(a), vget_low_s64(b));
}

SIMD_INLINE int64_t v128_dotp_s32(v128 a, v128 b) {
  int64x2_t t = vpaddlq_s32(
      vmulq_s32(vreinterpretq_s32_s64(a), vreinterpretq_s32_s64(b)));
  return (int64_t)vget_high_s64(t) + (int64_t)vget_low_s64(t);
}

SIMD_INLINE uint64_t v128_hadd_u8(v128 x) {
#if defined(__aarch64__)
  return vaddlvq_u8(vreinterpretq_u8_s64(x));
#else
  uint64x2_t t = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vreinterpretq_u8_s64(x))));
  return vget_lane_s32(
      vreinterpret_s32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0);
#endif
}

SIMD_INLINE v128 v128_padd_s16(v128 a) {
  return vreinterpretq_s64_s32(vpaddlq_s16(vreinterpretq_s16_s64(a)));
}

SIMD_INLINE v128 v128_padd_u8(v128 a) {
  return vreinterpretq_s64_u16(vpaddlq_u8(vreinterpretq_u8_s64(a)));
}

typedef struct {
  sad64_internal hi, lo;
} sad128_internal;

SIMD_INLINE sad128_internal v128_sad_u8_init() {
  sad128_internal s;
  s.hi = s.lo = vdupq_n_u16(0);
  return s;
}

/* Implementation dependent return value. Result must be finalised with
   v128_sad_u8_sum().
   The result for more than 32 v128_sad_u8() calls is undefined. */
SIMD_INLINE sad128_internal v128_sad_u8(sad128_internal s, v128 a, v128 b) {
  sad128_internal r;
  r.hi = v64_sad_u8(s.hi, vget_high_s64(a), vget_high_s64(b));
  r.lo = v64_sad_u8(s.lo, vget_low_s64(a), vget_low_s64(b));
  return r;
}

SIMD_INLINE uint32_t v128_sad_u8_sum(sad128_internal s) {
#if defined(__aarch64__)
  return vaddlvq_u16(s.hi) + vaddlvq_u16(s.lo);
#else
  uint64x2_t t = vpaddlq_u32(vpaddlq_u16(vaddq_u16(s.hi, s.lo)));
  return (uint32_t)(uint64_t)(vget_high_u64(t) + vget_low_u64(t));
#endif
}
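
/* Hedged usage sketch (illustrative only, not part of the upstream API):
   the SAD accumulator follows an init -> accumulate -> sum protocol, with at
   most 32 v128_sad_u8() calls per accumulator, per the note above. The helper
   name example_sad_16xh and its parameters are made up for this sketch. */
SIMD_INLINE uint32_t example_sad_16xh(const uint8_t *src, int src_stride,
                                      const uint8_t *ref, int ref_stride,
                                      int h) {
  sad128_internal s = v128_sad_u8_init();
  int i;
  // Each iteration accumulates the SAD of one 16-byte row; h must not
  // exceed 32.
  for (i = 0; i < h; i++) {
    s = v128_sad_u8(s, v128_load_unaligned(src + i * src_stride),
                    v128_load_unaligned(ref + i * ref_stride));
  }
  return v128_sad_u8_sum(s);
}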

typedef struct {
  ssd64_internal hi, lo;
} ssd128_internal;

SIMD_INLINE ssd128_internal v128_ssd_u8_init() {
  ssd128_internal s;
  s.hi = s.lo = v64_ssd_u8_init();
  return s;
}

/* Implementation dependent return value. Result must be finalised with
 * v128_ssd_u8_sum(). */
SIMD_INLINE ssd128_internal v128_ssd_u8(ssd128_internal s, v128 a, v128 b) {
  ssd128_internal r;
  r.hi = v64_ssd_u8(s.hi, vget_high_s64(a), vget_high_s64(b));
  r.lo = v64_ssd_u8(s.lo, vget_low_s64(a), vget_low_s64(b));
  return r;
}

SIMD_INLINE uint32_t v128_ssd_u8_sum(ssd128_internal s) {
  return (uint32_t)(v64_ssd_u8_sum(s.hi) + v64_ssd_u8_sum(s.lo));
}
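
/* Hedged usage sketch (illustrative only): the SSD accumulator follows the
   same init/accumulate/sum protocol as the SAD accumulator above. The helper
   name example_ssd_16xh is made up for this sketch. */
SIMD_INLINE uint32_t example_ssd_16xh(const uint8_t *src, int src_stride,
                                      const uint8_t *ref, int ref_stride,
                                      int h) {
  ssd128_internal s = v128_ssd_u8_init();
  int i;
  // Accumulate the sum of squared differences of one 16-byte row per pass.
  for (i = 0; i < h; i++) {
    s = v128_ssd_u8(s, v128_load_unaligned(src + i * src_stride),
                    v128_load_unaligned(ref + i * ref_stride));
  }
  return v128_ssd_u8_sum(s);
}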

SIMD_INLINE v128 v128_or(v128 x, v128 y) { return vorrq_s64(x, y); }

SIMD_INLINE v128 v128_xor(v128 x, v128 y) { return veorq_s64(x, y); }

SIMD_INLINE v128 v128_and(v128 x, v128 y) { return vandq_s64(x, y); }

SIMD_INLINE v128 v128_andn(v128 x, v128 y) { return vbicq_s64(x, y); }

SIMD_INLINE v128 v128_add_8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_sadd_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vqaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_sadd_s8(v128 x, v128 y) {
  return vreinterpretq_s64_s8(
      vqaddq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_add_16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vaddq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_sadd_s16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vqaddq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_add_32(v128 x, v128 y) {
  return vreinterpretq_s64_u32(
      vaddq_u32(vreinterpretq_u32_s64(x), vreinterpretq_u32_s64(y)));
}

SIMD_INLINE v128 v128_add_64(v128 x, v128 y) {
  return vreinterpretq_s64_u64(
      vaddq_u64(vreinterpretq_u64_s64(x), vreinterpretq_u64_s64(y)));
}

SIMD_INLINE v128 v128_sub_8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vsubq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_sub_16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vsubq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_ssub_s16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vqsubq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_ssub_u16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vqsubq_u16(vreinterpretq_u16_s64(x), vreinterpretq_u16_s64(y)));
}

SIMD_INLINE v128 v128_ssub_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vqsubq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_ssub_s8(v128 x, v128 y) {
  return vreinterpretq_s64_s8(
      vqsubq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_sub_32(v128 x, v128 y) {
  return vreinterpretq_s64_s32(
      vsubq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
}

SIMD_INLINE v128 v128_sub_64(v128 x, v128 y) { return vsubq_s64(x, y); }

SIMD_INLINE v128 v128_abs_s16(v128 x) {
  return vreinterpretq_s64_s16(vabsq_s16(vreinterpretq_s16_s64(x)));
}

SIMD_INLINE v128 v128_abs_s8(v128 x) {
  return vreinterpretq_s64_s8(vabsq_s8(vreinterpretq_s8_s64(x)));
}

SIMD_INLINE v128 v128_mul_s16(v64 a, v64 b) {
  return vreinterpretq_s64_s32(
      vmull_s16(vreinterpret_s16_s64(a), vreinterpret_s16_s64(b)));
}

SIMD_INLINE v128 v128_mullo_s16(v128 a, v128 b) {
  return vreinterpretq_s64_s16(
      vmulq_s16(vreinterpretq_s16_s64(a), vreinterpretq_s16_s64(b)));
}

SIMD_INLINE v128 v128_mulhi_s16(v128 a, v128 b) {
#if defined(__aarch64__)
  return vreinterpretq_s64_s16(vuzp2q_s16(
      vreinterpretq_s16_s32(vmull_s16(vreinterpret_s16_s64(vget_low_s64(a)),
                                      vreinterpret_s16_s64(vget_low_s64(b)))),
      vreinterpretq_s16_s32(
          vmull_high_s16(vreinterpretq_s16_s64(a), vreinterpretq_s16_s64(b)))));
#else
  return v128_from_v64(v64_mulhi_s16(vget_high_s64(a), vget_high_s64(b)),
                       v64_mulhi_s16(vget_low_s64(a), vget_low_s64(b)));
#endif
}

SIMD_INLINE v128 v128_mullo_s32(v128 a, v128 b) {
  return vreinterpretq_s64_s32(
      vmulq_s32(vreinterpretq_s32_s64(a), vreinterpretq_s32_s64(b)));
}

SIMD_INLINE v128 v128_madd_s16(v128 a, v128 b) {
#if defined(__aarch64__)
  int32x4_t t1 = vmull_s16(vreinterpret_s16_s64(vget_low_s64(a)),
                           vreinterpret_s16_s64(vget_low_s64(b)));
  int32x4_t t2 =
      vmull_high_s16(vreinterpretq_s16_s64(a), vreinterpretq_s16_s64(b));
  return vreinterpretq_s64_s32(vpaddq_s32(t1, t2));
#else
  return v128_from_v64(v64_madd_s16(vget_high_s64(a), vget_high_s64(b)),
                       v64_madd_s16(vget_low_s64(a), vget_low_s64(b)));
#endif
}

SIMD_INLINE v128 v128_madd_us8(v128 a, v128 b) {
#if defined(__aarch64__)
  int16x8_t t1 = vmulq_s16(
      vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(vget_low_s64(a)))),
      vmovl_s8(vreinterpret_s8_s64(vget_low_s64(b))));
  int16x8_t t2 = vmulq_s16(
      vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(vget_high_s64(a)))),
      vmovl_s8(vreinterpret_s8_s64(vget_high_s64(b))));
  return vreinterpretq_s64_s16(
      vqaddq_s16(vuzp1q_s16(t1, t2), vuzp2q_s16(t1, t2)));
#else
  return v128_from_v64(v64_madd_us8(vget_high_s64(a), vget_high_s64(b)),
                       v64_madd_us8(vget_low_s64(a), vget_low_s64(b)));
#endif
}

SIMD_INLINE v128 v128_avg_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vrhaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_rdavg_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vhaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_rdavg_u16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vhaddq_u16(vreinterpretq_u16_s64(x), vreinterpretq_u16_s64(y)));
}

SIMD_INLINE v128 v128_avg_u16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vrhaddq_u16(vreinterpretq_u16_s64(x), vreinterpretq_u16_s64(y)));
}

SIMD_INLINE v128 v128_min_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vminq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_max_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vmaxq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_min_s8(v128 x, v128 y) {
  return vreinterpretq_s64_s8(
      vminq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE uint32_t v128_movemask_8(v128 a) {
  a = vreinterpretq_s64_u8(vcltq_s8(vreinterpretq_s8_s64(a), vdupq_n_s8(0)));
#if defined(__aarch64__)
  uint8x16_t m =
      vandq_u8(vreinterpretq_u8_s64(a),
               vreinterpretq_u8_u64(vdupq_n_u64(0x8040201008040201ULL)));
  return vaddv_u8(vget_low_u8(m)) + (vaddv_u8(vget_high_u8(m)) << 8);
#else
  uint64x2_t m = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(
      vandq_u8(vreinterpretq_u8_s64(a),
               vreinterpretq_u8_u64(vdupq_n_u64(0x8040201008040201ULL))))));
  return v64_low_u32(
      v64_ziplo_8(v128_high_v64((v128)m), v128_low_v64((v128)m)));
#endif
}

SIMD_INLINE v128 v128_blend_8(v128 a, v128 b, v128 c) {
  c = vreinterpretq_s64_u8(vcltq_s8(vreinterpretq_s8_s64(c), vdupq_n_s8(0)));
  return v128_or(v128_and(b, c), v128_andn(a, c));
}

SIMD_INLINE v128 v128_max_s8(v128 x, v128 y) {
  return vreinterpretq_s64_s8(
      vmaxq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_min_s16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vminq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_max_s16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vmaxq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_min_s32(v128 x, v128 y) {
  return vreinterpretq_s64_s32(
      vminq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
}

SIMD_INLINE v128 v128_max_s32(v128 x, v128 y) {
  return vreinterpretq_s64_s32(
      vmaxq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
}

SIMD_INLINE v128 v128_ziplo_8(v128 x, v128 y) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u8(
      vzip1q_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x)));
#else
  uint8x16x2_t r = vzipq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
  return vreinterpretq_s64_u8(r.val[0]);
#endif
}

SIMD_INLINE v128 v128_ziphi_8(v128 x, v128 y) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u8(
      vzip2q_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x)));
#else
  uint8x16x2_t r = vzipq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
  return vreinterpretq_s64_u8(r.val[1]);
#endif
}

SIMD_INLINE v128 v128_zip_8(v64 x, v64 y) {
  uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
  return vreinterpretq_s64_u8(vcombine_u8(r.val[0], r.val[1]));
}

SIMD_INLINE v128 v128_ziplo_16(v128 x, v128 y) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u16(
      vzip1q_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x)));
#else
  int16x8x2_t r = vzipq_s16(vreinterpretq_s16_s64(y), vreinterpretq_s16_s64(x));
  return vreinterpretq_s64_s16(r.val[0]);
#endif
}

SIMD_INLINE v128 v128_ziphi_16(v128 x, v128 y) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u16(
      vzip2q_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x)));
#else
  int16x8x2_t r = vzipq_s16(vreinterpretq_s16_s64(y), vreinterpretq_s16_s64(x));
  return vreinterpretq_s64_s16(r.val[1]);
#endif
}

SIMD_INLINE v128 v128_zip_16(v64 x, v64 y) {
  uint16x4x2_t r = vzip_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x));
  return vreinterpretq_s64_u16(vcombine_u16(r.val[0], r.val[1]));
}

SIMD_INLINE v128 v128_ziplo_32(v128 x, v128 y) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u32(
      vzip1q_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x)));
#else
  int32x4x2_t r = vzipq_s32(vreinterpretq_s32_s64(y), vreinterpretq_s32_s64(x));
  return vreinterpretq_s64_s32(r.val[0]);
#endif
}

SIMD_INLINE v128 v128_ziphi_32(v128 x, v128 y) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u32(
      vzip2q_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x)));
#else
  int32x4x2_t r = vzipq_s32(vreinterpretq_s32_s64(y), vreinterpretq_s32_s64(x));
  return vreinterpretq_s64_s32(r.val[1]);
#endif
}

SIMD_INLINE v128 v128_zip_32(v64 x, v64 y) {
  uint32x2x2_t r = vzip_u32(vreinterpret_u32_s64(y), vreinterpret_u32_s64(x));
  return vreinterpretq_s64_u32(vcombine_u32(r.val[0], r.val[1]));
}

SIMD_INLINE v128 v128_ziplo_64(v128 a, v128 b) {
  return v128_from_v64(vget_low_s64((int64x2_t)a), vget_low_s64((int64x2_t)b));
}

SIMD_INLINE v128 v128_ziphi_64(v128 a, v128 b) {
  return v128_from_v64(vget_high_s64((int64x2_t)a),
                       vget_high_s64((int64x2_t)b));
}

SIMD_INLINE v128 v128_unziplo_8(v128 x, v128 y) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u8(
      vuzp1q_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x)));
#else
  uint8x16x2_t r = vuzpq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
  return vreinterpretq_s64_u8(r.val[0]);
#endif
}

SIMD_INLINE v128 v128_unziphi_8(v128 x, v128 y) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u8(
      vuzp2q_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x)));
#else
  uint8x16x2_t r = vuzpq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
  return vreinterpretq_s64_u8(r.val[1]);
#endif
}

SIMD_INLINE v128 v128_unziplo_16(v128 x, v128 y) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u16(
      vuzp1q_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x)));
#else
  uint16x8x2_t r =
      vuzpq_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x));
  return vreinterpretq_s64_u16(r.val[0]);
#endif
}

SIMD_INLINE v128 v128_unziphi_16(v128 x, v128 y) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u16(
      vuzp2q_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x)));
#else
  uint16x8x2_t r =
      vuzpq_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x));
  return vreinterpretq_s64_u16(r.val[1]);
#endif
}

SIMD_INLINE v128 v128_unziplo_32(v128 x, v128 y) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u32(
      vuzp1q_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x)));
#else
  uint32x4x2_t r =
      vuzpq_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x));
  return vreinterpretq_s64_u32(r.val[0]);
#endif
}

SIMD_INLINE v128 v128_unziphi_32(v128 x, v128 y) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u32(
      vuzp2q_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x)));
#else
  uint32x4x2_t r =
      vuzpq_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x));
  return vreinterpretq_s64_u32(r.val[1]);
#endif
}

SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) {
  return vreinterpretq_s64_u16(vmovl_u8(vreinterpret_u8_s64(a)));
}

SIMD_INLINE v128 v128_unpacklo_u8_s16(v128 a) {
  return vreinterpretq_s64_u16(vmovl_u8(vreinterpret_u8_s64(vget_low_s64(a))));
}

SIMD_INLINE v128 v128_unpackhi_u8_s16(v128 a) {
  return vreinterpretq_s64_u16(vmovl_u8(vreinterpret_u8_s64(vget_high_s64(a))));
}

SIMD_INLINE v128 v128_unpack_s8_s16(v64 a) {
  return vreinterpretq_s64_s16(vmovl_s8(vreinterpret_s8_s64(a)));
}

SIMD_INLINE v128 v128_unpacklo_s8_s16(v128 a) {
  return vreinterpretq_s64_s16(vmovl_s8(vreinterpret_s8_s64(vget_low_s64(a))));
}

SIMD_INLINE v128 v128_unpackhi_s8_s16(v128 a) {
  return vreinterpretq_s64_s16(vmovl_s8(vreinterpret_s8_s64(vget_high_s64(a))));
}

SIMD_INLINE v128 v128_pack_s32_s16(v128 a, v128 b) {
  return v128_from_v64(
      vreinterpret_s64_s16(vqmovn_s32(vreinterpretq_s32_s64(a))),
      vreinterpret_s64_s16(vqmovn_s32(vreinterpretq_s32_s64(b))));
}

SIMD_INLINE v128 v128_pack_s32_u16(v128 a, v128 b) {
  return v128_from_v64(
      vreinterpret_s64_u16(vqmovun_s32(vreinterpretq_s32_s64(a))),
      vreinterpret_s64_u16(vqmovun_s32(vreinterpretq_s32_s64(b))));
}

SIMD_INLINE v128 v128_pack_s16_u8(v128 a, v128 b) {
  return v128_from_v64(
      vreinterpret_s64_u8(vqmovun_s16(vreinterpretq_s16_s64(a))),
      vreinterpret_s64_u8(vqmovun_s16(vreinterpretq_s16_s64(b))));
}

SIMD_INLINE v128 v128_pack_s16_s8(v128 a, v128 b) {
  return v128_from_v64(
      vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s64(a))),
      vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s64(b))));
}

SIMD_INLINE v128 v128_unpack_u16_s32(v64 a) {
  return vreinterpretq_s64_u32(vmovl_u16(vreinterpret_u16_s64(a)));
}

SIMD_INLINE v128 v128_unpack_s16_s32(v64 a) {
  return vreinterpretq_s64_s32(vmovl_s16(vreinterpret_s16_s64(a)));
}

SIMD_INLINE v128 v128_unpacklo_u16_s32(v128 a) {
  return vreinterpretq_s64_u32(
      vmovl_u16(vreinterpret_u16_s64(vget_low_s64(a))));
}

SIMD_INLINE v128 v128_unpacklo_s16_s32(v128 a) {
  return vreinterpretq_s64_s32(
      vmovl_s16(vreinterpret_s16_s64(vget_low_s64(a))));
}

SIMD_INLINE v128 v128_unpackhi_u16_s32(v128 a) {
  return vreinterpretq_s64_u32(
      vmovl_u16(vreinterpret_u16_s64(vget_high_s64(a))));
}

SIMD_INLINE v128 v128_unpackhi_s16_s32(v128 a) {
  return vreinterpretq_s64_s32(
      vmovl_s16(vreinterpret_s16_s64(vget_high_s64(a))));
}

SIMD_INLINE v128 v128_shuffle_8(v128 x, v128 pattern) {
#if defined(__aarch64__)
  return vreinterpretq_s64_u8(
      vqtbl1q_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(pattern)));
#else
  uint8x8x2_t p = { { vget_low_u8(vreinterpretq_u8_s64(x)),
                      vget_high_u8(vreinterpretq_u8_s64(x)) } };
  return v128_from_64((uint64_t)vreinterpret_s64_u8(vtbl2_u8(
                          p, vreinterpret_u8_s64(vget_high_s64(pattern)))),
                      (uint64_t)vreinterpret_s64_u8(vtbl2_u8(
                          p, vreinterpret_u8_s64(vget_low_s64(pattern)))));
#endif
}

SIMD_INLINE v128 v128_cmpgt_s8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vcgtq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_cmplt_s8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vcltq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_cmpeq_8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vceqq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_cmpgt_s16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vcgtq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_cmplt_s16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vcltq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_cmpeq_16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vceqq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_cmpgt_s32(v128 x, v128 y) {
  return vreinterpretq_s64_u32(
      vcgtq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
}

SIMD_INLINE v128 v128_cmplt_s32(v128 x, v128 y) {
  return vreinterpretq_s64_u32(
      vcltq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
}

SIMD_INLINE v128 v128_cmpeq_32(v128 x, v128 y) {
  return vreinterpretq_s64_u32(
      vceqq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
}

SIMD_INLINE v128 v128_shl_8(v128 a, unsigned int c) {
  return (c > 7) ? v128_zero()
                 : vreinterpretq_s64_u8(
                       vshlq_u8(vreinterpretq_u8_s64(a), vdupq_n_s8(c)));
}

SIMD_INLINE v128 v128_shr_u8(v128 a, unsigned int c) {
  return (c > 7) ? v128_zero()
                 : vreinterpretq_s64_u8(
                       vshlq_u8(vreinterpretq_u8_s64(a), vdupq_n_s8(-c)));
}

SIMD_INLINE v128 v128_shr_s8(v128 a, unsigned int c) {
  return (c > 7) ? v128_ones()
                 : vreinterpretq_s64_s8(
                       vshlq_s8(vreinterpretq_s8_s64(a), vdupq_n_s8(-c)));
}

SIMD_INLINE v128 v128_shl_16(v128 a, unsigned int c) {
  return (c > 15) ? v128_zero()
                  : vreinterpretq_s64_u16(
                        vshlq_u16(vreinterpretq_u16_s64(a), vdupq_n_s16(c)));
}

SIMD_INLINE v128 v128_shr_u16(v128 a, unsigned int c) {
  return (c > 15) ? v128_zero()
                  : vreinterpretq_s64_u16(
                        vshlq_u16(vreinterpretq_u16_s64(a), vdupq_n_s16(-c)));
}

SIMD_INLINE v128 v128_shr_s16(v128 a, unsigned int c) {
  return (c > 15) ? v128_ones()
                  : vreinterpretq_s64_s16(
                        vshlq_s16(vreinterpretq_s16_s64(a), vdupq_n_s16(-c)));
}

SIMD_INLINE v128 v128_shl_32(v128 a, unsigned int c) {
  return (c > 31) ? v128_zero()
                  : vreinterpretq_s64_u32(
                        vshlq_u32(vreinterpretq_u32_s64(a), vdupq_n_s32(c)));
}

SIMD_INLINE v128 v128_shr_u32(v128 a, unsigned int c) {
  return (c > 31) ? v128_zero()
                  : vreinterpretq_s64_u32(
                        vshlq_u32(vreinterpretq_u32_s64(a), vdupq_n_s32(-c)));
}

SIMD_INLINE v128 v128_shr_s32(v128 a, unsigned int c) {
  return (c > 31) ? v128_ones()
                  : vreinterpretq_s64_s32(
                        vshlq_s32(vreinterpretq_s32_s64(a), vdupq_n_s32(-c)));
}

SIMD_INLINE v128 v128_shl_64(v128 a, unsigned int c) {
  return (c > 63) ? v128_zero()
                  : vreinterpretq_s64_u64(
                        vshlq_u64(vreinterpretq_u64_s64(a), vdupq_n_s64(c)));
}

SIMD_INLINE v128 v128_shr_u64(v128 a, unsigned int c) {
  return (c > 63) ? v128_zero()
                  : vreinterpretq_s64_u64(
                        vshlq_u64(vreinterpretq_u64_s64(a), vdupq_n_s64(-c)));
}

SIMD_INLINE v128 v128_shr_s64(v128 a, unsigned int c) {
  return (c > 63) ? v128_ones() : vshlq_s64(a, vdupq_n_s64(-c));
}

#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)

SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
  return n < 8
             ? v128_from_64(
                   (uint64_t)vorr_u64(
                       vshl_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
                                  n * 8),
                       vshr_n_u64(vreinterpret_u64_s64(vget_low_s64(a)),
                                  (8 - n) * 8)),
                   (uint64_t)vshl_n_u64(vreinterpret_u64_s64(vget_low_s64(a)),
                                        n * 8))
             : (n == 8 ? v128_from_64(
                             (uint64_t)vreinterpret_u64_s64(vget_low_s64(a)), 0)
                       : v128_from_64((uint64_t)vshl_n_u64(
                                          vreinterpret_u64_s64(vget_low_s64(a)),
                                          (n - 8) * 8),
                                      0));
}

SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
  return n < 8
             ? v128_from_64(
                   (uint64_t)vshr_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
                                        n * 8),
                   (uint64_t)vorr_u64(
                       vshr_n_u64(vreinterpret_u64_s64(vget_low_s64(a)), n * 8),
                       vshl_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
                                  (8 - n) * 8)))
             : (n == 8 ? v128_from_64(0, (uint64_t)vreinterpret_u64_s64(
                                             vget_high_s64(a)))
                       : v128_from_64(
                             0, (uint64_t)vshr_n_u64(
                                    vreinterpret_u64_s64(vget_high_s64(a)),
                                    (n - 8) * 8)));
}

SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int c) {
  return vreinterpretq_s64_u8(vshlq_n_u8(vreinterpretq_u8_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_u8(v128 a, unsigned int c) {
  return vreinterpretq_s64_u8(vshrq_n_u8(vreinterpretq_u8_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_s8(v128 a, unsigned int c) {
  return vreinterpretq_s64_s8(vshrq_n_s8(vreinterpretq_s8_s64(a), c));
}

SIMD_INLINE v128 v128_shl_n_16(v128 a, unsigned int c) {
  return vreinterpretq_s64_u16(vshlq_n_u16(vreinterpretq_u16_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_u16(v128 a, unsigned int c) {
  return vreinterpretq_s64_u16(vshrq_n_u16(vreinterpretq_u16_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_s16(v128 a, unsigned int c) {
  return vreinterpretq_s64_s16(vshrq_n_s16(vreinterpretq_s16_s64(a), c));
}

SIMD_INLINE v128 v128_shl_n_32(v128 a, unsigned int c) {
  return vreinterpretq_s64_u32(vshlq_n_u32(vreinterpretq_u32_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_u32(v128 a, unsigned int c) {
  return vreinterpretq_s64_u32(vshrq_n_u32(vreinterpretq_u32_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int c) {
  return vreinterpretq_s64_s32(vshrq_n_s32(vreinterpretq_s32_s64(a), c));
}

SIMD_INLINE v128 v128_shl_n_64(v128 a, unsigned int c) {
  return vreinterpretq_s64_u64(vshlq_n_u64(vreinterpretq_u64_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_u64(v128 a, unsigned int c) {
  return vreinterpretq_s64_u64(vshrq_n_u64(vreinterpretq_u64_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_s64(v128 a, unsigned int c) {
  return vshrq_n_s64(a, c);
}

#else

SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
  if (n < 8)
    return v128_from_v64(v64_or(v64_shl_n_byte(v128_high_v64(a), n),
                                v64_shr_n_byte(v128_low_v64(a), 8 - n)),
                         v64_shl_n_byte(v128_low_v64(a), n));
  else
    return v128_from_v64(v64_shl_n_byte(v128_low_v64(a), n - 8), v64_zero());
}

SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
  if (n < 8)
    return v128_from_v64(v64_shr_n_byte(v128_high_v64(a), n),
                         v64_or(v64_shr_n_byte(v128_low_v64(a), n),
                                v64_shl_n_byte(v128_high_v64(a), 8 - n)));
  else
    return v128_from_v64(v64_zero(), v64_shr_n_byte(v128_high_v64(a), n - 8));
}

SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int c) {
  return v128_shl_8(a, c);
}

SIMD_INLINE v128 v128_shr_n_u8(v128 a, unsigned int c) {
  return v128_shr_u8(a, c);
}

SIMD_INLINE v128 v128_shr_n_s8(v128 a, unsigned int c) {
  return v128_shr_s8(a, c);
}

SIMD_INLINE v128 v128_shl_n_16(v128 a, unsigned int c) {
  return v128_shl_16(a, c);
}

SIMD_INLINE v128 v128_shr_n_u16(v128 a, unsigned int c) {
  return v128_shr_u16(a, c);
}

SIMD_INLINE v128 v128_shr_n_s16(v128 a, unsigned int c) {
  return v128_shr_s16(a, c);
}

SIMD_INLINE v128 v128_shl_n_32(v128 a, unsigned int c) {
  return v128_shl_32(a, c);
}

SIMD_INLINE v128 v128_shr_n_u32(v128 a, unsigned int c) {
  return v128_shr_u32(a, c);
}

SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int c) {
  return v128_shr_s32(a, c);
}

SIMD_INLINE v128 v128_shl_n_64(v128 a, unsigned int c) {
  return v128_shl_64(a, c);
}

SIMD_INLINE v128 v128_shr_n_u64(v128 a, unsigned int c) {
  return v128_shr_u64(a, c);
}

SIMD_INLINE v128 v128_shr_n_s64(v128 a, unsigned int c) {
  return v128_shr_s64(a, c);
}

#endif

typedef uint32x4_t sad128_internal_u16;

SIMD_INLINE sad128_internal_u16 v128_sad_u16_init() { return vdupq_n_u32(0); }

/* Implementation dependent return value. Result must be finalised with
 * v128_sad_u16_sum(). */
SIMD_INLINE sad128_internal_u16 v128_sad_u16(sad128_internal_u16 s, v128 a,
                                             v128 b) {
  return vaddq_u32(
      s, vpaddlq_u16(vsubq_u16(
             vmaxq_u16(vreinterpretq_u16_s64(a), vreinterpretq_u16_s64(b)),
             vminq_u16(vreinterpretq_u16_s64(a), vreinterpretq_u16_s64(b)))));
}

SIMD_INLINE uint32_t v128_sad_u16_sum(sad128_internal_u16 s) {
  uint64x2_t t = vpaddlq_u32(s);
  return (uint32_t)(uint64_t)vget_high_u64(t) +
         (uint32_t)(uint64_t)vget_low_u64(t);
}
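
/* Hedged usage sketch (illustrative only): the 16-bit SAD accumulator is
   typically used with high bit-depth samples. The helper name
   example_sad_u16_8xh and its parameters are made up for this sketch. */
SIMD_INLINE uint32_t example_sad_u16_8xh(const uint16_t *src, int src_stride,
                                         const uint16_t *ref, int ref_stride,
                                         int h) {
  sad128_internal_u16 s = v128_sad_u16_init();
  int i;
  // Each v128 holds eight 16-bit samples, i.e. one row of an 8-wide block;
  // strides are in 16-bit units here.
  for (i = 0; i < h; i++) {
    s = v128_sad_u16(s, v128_load_unaligned(src + i * src_stride),
                     v128_load_unaligned(ref + i * ref_stride));
  }
  return v128_sad_u16_sum(s);
}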

typedef v128 ssd128_internal_s16;
SIMD_INLINE ssd128_internal_s16 v128_ssd_s16_init() { return v128_zero(); }

/* Implementation dependent return value. Result must be finalised with
 * v128_ssd_s16_sum(). */
SIMD_INLINE ssd128_internal_s16 v128_ssd_s16(ssd128_internal_s16 s, v128 a,
                                             v128 b) {
  v128 d = v128_sub_16(a, b);
  d = v128_madd_s16(d, d);
  return v128_add_64(
      s, vreinterpretq_s64_u64(vpaddlq_u32(vreinterpretq_u32_s64(d))));
}

SIMD_INLINE uint64_t v128_ssd_s16_sum(ssd128_internal_s16 s) {
  return v64_u64(v128_low_v64(s)) + v64_u64(v128_high_v64(s));
}
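
/* Hedged usage sketch (illustrative only): the signed 16-bit SSD accumulator
   follows the same protocol and returns a 64-bit sum. The helper name
   example_ssd_s16 is made up for this sketch. */
SIMD_INLINE uint64_t example_ssd_s16(const int16_t *a, const int16_t *b,
                                     int n) {
  ssd128_internal_s16 s = v128_ssd_s16_init();
  int i;
  // Process eight 16-bit elements per iteration; n is assumed to be a
  // multiple of 8.
  for (i = 0; i < n; i += 8) {
    s = v128_ssd_s16(s, v128_load_unaligned(a + i),
                     v128_load_unaligned(b + i));
  }
  return v128_ssd_s16_sum(s);
}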

#endif  // AOM_AOM_DSP_SIMD_V128_INTRINSICS_ARM_H_