// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/vadd.h>


void xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x${BATCH_TILE}(
    size_t n,
    const int8_t* input_x,
    const int8_t* input_y,
    int8_t* output,
    const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
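  // Load the pre-computed requantization parameters: multipliers, rounding constants, shift, output zero point, and clamping bounds.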
  const v128_t vx_multiplier = wasm_v128_load(params->wasmsimd.x_multiplier);
  const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask);
  const v128_t vremainder_threshold = wasm_v128_load(params->wasmsimd.remainder_threshold);
  const int32_t vshift = params->wasmsimd.shift;
  const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point);
  const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min);
  const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max);

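  // input_y points to a single (broadcast) value: pre-compute its scaled contribution to the accumulator once, outside the main loop.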
  v128_t vzero_point_product = wasm_i32x4_splat((int32_t) *input_y * params->wasmsimd.y_multiplier[0]);
  vzero_point_product = wasm_i32x4_add(vzero_point_product, wasm_v128_load(params->wasmsimd.zero_point_product));

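  // Main loop: process ${BATCH_TILE} elements per iteration.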
  for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
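    // Load the x inputs and sign-extend them to 16 bits.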
    const v128_t vx${ABC[0:8]} = wasm_i16x8_load_8x8(input_x);
    $for N in range(8, BATCH_TILE, 8):
      const v128_t vx${ABC[N:N+8]} = wasm_i16x8_load_8x8(input_x + ${N});
    input_x += ${BATCH_TILE};

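    // Widen the inputs to 32 bits, multiply them by the x multiplier, and add the pre-computed zero-point product.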
    $for N in range(0, BATCH_TILE, 8):
      v128_t vacc${ABC[N:N+4]} = wasm_i32x4_add(vzero_point_product, wasm_i32x4_mul(wasm_i32x4_widen_low_i16x8(vx${ABC[N:N+8]}), vx_multiplier));
      v128_t vacc${ABC[N+4:N+8]} = wasm_i32x4_add(vzero_point_product, wasm_i32x4_mul(wasm_i32x4_widen_high_i16x8(vx${ABC[N:N+8]}), vx_multiplier));

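    // Compute the remainder of the arithmetic shift, adjusted by the sign of the accumulator.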
    $for N in range(0, BATCH_TILE, 4):
      const v128_t vrem${ABC[N:N+4]} = wasm_i32x4_add(wasm_v128_and(vacc${ABC[N:N+4]}, vremainder_mask), wasm_i32x4_shr(vacc${ABC[N:N+4]}, 31));

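    // Arithmetic-shift the accumulators right; the comparison yields all ones (-1) when the remainder exceeds the threshold, so the subtraction adds 1 for rounding.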
    $for N in range(0, BATCH_TILE, 4):
      vacc${ABC[N:N+4]} = wasm_i32x4_sub(wasm_i32x4_shr(vacc${ABC[N:N+4]}, vshift), wasm_i32x4_gt(vrem${ABC[N:N+4]}, vremainder_threshold));

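    // Pack the 32-bit accumulators into 16 bits with signed saturation and add the output zero point.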
    $for N in range(0, BATCH_TILE, 8):
      v128_t vout${ABC[N:N+8]} = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc${ABC[N:N+4]}, vacc${ABC[N+4:N+8]}), voutput_zero_point);

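    // Pack the 16-bit values into 8 bits with signed saturation.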
    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        v128_t vout${ABC[N:N+16]} = wasm_i8x16_narrow_i16x8(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
      $else:
        v128_t vout${ABC[N:N+8]}${ABC[N:N+8]} = wasm_i8x16_narrow_i16x8(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});

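    // Clamp the outputs from below at the output minimum.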
    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        vout${ABC[N:N+16]} = wasm_i8x16_max(vout${ABC[N:N+16]}, voutput_min);
      $else:
        vout${ABC[N:N+8]}${ABC[N:N+8]} = wasm_i8x16_max(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_min);

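    // Clamp the outputs from above at the output maximum.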
    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        vout${ABC[N:N+16]} = wasm_i8x16_min(vout${ABC[N:N+16]}, voutput_max);
      $else:
        vout${ABC[N:N+8]}${ABC[N:N+8]} = wasm_i8x16_min(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);

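    // Store the ${BATCH_TILE} outputs.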
    $if BATCH_TILE >= 16:
      wasm_v128_store(output, vout${ABC[0:16]});
    $else:
      *((double*) output) = wasm_f64x2_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
    $for N in range(16, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        wasm_v128_store(output + ${N}, vout${ABC[N:N+16]});
      $else:
        *((double*) (output + ${N})) = wasm_f64x2_extract_lane(vout${ABC[N:N+8]}${ABC[N:N+8]}, 0);
    output += ${BATCH_TILE};
  }
  if XNN_UNLIKELY(n != 0) {
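    // Process the remaining elements (fewer than ${BATCH_TILE}).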
    ${"do " if BATCH_TILE > 8 else ""}{
      const v128_t vx${ABC[0:8]} = wasm_i16x8_load_8x8(input_x);
      $if BATCH_TILE > 8:
        input_x += 8;

      v128_t vacc${ABC[0:4]} = wasm_i32x4_add(vzero_point_product, wasm_i32x4_mul(wasm_i32x4_widen_low_i16x8(vx${ABC[0:8]}), vx_multiplier));
      v128_t vacc${ABC[4:8]} = wasm_i32x4_add(vzero_point_product, wasm_i32x4_mul(wasm_i32x4_widen_high_i16x8(vx${ABC[0:8]}), vx_multiplier));

      const v128_t vrem${ABC[0:4]} = wasm_i32x4_add(wasm_v128_and(vacc${ABC[0:4]}, vremainder_mask), wasm_i32x4_shr(vacc${ABC[0:4]}, 31));
      const v128_t vrem${ABC[4:8]} = wasm_i32x4_add(wasm_v128_and(vacc${ABC[4:8]}, vremainder_mask), wasm_i32x4_shr(vacc${ABC[4:8]}, 31));

      vacc${ABC[0:4]} = wasm_i32x4_sub(wasm_i32x4_shr(vacc${ABC[0:4]}, vshift), wasm_i32x4_gt(vrem${ABC[0:4]}, vremainder_threshold));
      vacc${ABC[4:8]} = wasm_i32x4_sub(wasm_i32x4_shr(vacc${ABC[4:8]}, vshift), wasm_i32x4_gt(vrem${ABC[4:8]}, vremainder_threshold));

      v128_t vout${ABC[0:8]} = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);

      v128_t vout${ABC[0:8]}${ABC[0:8]} = wasm_i8x16_narrow_i16x8(vout${ABC[0:8]}, vout${ABC[0:8]});
      vout${ABC[0:8]}${ABC[0:8]} = wasm_i8x16_max(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
      vout${ABC[0:8]}${ABC[0:8]} = wasm_i8x16_min(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);

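      // Write out the remaining results, using progressively narrower stores for the last 1-7 bytes.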
      $if BATCH_TILE > 8:
        if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
          *((double*) output) = wasm_f64x2_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
          output += 8;
          n -= 8 * sizeof(int8_t);
        } else {
          if (n & (4 * sizeof(int8_t))) {
            *((uint32_t*) output) = (uint32_t) wasm_i32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
            vout${ABC[0:8]}${ABC[0:8]} = wasm_u64x2_shr(vout${ABC[0:8]}${ABC[0:8]}, 32);
            output += 4;
          }
          if (n & (2 * sizeof(int8_t))) {
            *((uint16_t*) output) = (uint16_t) wasm_i16x8_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
            vout${ABC[0:8]}${ABC[0:8]} = wasm_u32x4_shr(vout${ABC[0:8]}${ABC[0:8]}, 16);
            output += 2;
          }
          if (n & (1 * sizeof(int8_t))) {
            *output = wasm_i8x16_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
          }
          n = 0;
        }
      $else:
        if (n & (4 * sizeof(int8_t))) {
          *((uint32_t*) output) = (uint32_t) wasm_i32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
          vout${ABC[0:8]}${ABC[0:8]} = wasm_u64x2_shr(vout${ABC[0:8]}${ABC[0:8]}, 32);
          output += 4;
        }
        if (n & (2 * sizeof(int8_t))) {
          *((uint16_t*) output) = (uint16_t) wasm_i16x8_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
          vout${ABC[0:8]}${ABC[0:8]} = wasm_u32x4_shr(vout${ABC[0:8]}${ABC[0:8]}, 16);
          output += 2;
        }
        if (n & (1 * sizeof(int8_t))) {
          *output = wasm_i8x16_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
        }
    }${" while (n != 0);" if BATCH_TILE > 8 else ""}
  }
}
