// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert CHANNEL_TILE % 4 == 0
$assert CHANNEL_TILE >= 4
$assert ROW_TILE >= 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
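// Template for f32 PReLU micro-kernels using WASM SIMD with v128.bitselect.
// Each main-loop iteration handles ${ROW_TILE} rows and ${CHANNEL_TILE} channels.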
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>


void xnn_f32_prelu_ukernel__wasmsimd_bitselect_${ROW_TILE}x${CHANNEL_TILE}(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride) XNN_DISABLE_TSAN
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

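  // Set up pointers to ${ROW_TILE} rows of input and output. When fewer than
  // ${ROW_TILE} rows remain, the excess pointers alias the previous row, so the
  // redundant computation is harmless.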
  const float* i0 = input;
  float* o0 = output;
  $for M in range(1, ROW_TILE):
    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_stride);
    float* o${M} = (float*) ((uintptr_t) o${M-1} + output_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(rows <= ${M}) {
        i${M} = i${M-1};
        o${M} = o${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(rows < ${M+1}) {
        i${M} = i${M-1};
        o${M} = o${M-1};
      }

  const size_t input_increment = input_stride * ${ROW_TILE} - channels;
  const size_t output_increment = output_stride * ${ROW_TILE} - channels;

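  // All-zero vector used to test input lanes for a negative sign.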
  const v128_t vzero = wasm_i32x4_splat(0);
  do {
    const float* w = weights;
    size_t c = channels;
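    // Main loop: process ${CHANNEL_TILE} channels per iteration for all ${ROW_TILE} rows.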
    for (; c >= ${CHANNEL_TILE} * sizeof(float); c -= ${CHANNEL_TILE} * sizeof(float)) {
      const v128_t vw${ABC[0:4]} = wasm_v128_load(w);
      $for C in range(4, CHANNEL_TILE, 4):
        const v128_t vw${ABC[C:C+4]} = wasm_v128_load(w + ${C});
      w += ${CHANNEL_TILE};

      $for M in range(ROW_TILE):
        const v128_t vi${M}x${ABC[0:4]} = wasm_v128_load(i${M});
        $for C in range(4, CHANNEL_TILE, 4):
          const v128_t vi${M}x${ABC[C:C+4]} = wasm_v128_load(i${M} + ${C});
        i${M} += ${CHANNEL_TILE};

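      // Multiply the inputs by the per-channel slopes and build a mask of
      // negative input lanes (sign bit tested via signed integer comparison).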
      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          v128_t vacc${M}x${ABC[C:C+4]} = wasm_f32x4_mul(vi${M}x${ABC[C:C+4]}, vw${ABC[C:C+4]});
          const v128_t vmask${M}x${ABC[C:C+4]} = wasm_i32x4_lt(vi${M}x${ABC[C:C+4]}, vzero);

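      // Blend: keep the scaled value where the input was negative, the original input otherwise.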
      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          vacc${M}x${ABC[C:C+4]} = wasm_v128_bitselect(vacc${M}x${ABC[C:C+4]}, vi${M}x${ABC[C:C+4]}, vmask${M}x${ABC[C:C+4]});

      $for M in range(ROW_TILE):
        wasm_v128_store(o${M}, vacc${M}x${ABC[0:4]});
        $for C in range(4, CHANNEL_TILE, 4):
          wasm_v128_store(o${M} + ${C}, vacc${M}x${ABC[C:C+4]});
        o${M} += ${CHANNEL_TILE};
    }
    $if CHANNEL_TILE > 4:
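      // Remainder loop: process four channels at a time until fewer than four remain.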
      for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
        const v128_t vw0123 = wasm_v128_load(w);
        w += 4;

        $for M in range(ROW_TILE):
          const v128_t vi${M}x0123 = wasm_v128_load(i${M});
          i${M} += 4;

        $for M in range(ROW_TILE):
          v128_t vacc${M}x0123 = wasm_f32x4_mul(vi${M}x0123, vw0123);
          const v128_t vmask${M}x0123 = wasm_i32x4_lt(vi${M}x0123, vzero);

        $for M in range(ROW_TILE):
          vacc${M}x0123 = wasm_v128_bitselect(vacc${M}x0123, vi${M}x0123, vmask${M}x0123);

        $for M in range(ROW_TILE):
          wasm_v128_store(o${M}, vacc${M}x0123);
          o${M} += 4;
      }
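    // Tail: handle the last 1-3 channels.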
    if XNN_UNLIKELY(c != 0) {
      const v128_t vw0123 = wasm_v128_load(w);
      w = (const float*) ((uintptr_t) w + c);

      $for M in range(ROW_TILE):
        const v128_t vi${M}x0123 = wasm_v128_load(i${M});
        i${M} = (const float*) ((uintptr_t) i${M} + c);

      $for M in range(ROW_TILE):
        v128_t vacc${M}x0123 = wasm_f32x4_mul(vi${M}x0123, vw0123);
        const v128_t vmask${M}x0123 = wasm_i32x4_lt(vi${M}x0123, vzero);

      $for M in range(ROW_TILE):
        vacc${M}x0123 = wasm_v128_bitselect(vacc${M}x0123, vi${M}x0123, vmask${M}x0123);

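      // Store two floats, then one, depending on how many channels remain.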
      if (c & (2 * sizeof(float))) {
        $for M in range(ROW_TILE):
          *((double*) o${M}) = wasm_f64x2_extract_lane(vacc${M}x0123, 0);

        $for M in range(ROW_TILE):
          vacc${M}x0123 = wasm_v32x4_shuffle(vacc${M}x0123, vacc${M}x0123, 2, 3, 2, 3);

        $for M in range(ROW_TILE):
          o${M} += 2;
      }
      if (c & (1 * sizeof(float))) {
        $for M in range(ROW_TILE):
          *o${M} = wasm_f32x4_extract_lane(vacc${M}x0123, 0);

        $for M in range(ROW_TILE):
          o${M} += 1;
      }
    }
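    // Advance the row pointers to the next block of ${ROW_TILE} rows, again
    // aliasing any rows past the end to the previous row.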
    $for M in range(ROW_TILE):
      i${M} = (const float*) ((uintptr_t) i${M} + input_increment);
      o${M} = (float*) ((uintptr_t) o${M} + output_increment);
      $if M % 2 == 1:
        if XNN_UNPREDICTABLE(rows < ${ROW_TILE + M+1}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
      $elif M != 0:
        if XNN_UNPREDICTABLE(rows <= ${ROW_TILE + M}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
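    // Subtract ${ROW_TILE} from the row count, saturating at zero (doz = difference-or-zero).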
    rows = doz(rows, ${ROW_TILE});
  } while (rows != 0);
}