// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR == 2
$assert MR % 2 == 0
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


$assert ACTIVATION in ["LINEAR", "RELU", "MINMAX"]
$ACTIVATION_SUFFIX = {"LINEAR": ""}.get(ACTIVATION, "_" + ACTIVATION.lower())
$ARCH_SUFFIX = "" if ACTIVATION in ["LINEAR", "RELU"] else "_x86" if X86 else "_arm"
$PARAMS = {"LINEAR": "xnn_f32_default_params", "RELU": "xnn_f32_relu_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
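// GEMM micro-kernel producing an ${MR}x${NR} tile of C in the "c4" layout:
// the K dimension is consumed 4 floats at a time, and each v128 accumulator
// carries 4 partial sums for one output element; the partial sums are reduced
// to scalars after the K loop.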
void xnn_f32_gemm${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}c4__wasmsimd${ARCH_SUFFIX}(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  const float* a0 = a;
  float* c0 = c;
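  // Set up the remaining row pointers. When mr is smaller than ${MR}, the
  // excess rows alias the previous row, so they recompute the same values and
  // store them to the same place instead of touching out-of-bounds memory.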
  $for M in range(1, MR):
    const float* a${M} = (const float*) ((uintptr_t) a${M-1} + a_stride);
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

  $if ACTIVATION == "MINMAX" and not X86:
    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  do {
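    // Initialize the accumulators: lane 0 picks up the per-column bias (the
    // first ${NR} floats of w); the other three lanes start at zero.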
    v128_t vacc0x0c4 = wasm_f32x4_replace_lane(wasm_f32x4_splat(0.0f), 0, w[0]);
    $for N in range(1, NR):
      v128_t vacc0x${N}c4 = wasm_f32x4_replace_lane(vacc0x0c4, 0, w[${N}]);
    $for M in range(1, MR):
      $for N in range(NR):
        v128_t vacc${M}x${N}c4 = vacc0x${N}c4;
    w += ${NR};

    size_t k = kc;
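    // Main K loop: each iteration multiply-accumulates 4 consecutive floats of
    // every A row against the matching 4-float group of each B column.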
    for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
      $for M in range(MR):
        const v128_t va${M} = wasm_v128_load(a${M});
        a${M} += 4;

      const v128_t vb0 = wasm_v128_load(w);
      $for N in range(1, NR):
        const v128_t vb${N} = wasm_v128_load(w + ${N * 4});
      w += ${NR * 4};

      $for M in range(MR):
        $for N in range(NR):
          vacc${M}x${N}c4 = wasm_f32x4_add(vacc${M}x${N}c4, wasm_f32x4_mul(va${M}, vb${N}));
    }
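    // K remainder (kc is not a multiple of 4): still load a full 4-float
    // vector from each A row, but advance the pointers by only the k valid
    // bytes.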
    if XNN_UNLIKELY(k != 0) {
      $for M in range(MR):
        const v128_t va${M} = wasm_v128_load(a${M});
        a${M} = (const float*) ((uintptr_t) a${M} + k);

      const v128_t vb0 = wasm_v128_load(w);
      $for N in range(1, NR):
        const v128_t vb${N} = wasm_v128_load(w + ${N * 4});
      w += ${NR * 4};

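      // The packed weights are zero-padded out to a multiple of 4. Also zero
      // the corresponding lanes of va, so that garbage in the over-read lanes
      // (possibly NaN or Inf) cannot reach the accumulators via 0 * NaN.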
      const v128_t vzero = wasm_f32x4_splat(0.0f);
      $for N in range(NR):
        const v128_t vmask${N} = wasm_f32x4_eq(vb${N}, vzero);

      $for M in range(MR):
        $for N in range(NR):
          vacc${M}x${N}c4 = wasm_f32x4_add(vacc${M}x${N}c4, wasm_f32x4_mul(wasm_v128_andnot(va${M}, vmask${N}), vb${N}));
    }

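    // Reduce the c4 accumulators to final outputs with pairwise shuffles and
    // adds: first collapse the 4 partial sums per column within each row, then
    // merge row pairs so that lanes 0-1 hold row M's two outputs and lanes 2-3
    // hold row M+1's.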
    $for M in range(MR):
      const v128_t vacc${M}x01c2 = wasm_f32x4_add(
        wasm_v32x4_shuffle(vacc${M}x0c4, vacc${M}x1c4, 0, 4, 1, 5),
        wasm_v32x4_shuffle(vacc${M}x0c4, vacc${M}x1c4, 2, 6, 3, 7));

    $for M in range(0, MR, 2):
      v128_t vacc${M}${M+1}x01 = wasm_f32x4_add(
        wasm_v32x4_shuffle(vacc${M}x01c2, vacc${M+1}x01c2, 0, 1, 4, 5),
        wasm_v32x4_shuffle(vacc${M}x01c2, vacc${M+1}x01c2, 2, 3, 6, 7));

    $if ACTIVATION == "MINMAX":
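      // Clamp below at min, then above at max. The compare + bitselect form is
      // used on x86, where WAsm f32x4.min/max lower to several instructions to
      // honor NaN propagation; on ARM they map to single instructions.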
      $if X86:
        const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
        $for M in range(0, MR, 2):
          vacc${M}${M+1}x01 = wasm_v128_bitselect(vmin, vacc${M}${M+1}x01, wasm_f32x4_lt(vacc${M}${M+1}x01, vmin));
      $else:
        $for M in range(0, MR, 2):
          vacc${M}${M+1}x01 = wasm_f32x4_max(vacc${M}${M+1}x01, vmin);

      $if X86:
        const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
        $for M in range(0, MR, 2):
          vacc${M}${M+1}x01 = wasm_v128_bitselect(vacc${M}${M+1}x01, vmax, wasm_f32x4_le(vacc${M}${M+1}x01, vmax));
      $else:
        $for M in range(0, MR, 2):
          vacc${M}${M+1}x01 = wasm_f32x4_min(vacc${M}${M+1}x01, vmax);
    $elif ACTIVATION == "RELU":
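      // Signed-integer max against zero implements ReLU on IEEE floats: any
      // negative float has its sign bit set and compares below zero as an i32.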
      const v128_t vzero = wasm_f32x4_splat(0.0f);
      $for M in range(0, MR, 2):
        vacc${M}${M+1}x01 = wasm_i32x4_max(vacc${M}${M+1}x01, vzero);

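    // Store the ${MR}x${NR} tile: each row's two outputs leave as one 64-bit
    // store, and the A pointers are rewound by kc so the rows can be re-read
    // for the next tile of C columns. With a single remaining column, lane 0
    // (row M) and lane 2 (row M+1) hold the only valid outputs.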
    if XNN_LIKELY(nc >= ${NR}) {
      $for M in reversed(range(0, MR, 2)):
        *((double*) c${M}) = wasm_f64x2_extract_lane(vacc${M}${M+1}x01, 0);
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);
        a${M} = (const float*) ((uintptr_t) a${M} - kc);
        *((double*) c${M+1}) = wasm_f64x2_extract_lane(vacc${M}${M+1}x01, 1);
        c${M+1} = (float*) ((uintptr_t) c${M+1} + cn_stride);
        a${M+1} = (const float*) ((uintptr_t) a${M+1} - kc);

      nc -= ${NR};
    } else {
      assert(nc == 1);
      $for M in reversed(range(0, MR, 2)):
        *c${M} = wasm_f32x4_extract_lane(vacc${M}${M+1}x01, 0);
        *c${M+1} = wasm_f32x4_extract_lane(vacc${M}${M+1}x01, 2);

      nc = 0;
    }
  } while (nc != 0);
}