// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR == 2
$assert MR % 2 == 0
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


$assert ACTIVATION in ["LINEAR", "RELU", "MINMAX"]
$ACTIVATION_SUFFIX = {"LINEAR": ""}.get(ACTIVATION, "_" + ACTIVATION.lower())
$ARCH_SUFFIX = "" if ACTIVATION in ["LINEAR", "RELU"] else "_x86" if X86 else "_arm"
$PARAMS = {"LINEAR": "xnn_f32_default_params", "RELU": "xnn_f32_relu_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
void xnn_f32_igemm${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}c4__wasmsimd${ARCH_SUFFIX}(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up the output row pointers. When mr < ${MR}, out-of-range rows alias
  // the previous row; their stores are later overwritten with valid data.
  float* c0 = c;
  $for M in range(1, MR):
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  $if ACTIVATION == "MINMAX" and not X86:
    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  do {
    // Initialize accumulators with the bias in lane 0 only; the other lanes
    // start at zero and are folded in by the final horizontal reduction.
    v128_t vacc0x0c4 = wasm_f32x4_replace_lane(wasm_f32x4_splat(0.0f), 0, w[0]);
    $for N in range(1, NR):
      v128_t vacc0x${N}c4 = wasm_f32x4_replace_lane(vacc0x0c4, 0, w[${N}]);
    $for M in range(1, MR):
      $for N in range(NR):
        v128_t vacc${M}x${N}c4 = vacc0x${N}c4;
    w += ${NR};

    size_t p = ks;
    do {
      $for M in range(MR):
        const float* restrict a${M} = a[${M}];
        assert(a${M} != NULL);
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const float*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      // Main loop: process 4 channels per iteration.
      size_t k = kc;
      for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
        $for M in range(MR):
          const v128_t va${M} = wasm_v128_load(a${M});
          a${M} += 4;

        const v128_t vb0 = wasm_v128_load(w);
        $for N in range(1, NR):
          const v128_t vb${N} = wasm_v128_load(w + ${N * 4});
        w += ${NR * 4};

        $for M in range(MR):
          $for N in range(NR):
            vacc${M}x${N}c4 = wasm_f32x4_add(vacc${M}x${N}c4, wasm_f32x4_mul(va${M}, vb${N}));
      }
      // Remainder of 1-3 channels: the packed weights are zero-padded to a
      // multiple of 4 channels, so zero out the input lanes where the weights
      // are 0.0f to keep out-of-bounds values (possibly NaN) out of the sums.
      if XNN_UNLIKELY(k != 0) {
        $for M in range(MR):
          const v128_t va${M} = wasm_v128_load(a${M});

        const v128_t vb0 = wasm_v128_load(w);
        $for N in range(1, NR):
          const v128_t vb${N} = wasm_v128_load(w + ${N * 4});
        w += ${NR * 4};

        const v128_t vzero = wasm_f32x4_splat(0.0f);
        $for N in range(NR):
          const v128_t vmask${N} = wasm_f32x4_eq(vb${N}, vzero);

        $for M in range(MR):
          $for N in range(NR):
            vacc${M}x${N}c4 = wasm_f32x4_add(vacc${M}x${N}c4, wasm_f32x4_mul(wasm_v128_andnot(va${M}, vmask${N}), vb${N}));
      }
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction: sum the 4 per-channel partial sums of each
    // accumulator, packing rows M and M+1 as [mM.n0, mM.n1, mM+1.n0, mM+1.n1].
    $for M in range(MR):
      const v128_t vacc${M}x01c2 = wasm_f32x4_add(
        wasm_v32x4_shuffle(vacc${M}x0c4, vacc${M}x1c4, 0, 4, 1, 5),
        wasm_v32x4_shuffle(vacc${M}x0c4, vacc${M}x1c4, 2, 6, 3, 7));

    $for M in range(0, MR, 2):
      v128_t vacc${M}${M+1}x01 = wasm_f32x4_add(
        wasm_v32x4_shuffle(vacc${M}x01c2, vacc${M+1}x01c2, 0, 1, 4, 5),
        wasm_v32x4_shuffle(vacc${M}x01c2, vacc${M+1}x01c2, 2, 3, 6, 7));

    $if ACTIVATION == "MINMAX":
      $if X86:
        const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
        $for M in range(0, MR, 2):
          vacc${M}${M+1}x01 = wasm_v128_bitselect(vmin, vacc${M}${M+1}x01, wasm_f32x4_lt(vacc${M}${M+1}x01, vmin));
      $else:
        $for M in range(0, MR, 2):
          vacc${M}${M+1}x01 = wasm_f32x4_max(vacc${M}${M+1}x01, vmin);

      $if X86:
        const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
        $for M in range(0, MR, 2):
          vacc${M}${M+1}x01 = wasm_v128_bitselect(vacc${M}${M+1}x01, vmax, wasm_f32x4_le(vacc${M}${M+1}x01, vmax));
      $else:
        $for M in range(0, MR, 2):
          vacc${M}${M+1}x01 = wasm_f32x4_min(vacc${M}${M+1}x01, vmax);
    $elif ACTIVATION == "RELU":
      // Integer max implements ReLU on IEEE floats: negative values have the
      // sign bit set and compare below +0.0f as signed integers.
      const v128_t vzero = wasm_f32x4_splat(0.0f);
      $for M in range(0, MR, 2):
        vacc${M}${M+1}x01 = wasm_i32x4_max(vacc${M}${M+1}x01, vzero);

    if XNN_LIKELY(nc >= ${NR}) {
      // Store rows highest-first, so that rows aliased due to mr < ${MR} end
      // up holding the valid (lower) row's results.
      $for M in reversed(range(0, MR, 2)):
        *((double*) c${M+1}) = wasm_f64x2_extract_lane(vacc${M}${M+1}x01, 1);
        c${M+1} = (float*) ((uintptr_t) c${M+1} + cn_stride);
        *((double*) c${M}) = wasm_f64x2_extract_lane(vacc${M}${M+1}x01, 0);
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);

      // Rewind the indirection buffer to reuse the same A blocks for the
      // next column tile.
      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= ${NR};
    } else {
      assert(nc == 1);
      $for M in reversed(range(0, MR, 2)):
        *c${M+1} = wasm_f32x4_extract_lane(vacc${M}${M+1}x01, 2);
        *c${M} = wasm_f32x4_extract_lane(vacc${M}${M+1}x01, 0);

      nc = 0;
    }
  } while (nc != 0);
}