// Auto-generated file. Do not edit!
//   Template: src/f32-ppmm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/ppmm.h>

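// This kernel computes a 4x8 (MRxNR) tile of a float32 matrix product with
// fused min/max output clamping, using WebAssembly SIMD. In a PPMM
// micro-kernel both operands are read from pre-packed panels, so `a` and
// `w` advance contiguously in the inner loop. The "splat" variant
// broadcasts each element of `a` across a full vector with a shuffle
// before multiplying.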
void xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);

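  // One output row pointer per row of the tile. When mr < 4, pointers for
  // the missing rows alias the previous valid row, so their stores are
  // redundant but stay within memory the caller provided.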
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

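  // Broadcast the scalar clamping bounds to all four lanes.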
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
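  // Outer loop: produce the output in blocks of NR = 8 columns.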
  do {
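    // Initialize all four accumulator rows from the first 8 packed weight
    // values (presumably the bias terms in the packed layout).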
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    w += 8;

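    // Inner loop over K: each iteration consumes 4 packed A values (one
    // per row of the tile) and 8 packed B values (one per column).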
    size_t k = kc;
    do {
      const v128_t va0123 = wasm_v128_load(a);
      a += 4;

      const v128_t vb0123 = wasm_v128_load(w);
      const v128_t vb4567 = wasm_v128_load(w + 4);
      w += 8;

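      // Broadcast each A value across a full vector; these shuffles are
      // the "splat" in the kernel name.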
      const v128_t va0000 = wasm_v32x4_shuffle(va0123, va0123, 0, 0, 0, 0);
      const v128_t va1111 = wasm_v32x4_shuffle(va0123, va0123, 1, 1, 1, 1);
      const v128_t va2222 = wasm_v32x4_shuffle(va0123, va0123, 2, 2, 2, 2);
      const v128_t va3333 = wasm_v32x4_shuffle(va0123, va0123, 3, 3, 3, 3);

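      // Multiply-accumulate: row r of the tile accumulates va_r * vb.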
      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0000, vb0123));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1111, vb0123));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2222, vb0123));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3333, vb0123));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0000, vb4567));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1111, vb4567));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2222, vb4567));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3333, vb4567));

      k -= sizeof(float);
    } while (k != 0);

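    // Clamp the accumulators to [min, max], e.g. to fuse a ReLU-style
    // activation into the matrix multiplication.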
    vacc0x0123 = wasm_f32x4_max(vacc0x0123, vmin);
    vacc1x0123 = wasm_f32x4_max(vacc1x0123, vmin);
    vacc2x0123 = wasm_f32x4_max(vacc2x0123, vmin);
    vacc3x0123 = wasm_f32x4_max(vacc3x0123, vmin);
    vacc0x4567 = wasm_f32x4_max(vacc0x4567, vmin);
    vacc1x4567 = wasm_f32x4_max(vacc1x4567, vmin);
    vacc2x4567 = wasm_f32x4_max(vacc2x4567, vmin);
    vacc3x4567 = wasm_f32x4_max(vacc3x4567, vmin);

    vacc0x0123 = wasm_f32x4_min(vacc0x0123, vmax);
    vacc1x0123 = wasm_f32x4_min(vacc1x0123, vmax);
    vacc2x0123 = wasm_f32x4_min(vacc2x0123, vmax);
    vacc3x0123 = wasm_f32x4_min(vacc3x0123, vmax);
    vacc0x4567 = wasm_f32x4_min(vacc0x4567, vmax);
    vacc1x4567 = wasm_f32x4_min(vacc1x4567, vmax);
    vacc2x4567 = wasm_f32x4_min(vacc2x4567, vmax);
    vacc3x4567 = wasm_f32x4_min(vacc3x4567, vmax);

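    // Common case: a full block of 8 output columns remains.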
    if XNN_LIKELY(nc >= 8) {
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);

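      // Rewind `a` to the start of the packed panel: the inner loop
      // advanced it by 4 floats per element of K, i.e. kc * 4 bytes.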
      a = (const float*) ((uintptr_t) a - kc * 4);

      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      nc -= 8;
    } else {
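      // Remainder: fewer than 8 columns are left. Store 4, then 2, then 1
      // column(s), shifting the surviving lanes down after each step.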
      if (nc & 4) {
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
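      // Store two columns at once by extracting the low 64 bits of each
      // accumulator, then shift lanes 2 and 3 down for a possible final
      // single-column store.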
      if (nc & 2) {
        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);

        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);

        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
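      // Store the last remaining column from lane 0.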
      if (nc & 1) {
        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}