// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 2);

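  // params supplies the per-kernel constants: a lane mask used below to zero
  // out-of-bounds columns in partial vectors, and the min/max output clamps.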
  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

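  // The 26 weights are packed as [bias, k00, k01, ..., k44]: the bias followed
  // by the 5x5 kernel taps in row-major order. Each tap is broadcast
  // ("loadsplat") across all four lanes, so one vector multiply applies a
  // single tap to four adjacent output pixels.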
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89AB = wasm_v128_load(weights + 8);
  const v128_t vwCDEF = wasm_v128_load(weights + 12);
  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
  const v128_t vwKLMN = wasm_v128_load(weights + 20);
  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
  const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
  const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
  const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
  const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
  const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
  const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
  const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
  const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
  const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
  const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
  const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
  const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
  const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
  const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

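  // input_width is measured in bytes; round it up to a whole number of 4-float
  // vectors to get the distance each input pointer advances over one row, so
  // pointers can be rewound by this amount when rebasing for the next rows.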
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

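  // Eight input rows feed four output rows: each output row reads a 5-row
  // window, and consecutive windows overlap by four rows. With padding_top == 2
  // the first two rows come from the zero buffer rather than the input.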
  const float* i0 = zero;
  const float* i1 = zero;
  const float* i2 = input;
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + input_width);
  float* o2 = (float*) ((uintptr_t) o1 + input_width);
  float* o3 = (float*) ((uintptr_t) o2 + input_width);

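  // Each pass of this loop produces up to four output rows. When fewer rows
  // remain, the unused input rows are redirected to the zero buffer and the
  // unused output pointers alias a live row, so all stores stay in bounds.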
  size_t output_height = input_height;
  do {
    if XNN_UNPREDICTABLE(output_height < 2) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i4 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(output_height < 4) {
      i5 = zero;
      o3 = o2;
    }
    if XNN_UNPREDICTABLE(output_height < 5) {
      i6 = zero;
    }
    if XNN_UNPREDICTABLE(output_height < 6) {
      i7 = zero;
    }

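    // The two columns of implicit left padding are realized by starting each
    // row's "previous vector" register at zero; the shuffles below shift real
    // data in from the right as the window slides along the row.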
    v128_t vi0x0123 = vzero;
    v128_t vi1x0123 = vzero;
    v128_t vi2x0123 = vzero;
    v128_t vi3x0123 = vzero;
    v128_t vi4x0123 = vzero;
    v128_t vi5x0123 = vzero;
    v128_t vi6x0123 = vzero;
    v128_t vi7x0123 = vzero;

    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;

    size_t w = input_width;
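    // Main loop: compute four output columns for each of the four rows per
    // iteration, for as long as more than two full vectors of input remain.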
    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;
      v128_t vo2p0 = vbias;
      v128_t vo3p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));

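      // Shuffle adjacent vectors to build the shifted column windows (x3456,
      // x2345, x5678, x6789) that the off-center kernel taps operate on.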
      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;
      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
      vi5x0123 = vi5x4567;
      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
      vi6x0123 = vi6x4567;
      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
      vi7x0123 = vi7x4567;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;
      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
      vi5x4567 = vi5x89AB;
      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
      vi6x4567 = vi6x89AB;
      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
      vi7x4567 = vi7x89AB;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));

      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);
      vo1 = wasm_f32x4_min(vo1, vmax);
      vo2 = wasm_f32x4_min(vo2, vmax);
      vo3 = wasm_f32x4_min(vo3, vmax);

      wasm_v128_store(o3, vo3); o3 += 4;
      wasm_v128_store(o2, vo2); o2 += 4;
      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Always process the last block of 5..8 pixels.
    if XNN_LIKELY(w > 4 * sizeof(float)) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;
      v128_t vo2p0 = vbias;
      v128_t vo3p0 = vbias;

      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;

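      // The second vector of this block may extend past the end of the row;
      // vmask zeroes the lanes that fall outside it.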
      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;
      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
      vi5x0123 = vi5x4567;
      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
      vi6x0123 = vi6x4567;
      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
      vi7x0123 = vi7x4567;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;
      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
      vi5x4567 = vi5x89AB;
      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
      vi6x4567 = vi6x89AB;
      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
      vi7x4567 = vi7x89AB;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));

      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);
      vo1 = wasm_f32x4_min(vo1, vmax);
      vo2 = wasm_f32x4_min(vo2, vmax);
      vo3 = wasm_f32x4_min(vo3, vmax);

      wasm_v128_store(o3, vo3); o3 += 4;
      wasm_v128_store(o2, vo2); o2 += 4;
      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;

      w -= 4 * sizeof(float);
    }
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
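    // Final block of 1..4 pixels: mask the current vector in place and shift
    // zeros in from the right, since no further input remains on this row.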
    {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;
      v128_t vo2p0 = vbias;
      v128_t vo3p0 = vbias;

      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
      vi7x4567 = wasm_v128_and(vmask, vi7x4567);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, vk02));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, vk02));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, vk02));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, vk32));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, vk32));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x4567, vk32));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x4567, vk32));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk01));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk01));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk01));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk21));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, vk41));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, vk41));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x3456, vk41));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, vk10));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x2345, vk10));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x2345, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x2345, vk30));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x2345, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));

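      // No next vector exists, so the right-shifted windows are built against
      // the zero vector rather than freshly loaded data.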
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk03));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk03));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk03));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk23));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk23));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk23));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x5678, vk43));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, vk14));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, vk14));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x6789, vk14));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x6789, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, vk34));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x6789, vk34));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x6789, vk34));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));

      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);
      vo1 = wasm_f32x4_min(vo1, vmax);
      vo2 = wasm_f32x4_min(vo2, vmax);
      vo3 = wasm_f32x4_min(vo3, vmax);

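      // Store the remaining 1..4 pixels per row: a full vector when four
      // remain; otherwise a 2-element store (then rotate the high half down)
      // and/or a single-element store.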
      if XNN_LIKELY(w & (4 * sizeof(float))) {
        wasm_v128_store(o3, vo3); o3 += 4;
        wasm_v128_store(o2, vo2); o2 += 4;
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
        }
        if (w & (1 * sizeof(float))) {
          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

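    // Rebase the row pointers for the next block of four output rows: the new
    // i0/i1 are the old i4/i5 rewound to the start of their rows, preserving
    // the two-row vertical overlap of the 5-tap window.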
    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);
    i6 = (const float*) ((uintptr_t) i5 + input_width);
    i7 = (const float*) ((uintptr_t) i6 + input_width);

    o0 = o3;
    o1 = (float*) ((uintptr_t) o0 + input_width);
    o2 = (float*) ((uintptr_t) o1 + input_width);
    o3 = (float*) ((uintptr_t) o2 + input_width);

    output_height = doz(output_height, 4);
  } while (output_height != 0);
}