// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/wasmsimd-pipelined.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/spmm.h>


void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86_pipelined(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  size_t output_decrement = output_stride * nc - 32 * sizeof(float);
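  // Main loop: the M dimension (mc, given in bytes of float) is processed in strips of 32
  // elements; each strip computes outputs for all nc channels before advancing to the next strip.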
  while XNN_LIKELY(mc >= 32 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
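    // Software pipelining: the first weight, the first input-pointer delta, and the first
    // 32-element input tile are preloaded before the per-channel loop so that loads for the
    // next step can overlap the multiply-adds of the current one.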
    v128_t vw = wasm_v32x4_load_splat(w); w += 1;
    intptr_t diff = *dmap++;
    v128_t vi0123 = wasm_v128_load(input + 0);
    v128_t vi4567 = wasm_v128_load(input + 4);
    v128_t vi89AB = wasm_v128_load(input + 8);
    v128_t viCDEF = wasm_v128_load(input + 12);
    v128_t viGHIJ = wasm_v128_load(input + 16);
    v128_t viKLMN = wasm_v128_load(input + 20);
    v128_t viOPQR = wasm_v128_load(input + 24);
    v128_t viSTUV = wasm_v128_load(input + 28);
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
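      // vw holds this channel's first weight (the bias in XNNPACK's sparse packing), which seeds
      // all eight accumulators; the next weight is then preloaded right away.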
      v128_t vacc0123 = vw;
      v128_t vacc4567 = vw;
      v128_t vacc89AB = vw;
      v128_t vaccCDEF = vw;
      v128_t vaccGHIJ = vw;
      v128_t vaccKLMN = vw;
      v128_t vaccOPQR = vw;
      v128_t vaccSTUV = vw;
      vw = wasm_v32x4_load_splat(w); w += 1;


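      // For each of the nnz nonzero weights: multiply the broadcast weight against the 32-element
      // input tile, advance input by the precomputed byte offset (diff), and preload the next
      // weight, offset, and input tile for the following iteration.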
      if XNN_LIKELY(nnz != 0) {
        do {
          vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
          vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
          vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
          vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
          vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
          vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);

          diff = *dmap++;
          vw = wasm_v32x4_load_splat(w); w += 1;
          vi0123 = wasm_v128_load(input + 0);
          vi4567 = wasm_v128_load(input + 4);
          vi89AB = wasm_v128_load(input + 8);
          viCDEF = wasm_v128_load(input + 12);
          viGHIJ = wasm_v128_load(input + 16);
          viKLMN = wasm_v128_load(input + 20);
          viOPQR = wasm_v128_load(input + 24);
          viSTUV = wasm_v128_load(input + 28);
        } while (--nnz != 0);
      }
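      // Clamp to [min, max] via compare + bitselect (the x86-tuned variant avoids
      // wasm_f32x4_min/max): keep vacc where vacc <= vmax, otherwise vmax; then take vmin
      // wherever the result falls below vmin.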
      v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
      v128_t vout4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
      v128_t vout89AB = wasm_v128_bitselect(vacc89AB, vmax, wasm_f32x4_le(vacc89AB, vmax));
      v128_t voutCDEF = wasm_v128_bitselect(vaccCDEF, vmax, wasm_f32x4_le(vaccCDEF, vmax));
      v128_t voutGHIJ = wasm_v128_bitselect(vaccGHIJ, vmax, wasm_f32x4_le(vaccGHIJ, vmax));
      v128_t voutKLMN = wasm_v128_bitselect(vaccKLMN, vmax, wasm_f32x4_le(vaccKLMN, vmax));
      v128_t voutOPQR = wasm_v128_bitselect(vaccOPQR, vmax, wasm_f32x4_le(vaccOPQR, vmax));
      v128_t voutSTUV = wasm_v128_bitselect(vaccSTUV, vmax, wasm_f32x4_le(vaccSTUV, vmax));
      vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
      vout4567 = wasm_v128_bitselect(vmin, vout4567, wasm_f32x4_lt(vout4567, vmin));
      vout89AB = wasm_v128_bitselect(vmin, vout89AB, wasm_f32x4_lt(vout89AB, vmin));
      voutCDEF = wasm_v128_bitselect(vmin, voutCDEF, wasm_f32x4_lt(voutCDEF, vmin));
      voutGHIJ = wasm_v128_bitselect(vmin, voutGHIJ, wasm_f32x4_lt(voutGHIJ, vmin));
      voutKLMN = wasm_v128_bitselect(vmin, voutKLMN, wasm_f32x4_lt(voutKLMN, vmin));
      voutOPQR = wasm_v128_bitselect(vmin, voutOPQR, wasm_f32x4_lt(voutOPQR, vmin));
      voutSTUV = wasm_v128_bitselect(vmin, voutSTUV, wasm_f32x4_lt(voutSTUV, vmin));
      wasm_v128_store(output, vout0123);
      wasm_v128_store(output + 4, vout4567);
      wasm_v128_store(output + 8, vout89AB);
      wasm_v128_store(output + 12, voutCDEF);
      wasm_v128_store(output + 16, voutGHIJ);
      wasm_v128_store(output + 20, voutKLMN);
      wasm_v128_store(output + 24, voutOPQR);
      wasm_v128_store(output + 28, voutSTUV);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
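    // output advanced by output_stride for each of the nc channels; subtracting output_decrement
    // leaves it 32 floats past the start of this strip, i.e. at the start of the next strip.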
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 32;
    mc -= 32 * sizeof(float);
  }
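  // Remainder: the final mc % 32 elements are handled with progressively narrower tiles of
  // 16, 8, 4, 2, and 1 floats; output_decrement grows at each step to match the tile width.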
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 16 * sizeof(float);
    if (mc & (16 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v32x4_load_splat(w); w += 1;
        v128_t vacc4567 = vacc0123;
        v128_t vacc89AB = vacc0123;
        v128_t vaccCDEF = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            const v128_t vi4567 = wasm_v128_load(input + 4);
            const v128_t vi89AB = wasm_v128_load(input + 8);
            const v128_t viCDEF = wasm_v128_load(input + 12);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
            vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
            vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
            vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
        v128_t vout4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
        v128_t vout89AB = wasm_v128_bitselect(vacc89AB, vmax, wasm_f32x4_le(vacc89AB, vmax));
        v128_t voutCDEF = wasm_v128_bitselect(vaccCDEF, vmax, wasm_f32x4_le(vaccCDEF, vmax));
        vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
        vout4567 = wasm_v128_bitselect(vmin, vout4567, wasm_f32x4_lt(vout4567, vmin));
        vout89AB = wasm_v128_bitselect(vmin, vout89AB, wasm_f32x4_lt(vout89AB, vmin));
        voutCDEF = wasm_v128_bitselect(vmin, voutCDEF, wasm_f32x4_lt(voutCDEF, vmin));
        wasm_v128_store(output, vout0123);

        wasm_v128_store(output + 4, vout4567);
        wasm_v128_store(output + 8, vout89AB);
        wasm_v128_store(output + 12, voutCDEF);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 16;
    }
    output_decrement += 8 * sizeof(float);
    if (mc & (8 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v32x4_load_splat(w); w += 1;
        v128_t vacc4567 = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            const v128_t vi4567 = wasm_v128_load(input + 4);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
            vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
        v128_t vout4567 = wasm_v128_bitselect(vacc4567, vmax, wasm_f32x4_le(vacc4567, vmax));
        vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
        vout4567 = wasm_v128_bitselect(vmin, vout4567, wasm_f32x4_lt(vout4567, vmin));
        wasm_v128_store(output, vout0123);

        wasm_v128_store(output + 4, vout4567);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 8;
    }
    output_decrement += 4 * sizeof(float);
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_v128_bitselect(vacc0123, vmax, wasm_f32x4_le(vacc0123, vmax));
        vout0123 = wasm_v128_bitselect(vmin, vout0123, wasm_f32x4_lt(vout0123, vmin));
        wasm_v128_store(output, vout0123);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
    output_decrement += 2 * sizeof(float);
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc01 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi01 = wasm_v64x2_load_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
          } while (--nnz != 0);
        }
        v128_t vout01 = wasm_v128_bitselect(vacc01, vmax, wasm_f32x4_le(vacc01, vmax));
        vout01 = wasm_v128_bitselect(vmin, vout01, wasm_f32x4_lt(vout01, vmin));
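        // Store only the two valid lanes by writing them out as a single 64-bit value.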
        *((double*) output) = wasm_f64x2_extract_lane(vout01, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0 = wasm_v32x4_load_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0 = wasm_v32x4_load_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v32x4_load_splat(w); w += 1;
            vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
          } while (--nnz != 0);
        }
        v128_t vout0 = wasm_v128_bitselect(vacc0, vmax, wasm_f32x4_le(vacc0, vmax));
        vout0 = wasm_v128_bitselect(vmin, vout0, wasm_f32x4_lt(vout0, vmin));
        *output = wasm_f32x4_extract_lane(vout0, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}