// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 1);

  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

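  // The packed weights hold the bias followed by the 3x3 kernel taps in row-major order:
  // vw0123 = [bias, k00, k01, k02], vw4567 = [k10, k11, k12, k20], vw89 = [k21, k22, k21, k22].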
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

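  // Each row pointer is advanced by round_up_po2(input_width, 16) bytes while its row is
  // processed; input_decrement rewinds a pointer back to the start of that row.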
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

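  // With padding_top == 1, the row above the first input row is the caller-provided zero row.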
  const float* i0 = zero;
  const float* i1 = input;
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);

  float* o0 = output;

  size_t output_height = input_height;
  do {
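    // For the last output row the row below the input is out of bounds: substitute the zero row.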
    if XNN_UNPREDICTABLE(output_height < 2) {
      i2 = zero;
    }

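    // vi*x0123 holds the 4 columns to the left of the current tile; starting it at zero
    // provides the implicit left padding of 1 column.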
    v128_t vi0x0123 = vzero;
    v128_t vi1x0123 = vzero;
    v128_t vi2x0123 = vzero;

    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;

    size_t w = input_width;
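    // Main loop: compute 4 output pixels per iteration, using two accumulators (vo0p0, vo0p1)
    // as indicated by the _acc2 suffix, while more than 4 input columns remain.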
    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vi0x0123 = vi0x4567;
      vi1x0123 = vi1x4567;
      vi2x0123 = vi2x4567;

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

      vi0x4567 = vi0x89AB;
      vi1x4567 = vi1x89AB;
      vi2x4567 = vi2x89AB;

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);

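      // Clamp the accumulated results to the [min, max] output bounds from params.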
      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));

      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Always process the last block of 1..4 pixels.
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
    {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

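      // Zero the lanes that lie past the end of the row so they do not contribute to the output.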
      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      v128_t vo0p1 = wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));

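      // Store 4 results, or 2 and/or 1 result depending on how many output pixels remain.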
      if XNN_LIKELY(w == 4 * sizeof(float)) {
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
        }
        if (w & (1 * sizeof(float))) {
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

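    // Advance to the next output row: the old i1/i2 rows become i0/i1 (rewound to their row
    // starts), and i2 moves to the following input row.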
    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);


  } while (--output_height != 0);
}