// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 0);
  assert(padding_top <= 1);

  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

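  // The 3x3 kernel is passed as 10 contiguous floats: the bias first, then
  // the 9 filter taps in row-major order. Each value is splatted across all
  // four SIMD lanes so it can multiply four output pixels at once
  // ("loadsplat").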
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));

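  // Row pointers: i0/i1/i2 are the three input rows contributing to one
  // output row. When padding_top == 1, the row above the image is replaced
  // with the caller-provided zero row.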
  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
  if XNN_UNPREDICTABLE(padding_top != 0) {
    i0 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);

  float* o0 = output;

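  // With a 3x3 kernel and vertical stride 2:
  // output_height = (padded_input_height - kernel_height + stride) / stride.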
  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
    if XNN_UNPREDICTABLE(padded_input_height < 4) {
      i2 = zero;
    }

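    // vi*x1357 carry the odd-indexed pixels of the previous block, which
    // supply the left neighbor of the first output in the next block. They
    // start at zero to implement the implicit zero padding on the left edge.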
    v128_t vi0x1357 = vzero;
    v128_t vi1x1357 = vzero;
    v128_t vi2x1357 = vzero;

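    // Main loop: each iteration consumes 8 input pixels per row and produces
    // 4 output pixels (horizontal stride 2).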
    size_t w = input_width;
    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
      v128_t vo0p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      i2 += 8;

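      // Deinterleave the 8 pixels into even-indexed (8ACE) and odd-indexed
      // (9BDF) lanes; with stride 2 the even lanes are the window centers.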
      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);

      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk21));

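      // Form the left-neighbor vector (7BDF) by shifting in the last odd
      // pixel of the previous block, then save the current odd lanes for the
      // next iteration.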
      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x7BDF, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk02));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk22));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);

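      // Clamp the merged accumulator to the [min, max] output range.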
      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);

      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 0-7 pixels to process.
    assert(w < 8 * sizeof(float));
    if XNN_LIKELY(w != 0) {
      v128_t vo0p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);

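      // Mask off the lanes that fall past the end of the row, so values read
      // out of bounds do not contribute to the output.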
      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);

      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, vk21));

      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x7BDF, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, vk02));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk22));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);

      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);

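      // Store 1-4 remaining outputs. w holds the leftover input bytes
      // (1-7 pixels); adding one pixel rounds up, so the bit tests below
      // emit ceil(pixels / 2) outputs via 4-, 2-, and 1-element stores.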
      w += 1 * sizeof(float);
      if (w & (8 * sizeof(float))) {
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (4 * sizeof(float))) {
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
        }
        if (w & (2 * sizeof(float))) {
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

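    // Advance two input rows per output row: the new i0 starts from i2,
    // rewound by input_decrement to the beginning of the row.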
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i0 + input_width);
    i2 = (const float*) ((uintptr_t) i1 + input_width);

    output_height -= 1;
    padded_input_height -= 2;
  } while (output_height != 0);
}