// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5p2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_1x4_acc2(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
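  // Note: input_width is measured in bytes, not elements, and must cover a
  // whole number of floats.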
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 2);

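  // Load the output clamping bounds and the lane mask used to zero
  // out-of-bounds columns in the rightmost, partial block of each row.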
  const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
  const __m128 vmax = _mm_load_ps(params->sse.max);
  const __m128 vmin = _mm_load_ps(params->sse.min);

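  // Weight layout: bias first, then the 25 filter taps in row-major order;
  // vkRC is the tap at kernel row R, column C, broadcast to all four lanes.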
  const __m128 vbias = _mm_load1_ps(weights);
  const __m128 vk00 = _mm_load1_ps(weights + 1);
  const __m128 vk01 = _mm_load1_ps(weights + 2);
  const __m128 vk02 = _mm_load1_ps(weights + 3);
  const __m128 vk03 = _mm_load1_ps(weights + 4);
  const __m128 vk04 = _mm_load1_ps(weights + 5);
  const __m128 vk10 = _mm_load1_ps(weights + 6);
  const __m128 vk11 = _mm_load1_ps(weights + 7);
  const __m128 vk12 = _mm_load1_ps(weights + 8);
  const __m128 vk13 = _mm_load1_ps(weights + 9);
  const __m128 vk14 = _mm_load1_ps(weights + 10);
  const __m128 vk20 = _mm_load1_ps(weights + 11);
  const __m128 vk21 = _mm_load1_ps(weights + 12);
  const __m128 vk22 = _mm_load1_ps(weights + 13);
  const __m128 vk23 = _mm_load1_ps(weights + 14);
  const __m128 vk24 = _mm_load1_ps(weights + 15);
  const __m128 vk30 = _mm_load1_ps(weights + 16);
  const __m128 vk31 = _mm_load1_ps(weights + 17);
  const __m128 vk32 = _mm_load1_ps(weights + 18);
  const __m128 vk33 = _mm_load1_ps(weights + 19);
  const __m128 vk34 = _mm_load1_ps(weights + 20);
  const __m128 vk40 = _mm_load1_ps(weights + 21);
  const __m128 vk41 = _mm_load1_ps(weights + 22);
  const __m128 vk42 = _mm_load1_ps(weights + 23);
  const __m128 vk43 = _mm_load1_ps(weights + 24);
  const __m128 vk44 = _mm_load1_ps(weights + 25);

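  // Step to subtract from the row pointers at the end of each output row:
  // the row width rounded up to the 4-pixel block processed per iteration.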
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

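  // Input rows feeding output row 0: with padding_top == 2, the first two
  // taps read the zero (padding) row.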
  const float* i0 = zero;
  const float* i1 = zero;
  const float* i2 = input;
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);

  float* o0 = output;

  size_t output_height = input_height;
  do {
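    // Rows that would fall below the image are replaced by the zero row
    // (implicit bottom padding).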
    if XNN_UNPREDICTABLE(output_height < 2) {
      i3 = zero;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i4 = zero;
    }

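    // vi*x3012 is a rotated copy of the previous block ([prev3, prev0, prev1,
    // prev2]); starting it at zero supplies the implicit left padding.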
    __m128 vi0x3012 = _mm_setzero_ps();
    __m128 vi1x3012 = _mm_setzero_ps();
    __m128 vi2x3012 = _mm_setzero_ps();
    __m128 vi3x3012 = _mm_setzero_ps();
    __m128 vi4x3012 = _mm_setzero_ps();

    __m128 vi0x4567 = _mm_loadu_ps(i0);
    i0 += 4;
    __m128 vi1x4567 = _mm_loadu_ps(i1);
    i1 += 4;
    __m128 vi2x4567 = _mm_loadu_ps(i2);
    i2 += 4;
    __m128 vi3x4567 = _mm_loadu_ps(i3);
    i3 += 4;
    __m128 vi4x4567 = _mm_loadu_ps(i4);
    i4 += 4;

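    // Main loop: compute 4 output pixels per iteration while more than 8
    // input columns remain, so the look-ahead load of the next block stays
    // in bounds. Products alternate between two accumulators (vo0p0, vo0p1)
    // to shorten the dependency chain (the "acc2" in the kernel name).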
    size_t w = input_width;
    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk22));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x4567, vk32));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));

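      // Build the shifted column windows (x2345, x3456, x5678, x6789) from
      // rotates of the current block merged with the neighboring blocks.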
      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));

      const __m128 vi0x89AB = _mm_loadu_ps(i0);
      i0 += 4;
      const __m128 vi1x89AB = _mm_loadu_ps(i1);
      i1 += 4;
      const __m128 vi2x89AB = _mm_loadu_ps(i2);
      i2 += 4;
      const __m128 vi3x89AB = _mm_loadu_ps(i3);
      i3 += 4;
      const __m128 vi4x89AB = _mm_loadu_ps(i4);
      i4 += 4;

      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);

      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk01));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk21));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x3456, vk41));

      const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi0x3012 = vi0x7456;
      const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi1x3012 = vi1x7456;
      const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi2x3012 = vi2x7456;
      const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi3x3012 = vi3x7456;
      const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi4x3012 = vi4x7456;

      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
      vi0x4567 = vi0x89AB;
      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
      vi1x4567 = vi1x89AB;
      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
      vi2x4567 = vi2x89AB;
      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
      vi3x4567 = vi3x89AB;
      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
      vi4x4567 = vi4x89AB;

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x2345, vk10));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x2345, vk30));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x2345, vk40));

      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));

      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x5678, vk03));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x5678, vk23));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x5678, vk43));

      const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x6789, vk14));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x6789, vk24));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x6789, vk34));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));

      vo0p0 = _mm_add_ps(vo0p0, vo0p1);

      __m128 vo0 = _mm_max_ps(vo0p0, vmin);

      vo0 = _mm_min_ps(vo0, vmax);

      _mm_storeu_ps(o0, vo0);
      o0 += 4;
    }
    // Always process the last block of 5..8 pixels.
    if XNN_LIKELY(w > 4 * sizeof(float)) {
      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk22));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x4567, vk32));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));

      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));

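      // The look-ahead block may extend past the end of the row: mask the
      // loads so out-of-bounds lanes contribute zero.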
      const __m128 vi0x89AB = _mm_and_ps(_mm_loadu_ps(i0), vmask);
      i0 += 4;
      const __m128 vi1x89AB = _mm_and_ps(_mm_loadu_ps(i1), vmask);
      i1 += 4;
      const __m128 vi2x89AB = _mm_and_ps(_mm_loadu_ps(i2), vmask);
      i2 += 4;
      const __m128 vi3x89AB = _mm_and_ps(_mm_loadu_ps(i3), vmask);
      i3 += 4;
      const __m128 vi4x89AB = _mm_and_ps(_mm_loadu_ps(i4), vmask);
      i4 += 4;

      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);

      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk01));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk21));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x3456, vk41));

      const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi0x3012 = vi0x7456;
      const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi1x3012 = vi1x7456;
      const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi2x3012 = vi2x7456;
      const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi3x3012 = vi3x7456;
      const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi4x3012 = vi4x7456;

      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
      vi0x4567 = vi0x89AB;
      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
      vi1x4567 = vi1x89AB;
      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
      vi2x4567 = vi2x89AB;
      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
      vi3x4567 = vi3x89AB;
      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
      vi4x4567 = vi4x89AB;

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x2345, vk10));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x2345, vk30));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x2345, vk40));

      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));

      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x5678, vk03));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x5678, vk23));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x5678, vk43));

      const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x6789, vk14));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x6789, vk24));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x6789, vk34));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));

      vo0p0 = _mm_add_ps(vo0p0, vo0p1);

      __m128 vo0 = _mm_max_ps(vo0p0, vmin);

      vo0 = _mm_min_ps(vo0, vmax);

      _mm_storeu_ps(o0, vo0);
      o0 += 4;

      w -= 4 * sizeof(float);
    }
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
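    // Final block of 1..4 pixels: zero the lanes beyond the row end and
    // split the store by the remaining width.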
    {
      vi0x4567 = _mm_and_ps(vi0x4567, vmask);
      vi1x4567 = _mm_and_ps(vi1x4567, vmask);
      vi2x4567 = _mm_and_ps(vi2x4567, vmask);
      vi3x4567 = _mm_and_ps(vi3x4567, vmask);
      vi4x4567 = _mm_and_ps(vi4x4567, vmask);

      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
      __m128 vo0p1 = _mm_mul_ps(vi1x4567, vk12);
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk22));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x4567, vk32));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));

      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));

      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);

      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x3456, vk01));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk21));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x3456, vk41));

      const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));

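      // No input remains to the right of this block; shift zeros into the
      // right-hand windows.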
      const __m128 vzero = _mm_setzero_ps();
      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x2345, vk10));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x2345, vk30));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x2345, vk40));

      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));

      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x5678, vk03));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x5678, vk23));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x5678, vk43));

      const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x6789, vk14));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x6789, vk24));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x6789, vk34));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));

      vo0p0 = _mm_add_ps(vo0p0, vo0p1);

      __m128 vo0 = _mm_max_ps(vo0p0, vmin);

      vo0 = _mm_min_ps(vo0, vmax);

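      // Store 4, 2, and/or 1 outputs according to the remaining width.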
      if XNN_LIKELY(w & (4 * sizeof(float))) {
        _mm_storeu_ps(o0, vo0);
        o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          _mm_storel_pi((__m64*) o0, vo0);
          o0 += 2;

          vo0 = _mm_movehl_ps(vo0, vo0);
        }
        if (w & (1 * sizeof(float))) {
          _mm_store_ss(o0, vo0);
          o0 += 1;
        }
      }
    }

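    // Advance to the next output row: rebase i0 and i1 to the starts of the
    // rows just consumed, then derive i2..i4 from them.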
    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);

  } while (--output_height != 0);
}

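// Editorial addition, not produced by the generator: a minimal scalar
// reference sketch for the same operation (5x5 depthwise convolution,
// stride 1, padding 2, single channel, bias plus min/max clamping), useful
// for cross-checking the microkernel on small inputs. All names below are
// illustrative, and width is in elements here, not bytes.
static void dwconv2d_chw_5x5p2_ref(
    size_t input_height,
    size_t input_width,    // in elements, unlike the microkernel argument
    const float* input,
    const float* weights,  // same layout: bias, then 25 row-major taps
    float* output,
    float output_min,
    float output_max)
{
  for (size_t oy = 0; oy < input_height; oy++) {
    for (size_t ox = 0; ox < input_width; ox++) {
      float acc = weights[0];  // bias
      for (size_t ky = 0; ky < 5; ky++) {
        for (size_t kx = 0; kx < 5; kx++) {
          // Input coordinates, offset by the 2-pixel implicit zero padding.
          const size_t iy = oy + ky;
          const size_t ix = ox + kx;
          if (iy >= 2 && iy - 2 < input_height && ix >= 2 && ix - 2 < input_width) {
            acc += input[(iy - 2) * input_width + (ix - 2)] * weights[1 + ky * 5 + kx];
          }
        }
      }
      // Clamp in the same order as the kernel: max with the lower bound,
      // then min with the upper bound.
      acc = acc < output_min ? output_min : acc;
      acc = acc > output_max ? output_max : acc;
      output[oy * input_width + ox] = acc;
    }
  }
}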