// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5p2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_2x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 2);

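  // Load the right-edge mask and the output clamping bounds from the packed
  // params.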
  const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
  const __m128 vmax = _mm_load_ps(params->sse.max);
  const __m128 vmin = _mm_load_ps(params->sse.min);

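  // The packed weights are the bias followed by the 25 kernel taps in
  // row-major order; vkRC holds tap (row R, column C) broadcast to all lanes.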
  const __m128 vbias = _mm_load1_ps(weights);
  const __m128 vk00 = _mm_load1_ps(weights + 1);
  const __m128 vk01 = _mm_load1_ps(weights + 2);
  const __m128 vk02 = _mm_load1_ps(weights + 3);
  const __m128 vk03 = _mm_load1_ps(weights + 4);
  const __m128 vk04 = _mm_load1_ps(weights + 5);
  const __m128 vk10 = _mm_load1_ps(weights + 6);
  const __m128 vk11 = _mm_load1_ps(weights + 7);
  const __m128 vk12 = _mm_load1_ps(weights + 8);
  const __m128 vk13 = _mm_load1_ps(weights + 9);
  const __m128 vk14 = _mm_load1_ps(weights + 10);
  const __m128 vk20 = _mm_load1_ps(weights + 11);
  const __m128 vk21 = _mm_load1_ps(weights + 12);
  const __m128 vk22 = _mm_load1_ps(weights + 13);
  const __m128 vk23 = _mm_load1_ps(weights + 14);
  const __m128 vk24 = _mm_load1_ps(weights + 15);
  const __m128 vk30 = _mm_load1_ps(weights + 16);
  const __m128 vk31 = _mm_load1_ps(weights + 17);
  const __m128 vk32 = _mm_load1_ps(weights + 18);
  const __m128 vk33 = _mm_load1_ps(weights + 19);
  const __m128 vk34 = _mm_load1_ps(weights + 20);
  const __m128 vk40 = _mm_load1_ps(weights + 21);
  const __m128 vk41 = _mm_load1_ps(weights + 22);
  const __m128 vk42 = _mm_load1_ps(weights + 23);
  const __m128 vk43 = _mm_load1_ps(weights + 24);
  const __m128 vk44 = _mm_load1_ps(weights + 25);

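  // Rows are consumed in 4-pixel tiles, so the row pointers overshoot the end
  // of a row by up to 3 pixels; rewinding by the tile-rounded row size returns
  // them to the start of that row.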
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

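  // padding_top == 2: the two rows above the image read from the zero buffer.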
  const float* i0 = zero;
  const float* i1 = zero;
  const float* i2 = input;
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + input_width);

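  // Each outer iteration computes two output rows (o0, o1) from a six-row
  // input window (i0..i5).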
  size_t output_height = input_height;
  do {
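    // Near the bottom edge, substitute the zero buffer for rows below the
    // image, and alias o1 to o0 so the extra row's stores are overwritten by
    // the valid row.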
    if XNN_UNPREDICTABLE(output_height < 2) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i4 = zero;
    }
    if XNN_UNPREDICTABLE(output_height < 4) {
      i5 = zero;
    }

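    // The x3012 registers carry the trailing pixels of the previous tile
    // (lane 0 holds pixel 3, lane 3 holds pixel 2); they start at zero to
    // supply the two implicit left-padding columns.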
    __m128 vi0x3012 = _mm_setzero_ps();
    __m128 vi1x3012 = _mm_setzero_ps();
    __m128 vi2x3012 = _mm_setzero_ps();
    __m128 vi3x3012 = _mm_setzero_ps();
    __m128 vi4x3012 = _mm_setzero_ps();
    __m128 vi5x3012 = _mm_setzero_ps();

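    // Prime the pipeline with the first 4 pixels of each input row.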
    __m128 vi0x4567 = _mm_loadu_ps(i0);
    i0 += 4;
    __m128 vi1x4567 = _mm_loadu_ps(i1);
    i1 += 4;
    __m128 vi2x4567 = _mm_loadu_ps(i2);
    i2 += 4;
    __m128 vi3x4567 = _mm_loadu_ps(i3);
    i3 += 4;
    __m128 vi4x4567 = _mm_loadu_ps(i4);
    i4 += 4;
    __m128 vi5x4567 = _mm_loadu_ps(i5);
    i5 += 4;

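    // Main loop: while more than 8 pixels remain in the row, produce 4 output
    // pixels per row per iteration using unmasked loads.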
    size_t w = input_width;
    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk02));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk12));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk12));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk22));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk22));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x4567, vk32));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x4567, vk32));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x4567, vk42));

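      // Rotate each tile one lane right ({4,5,6,7} -> {7,4,5,6}) as the first
      // step toward the left-shifted windows.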
      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));

      const __m128 vi0x89AB = _mm_loadu_ps(i0);
      i0 += 4;
      const __m128 vi1x89AB = _mm_loadu_ps(i1);
      i1 += 4;
      const __m128 vi2x89AB = _mm_loadu_ps(i2);
      i2 += 4;
      const __m128 vi3x89AB = _mm_loadu_ps(i3);
      i3 += 4;
      const __m128 vi4x89AB = _mm_loadu_ps(i4);
      i4 += 4;
      const __m128 vi5x89AB = _mm_loadu_ps(i5);
      i5 += 4;

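      // Splice the carried pixel 3 into lane 0 to form the window shifted one
      // column left ({3,4,5,6}).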
      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
      const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk01));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk01));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk11));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk21));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk21));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x3456, vk31));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x3456, vk41));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x3456, vk41));

      const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi0x3012 = vi0x7456;
      const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi1x3012 = vi1x7456;
      const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi2x3012 = vi2x7456;
      const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi3x3012 = vi3x7456;
      const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi4x3012 = vi4x7456;
      const __m128 vi5x2345 = _mm_shuffle_ps(vi5x3012, vi5x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi5x3012 = vi5x7456;

      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
      vi0x4567 = vi0x89AB;
      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
      vi1x4567 = vi1x89AB;
      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
      vi2x4567 = vi2x89AB;
      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
      vi3x4567 = vi3x89AB;
      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
      vi4x4567 = vi4x89AB;
      const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vi5x89AB);
      vi5x4567 = vi5x89AB;

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x2345, vk00));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x2345, vk10));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x2345, vk10));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x2345, vk20));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x2345, vk30));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x2345, vk30));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x2345, vk40));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x2345, vk40));

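      // Rotate the spliced tile one lane left to get the window shifted one
      // column right ({5,6,7,8}).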
      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk03));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk03));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk13));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk23));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk23));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x5678, vk33));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x5678, vk43));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x5678, vk43));

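      // Combine with the next tile to get the window shifted two columns
      // right ({6,7,8,9}).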
      const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi5x6789 = _mm_shuffle_ps(vi5x5678, vi5x89AB, _MM_SHUFFLE(1, 0, 2, 1));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x6789, vk04));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x6789, vk14));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x6789, vk14));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x6789, vk24));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x6789, vk24));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x6789, vk34));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x6789, vk34));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x6789, vk44));

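      // Clamp the accumulators to the [vmin, vmax] output range.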
      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
      __m128 vo1 = _mm_max_ps(vo1p0, vmin);

      vo0 = _mm_min_ps(vo0, vmax);
      vo1 = _mm_min_ps(vo1, vmax);

      _mm_storeu_ps(o1, vo1);
      o1 += 4;
      _mm_storeu_ps(o0, vo0);
      o0 += 4;
    }
    // Always process the last block of 5..8 pixels.
    if XNN_LIKELY(w > 4 * sizeof(float)) {
      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk02));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk12));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk12));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk22));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk22));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x4567, vk32));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x4567, vk32));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x4567, vk42));

      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));

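      // The look-ahead loads may cross the end of the valid row; vmask zeroes
      // the out-of-range lanes.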
      const __m128 vi0x89AB = _mm_and_ps(_mm_loadu_ps(i0), vmask);
      i0 += 4;
      const __m128 vi1x89AB = _mm_and_ps(_mm_loadu_ps(i1), vmask);
      i1 += 4;
      const __m128 vi2x89AB = _mm_and_ps(_mm_loadu_ps(i2), vmask);
      i2 += 4;
      const __m128 vi3x89AB = _mm_and_ps(_mm_loadu_ps(i3), vmask);
      i3 += 4;
      const __m128 vi4x89AB = _mm_and_ps(_mm_loadu_ps(i4), vmask);
      i4 += 4;
      const __m128 vi5x89AB = _mm_and_ps(_mm_loadu_ps(i5), vmask);
      i5 += 4;

      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
      const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk01));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk01));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk11));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk21));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk21));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x3456, vk31));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x3456, vk41));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x3456, vk41));

      const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi0x3012 = vi0x7456;
      const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi1x3012 = vi1x7456;
      const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi2x3012 = vi2x7456;
      const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi3x3012 = vi3x7456;
      const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi4x3012 = vi4x7456;
      const __m128 vi5x2345 = _mm_shuffle_ps(vi5x3012, vi5x7456, _MM_SHUFFLE(2, 1, 0, 3));
      vi5x3012 = vi5x7456;

      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vi0x89AB);
      vi0x4567 = vi0x89AB;
      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vi1x89AB);
      vi1x4567 = vi1x89AB;
      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vi2x89AB);
      vi2x4567 = vi2x89AB;
      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vi3x89AB);
      vi3x4567 = vi3x89AB;
      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vi4x89AB);
      vi4x4567 = vi4x89AB;
      const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vi5x89AB);
      vi5x4567 = vi5x89AB;

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x2345, vk00));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x2345, vk10));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x2345, vk10));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x2345, vk20));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x2345, vk30));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x2345, vk30));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x2345, vk40));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x2345, vk40));

      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk03));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk03));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk13));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk23));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk23));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x5678, vk33));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x5678, vk43));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x5678, vk43));

      const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vi0x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vi1x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vi2x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vi3x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vi4x89AB, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi5x6789 = _mm_shuffle_ps(vi5x5678, vi5x89AB, _MM_SHUFFLE(1, 0, 2, 1));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x6789, vk04));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x6789, vk14));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x6789, vk14));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x6789, vk24));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x6789, vk24));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x6789, vk34));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x6789, vk34));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x6789, vk44));


      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
      __m128 vo1 = _mm_max_ps(vo1p0, vmin);

      vo0 = _mm_min_ps(vo0, vmax);
      vo1 = _mm_min_ps(vo1, vmax);

      _mm_storeu_ps(o1, vo1);
      o1 += 4;
      _mm_storeu_ps(o0, vo0);
      o0 += 4;

      w -= 4 * sizeof(float);
    }
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
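    // Final block of 1..4 pixels: mask the current tile and finish with
    // partial stores.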
    {
      vi0x4567 = _mm_and_ps(vi0x4567, vmask);
      vi1x4567 = _mm_and_ps(vi1x4567, vmask);
      vi2x4567 = _mm_and_ps(vi2x4567, vmask);
      vi3x4567 = _mm_and_ps(vi3x4567, vmask);
      vi4x4567 = _mm_and_ps(vi4x4567, vmask);
      vi5x4567 = _mm_and_ps(vi5x4567, vmask);

      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk02));
      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk02));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk12));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk12));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk22));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk22));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x4567, vk32));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x4567, vk32));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x4567, vk42));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x4567, vk42));

      const __m128 vi0x7456 = _mm_shuffle_ps(vi0x4567, vi0x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi1x7456 = _mm_shuffle_ps(vi1x4567, vi1x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi3x7456 = _mm_shuffle_ps(vi3x4567, vi3x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi4x7456 = _mm_shuffle_ps(vi4x4567, vi4x4567, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi5x7456 = _mm_shuffle_ps(vi5x4567, vi5x4567, _MM_SHUFFLE(2, 1, 0, 3));

      const __m128 vi0x3456 = _mm_move_ss(vi0x7456, vi0x3012);
      const __m128 vi1x3456 = _mm_move_ss(vi1x7456, vi1x3012);
      const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
      const __m128 vi3x3456 = _mm_move_ss(vi3x7456, vi3x3012);
      const __m128 vi4x3456 = _mm_move_ss(vi4x7456, vi4x3012);
      const __m128 vi5x3456 = _mm_move_ss(vi5x7456, vi5x3012);

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk01));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk01));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk11));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk11));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk21));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk21));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x3456, vk31));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x3456, vk31));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x3456, vk41));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x3456, vk41));

      const __m128 vi0x2345 = _mm_shuffle_ps(vi0x3012, vi0x7456, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi1x2345 = _mm_shuffle_ps(vi1x3012, vi1x7456, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi2x2345 = _mm_shuffle_ps(vi2x3012, vi2x7456, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi3x2345 = _mm_shuffle_ps(vi3x3012, vi3x7456, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi4x2345 = _mm_shuffle_ps(vi4x3012, vi4x7456, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi5x2345 = _mm_shuffle_ps(vi5x3012, vi5x7456, _MM_SHUFFLE(2, 1, 0, 3));

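      // Nothing remains past this tile, so zeros stand in for the next block.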
      const __m128 vzero = _mm_setzero_ps();
      const __m128 vi0x8567 = _mm_move_ss(vi0x4567, vzero);
      const __m128 vi1x8567 = _mm_move_ss(vi1x4567, vzero);
      const __m128 vi2x8567 = _mm_move_ss(vi2x4567, vzero);
      const __m128 vi3x8567 = _mm_move_ss(vi3x4567, vzero);
      const __m128 vi4x8567 = _mm_move_ss(vi4x4567, vzero);
      const __m128 vi5x8567 = _mm_move_ss(vi5x4567, vzero);

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x2345, vk00));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x2345, vk00));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x2345, vk10));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x2345, vk10));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x2345, vk20));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x2345, vk20));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x2345, vk30));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x2345, vk30));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x2345, vk40));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x2345, vk40));

      const __m128 vi0x5678 = _mm_shuffle_ps(vi0x8567, vi0x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi1x5678 = _mm_shuffle_ps(vi1x8567, vi1x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi2x5678 = _mm_shuffle_ps(vi2x8567, vi2x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi3x5678 = _mm_shuffle_ps(vi3x8567, vi3x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi4x5678 = _mm_shuffle_ps(vi4x8567, vi4x8567, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi5x5678 = _mm_shuffle_ps(vi5x8567, vi5x8567, _MM_SHUFFLE(0, 3, 2, 1));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk03));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk03));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk13));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk13));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk23));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk23));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x5678, vk33));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x5678, vk33));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x5678, vk43));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x5678, vk43));

      const __m128 vi0x6789 = _mm_shuffle_ps(vi0x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi1x6789 = _mm_shuffle_ps(vi1x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi2x6789 = _mm_shuffle_ps(vi2x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi3x6789 = _mm_shuffle_ps(vi3x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi4x6789 = _mm_shuffle_ps(vi4x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
      const __m128 vi5x6789 = _mm_shuffle_ps(vi5x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x6789, vk04));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x6789, vk04));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x6789, vk14));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x6789, vk14));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x6789, vk24));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x6789, vk24));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x6789, vk34));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x6789, vk34));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x6789, vk44));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi5x6789, vk44));


      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
      __m128 vo1 = _mm_max_ps(vo1p0, vmin);

      vo0 = _mm_min_ps(vo0, vmax);
      vo1 = _mm_min_ps(vo1, vmax);

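      // Store 4, 2, and/or 1 remaining pixels per row.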
      if XNN_LIKELY(w & (4 * sizeof(float))) {
        _mm_storeu_ps(o1, vo1);
        o1 += 4;
        _mm_storeu_ps(o0, vo0);
        o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          _mm_storel_pi((__m64*) o1, vo1);
          o1 += 2;
          _mm_storel_pi((__m64*) o0, vo0);
          o0 += 2;

          vo0 = _mm_movehl_ps(vo0, vo0);
          vo1 = _mm_movehl_ps(vo1, vo1);
        }
        if (w & (1 * sizeof(float))) {
          _mm_store_ss(o1, vo1);
          o1 += 1;
          _mm_store_ss(o0, vo0);
          o0 += 1;
        }
      }
    }

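    // Slide the input window down two rows: the rows that were i2/i3 become
    // the next iteration's i0/i1, rewound to their row starts.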
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);

    o0 = o1;
    o1 = (float*) ((uintptr_t) o0 + input_width);

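    // doz() is a saturating (floor-at-zero) subtraction.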
    output_height = doz(output_height, 2);
  } while (output_height != 0);
}