// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5s2p2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

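// Depthwise convolution with a 5x5 kernel, subsampling (stride) 2, and
// padding 2, over a single channel in CHW layout. Per the _1x4_acc3 suffix,
// each iteration produces 1 output row, 4 output pixels at a time, using 3
// partial accumulators (presumably to shorten FP dependency chains).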
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__sse_1x4_acc3(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 1);
  assert(padding_top <= 2);

  const __m128 vmask_even = _mm_load_ps((const float*) params->sse.mask_even);
  const __m128 vmask_odd = _mm_load_ps((const float*) params->sse.mask_odd);
  const __m128 vmax = _mm_load_ps(params->sse.max);
  const __m128 vmin = _mm_load_ps(params->sse.min);

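  // The weights blob holds the bias followed by the 25 kernel coefficients in
  // row-major order (vkRC = kernel row R, column C), each broadcast to all
  // four lanes.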
  const __m128 vbias = _mm_load1_ps(weights);
  const __m128 vk00 = _mm_load1_ps(weights + 1);
  const __m128 vk01 = _mm_load1_ps(weights + 2);
  const __m128 vk02 = _mm_load1_ps(weights + 3);
  const __m128 vk03 = _mm_load1_ps(weights + 4);
  const __m128 vk04 = _mm_load1_ps(weights + 5);
  const __m128 vk10 = _mm_load1_ps(weights + 6);
  const __m128 vk11 = _mm_load1_ps(weights + 7);
  const __m128 vk12 = _mm_load1_ps(weights + 8);
  const __m128 vk13 = _mm_load1_ps(weights + 9);
  const __m128 vk14 = _mm_load1_ps(weights + 10);
  const __m128 vk20 = _mm_load1_ps(weights + 11);
  const __m128 vk21 = _mm_load1_ps(weights + 12);
  const __m128 vk22 = _mm_load1_ps(weights + 13);
  const __m128 vk23 = _mm_load1_ps(weights + 14);
  const __m128 vk24 = _mm_load1_ps(weights + 15);
  const __m128 vk30 = _mm_load1_ps(weights + 16);
  const __m128 vk31 = _mm_load1_ps(weights + 17);
  const __m128 vk32 = _mm_load1_ps(weights + 18);
  const __m128 vk33 = _mm_load1_ps(weights + 19);
  const __m128 vk34 = _mm_load1_ps(weights + 20);
  const __m128 vk40 = _mm_load1_ps(weights + 21);
  const __m128 vk41 = _mm_load1_ps(weights + 22);
  const __m128 vk42 = _mm_load1_ps(weights + 23);
  const __m128 vk43 = _mm_load1_ps(weights + 24);
  const __m128 vk44 = _mm_load1_ps(weights + 25);

  const uint32_t padding_top_less_1 = padding_top - 1;
  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));

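  // Set up pointers to the 5 input rows feeding the first output row; rows in
  // the top padding are redirected to the caller-provided zero row. Note that
  // input_width is in bytes, not elements.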
  const float* i0 = zero;
  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
    i1 = zero;
  }
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);


  float* o0 = output;

  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i3 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 7) {
      i4 = zero;
    }

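    // Hex digits in vector names index input columns of the current window:
    // vi0x8ACE holds even columns 8/A/C/E of row 0, vi0x9BDF the odd ones.
    // The x6024/x7135 registers carry the previous block's last even/odd
    // column in their low lane, feeding the left-most kernel taps; they start
    // as zeros to realize the left padding.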
    __m128 vi0x6024 = _mm_setzero_ps();
    __m128 vi1x6024 = _mm_setzero_ps();
    __m128 vi2x6024 = _mm_setzero_ps();
    __m128 vi3x6024 = _mm_setzero_ps();
    __m128 vi4x6024 = _mm_setzero_ps();

    __m128 vi0x7135 = _mm_setzero_ps();
    __m128 vi1x7135 = _mm_setzero_ps();
    __m128 vi2x7135 = _mm_setzero_ps();
    __m128 vi3x7135 = _mm_setzero_ps();
    __m128 vi4x7135 = _mm_setzero_ps();

    const __m128 vi0x89AB = _mm_loadu_ps(i0);
    const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);
    i0 += 8;
    const __m128 vi1x89AB = _mm_loadu_ps(i1);
    const __m128 vi1xCDEF = _mm_loadu_ps(i1 + 4);
    i1 += 8;
    const __m128 vi2x89AB = _mm_loadu_ps(i2);
    const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);
    i2 += 8;
    const __m128 vi3x89AB = _mm_loadu_ps(i3);
    const __m128 vi3xCDEF = _mm_loadu_ps(i3 + 4);
    i3 += 8;
    const __m128 vi4x89AB = _mm_loadu_ps(i4);
    const __m128 vi4xCDEF = _mm_loadu_ps(i4 + 4);
    i4 += 8;

    __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
    __m128 vi1x8ACE = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    __m128 vi1x9BDF = _mm_shuffle_ps(vi1x89AB, vi1xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
    __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
    __m128 vi3x8ACE = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    __m128 vi3x9BDF = _mm_shuffle_ps(vi3x89AB, vi3xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
    __m128 vi4x8ACE = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    __m128 vi4x9BDF = _mm_shuffle_ps(vi4x89AB, vi4xCDEF, _MM_SHUFFLE(3, 1, 3, 1));

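    // Main loop: consume 8 input columns (4 even + 4 odd) and emit 4 output
    // pixels per iteration while more than one full block remains; the final
    // 1-8 columns are handled below with masking.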
    size_t w = input_width;
    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk02));
      __m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk12);
      __m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk22);
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x8ACE, vk32));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x8ACE, vk42));

      const __m128 vi0xE8AC = _mm_shuffle_ps(vi0x8ACE, vi0x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi1xE8AC = _mm_shuffle_ps(vi1x8ACE, vi1x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi2xE8AC = _mm_shuffle_ps(vi2x8ACE, vi2x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi3xE8AC = _mm_shuffle_ps(vi3x8ACE, vi3x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi4xE8AC = _mm_shuffle_ps(vi4x8ACE, vi4x8ACE, _MM_SHUFFLE(2, 1, 0, 3));

      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x9BDF, vk03));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk13));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk23));
      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x9BDF, vk33));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x9BDF, vk43));

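      // Rotating 8ACE right by one lane gives E8AC; _mm_move_ss then replaces
      // the low lane with the saved column 6, forming the 68AC window for the
      // column-0 kernel taps.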
      const __m128 vi0x68AC = _mm_move_ss(vi0xE8AC, vi0x6024);
      vi0x6024 = vi0xE8AC;
      const __m128 vi1x68AC = _mm_move_ss(vi1xE8AC, vi1x6024);
      vi1x6024 = vi1xE8AC;
      const __m128 vi2x68AC = _mm_move_ss(vi2xE8AC, vi2x6024);
      vi2x6024 = vi2xE8AC;
      const __m128 vi3x68AC = _mm_move_ss(vi3xE8AC, vi3x6024);
      vi3x6024 = vi3xE8AC;
      const __m128 vi4x68AC = _mm_move_ss(vi4xE8AC, vi4x6024);
      vi4x6024 = vi4xE8AC;

      const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));

      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x68AC, vk00));
      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi1x68AC, vk10));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x68AC, vk20));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x68AC, vk30));
      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi4x68AC, vk40));

      const __m128 vi0xGHIJ = _mm_loadu_ps(i0);
      const __m128 vi0xKLMN = _mm_loadu_ps(i0 + 4);
      i0 += 8;
      const __m128 vi1xGHIJ = _mm_loadu_ps(i1);
      const __m128 vi1xKLMN = _mm_loadu_ps(i1 + 4);
      i1 += 8;
      const __m128 vi2xGHIJ = _mm_loadu_ps(i2);
      const __m128 vi2xKLMN = _mm_loadu_ps(i2 + 4);
      i2 += 8;
      const __m128 vi3xGHIJ = _mm_loadu_ps(i3);
      const __m128 vi3xKLMN = _mm_loadu_ps(i3 + 4);
      i3 += 8;
      const __m128 vi4xGHIJ = _mm_loadu_ps(i4);
      const __m128 vi4xKLMN = _mm_loadu_ps(i4 + 4);
      i4 += 8;

      const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7135);
      vi0x7135 = vi0xF9BD;
      const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7135);
      vi1x7135 = vi1xF9BD;
      const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7135);
      vi2x7135 = vi2xF9BD;
      const __m128 vi3x79BD = _mm_move_ss(vi3xF9BD, vi3x7135);
      vi3x7135 = vi3xF9BD;
      const __m128 vi4x79BD = _mm_move_ss(vi4xF9BD, vi4x7135);
      vi4x7135 = vi4xF9BD;

      const __m128 vi0xGIKM = _mm_shuffle_ps(vi0xGHIJ, vi0xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vi0xHJLN = _mm_shuffle_ps(vi0xGHIJ, vi0xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
      vi0x9BDF = vi0xHJLN;
      const __m128 vi1xGIKM = _mm_shuffle_ps(vi1xGHIJ, vi1xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vi1xHJLN = _mm_shuffle_ps(vi1xGHIJ, vi1xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
      vi1x9BDF = vi1xHJLN;
      const __m128 vi2xGIKM = _mm_shuffle_ps(vi2xGHIJ, vi2xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vi2xHJLN = _mm_shuffle_ps(vi2xGHIJ, vi2xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
      vi2x9BDF = vi2xHJLN;
      const __m128 vi3xGIKM = _mm_shuffle_ps(vi3xGHIJ, vi3xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vi3xHJLN = _mm_shuffle_ps(vi3xGHIJ, vi3xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
      vi3x9BDF = vi3xHJLN;
      const __m128 vi4xGIKM = _mm_shuffle_ps(vi4xGHIJ, vi4xKLMN, _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vi4xHJLN = _mm_shuffle_ps(vi4xGHIJ, vi4xKLMN, _MM_SHUFFLE(3, 1, 3, 1));
      vi4x9BDF = vi4xHJLN;

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk01));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x79BD, vk11));
      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x79BD, vk21));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x79BD, vk31));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x79BD, vk41));

      const __m128 vi0xGACE = _mm_move_ss(vi0x8ACE, vi0xGIKM);
      vi0x8ACE = vi0xGIKM;
      const __m128 vi1xGACE = _mm_move_ss(vi1x8ACE, vi1xGIKM);
      vi1x8ACE = vi1xGIKM;
      const __m128 vi2xGACE = _mm_move_ss(vi2x8ACE, vi2xGIKM);
      vi2x8ACE = vi2xGIKM;
      const __m128 vi3xGACE = _mm_move_ss(vi3x8ACE, vi3xGIKM);
      vi3x8ACE = vi3xGIKM;
      const __m128 vi4xGACE = _mm_move_ss(vi4x8ACE, vi4xGIKM);
      vi4x8ACE = vi4xGIKM;

      const __m128 vi0xACEG = _mm_shuffle_ps(vi0xGACE, vi0xGACE, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi1xACEG = _mm_shuffle_ps(vi1xGACE, vi1xGACE, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi2xACEG = _mm_shuffle_ps(vi2xGACE, vi2xGACE, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi3xACEG = _mm_shuffle_ps(vi3xGACE, vi3xGACE, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi4xACEG = _mm_shuffle_ps(vi4xGACE, vi4xGACE, _MM_SHUFFLE(0, 3, 2, 1));

      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0xACEG, vk04));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1xACEG, vk14));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2xACEG, vk24));
      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3xACEG, vk34));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4xACEG, vk44));

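      // Reduce the three partial accumulators and clamp to [vmin, vmax].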
      vo0p0 = _mm_add_ps(vo0p0, vo0p1);
      vo0p0 = _mm_add_ps(vo0p0, vo0p2);

      __m128 vo0 = _mm_max_ps(vo0p0, vmin);

      vo0 = _mm_min_ps(vo0, vmax);

      _mm_storeu_ps(o0, vo0);
      o0 += 4;
    }
    // Last block has 1-8 pixels to process.
    assert(w <= 8 * sizeof(float));
    assert(w >= 1 * sizeof(float));
    {
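      // Zero out input columns past the end of the row (masks come from
      // params) so they do not contribute to the remaining outputs.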
      vi0x8ACE = _mm_and_ps(vi0x8ACE, vmask_even);
      vi0x9BDF = _mm_and_ps(vi0x9BDF, vmask_odd);
      vi1x8ACE = _mm_and_ps(vi1x8ACE, vmask_even);
      vi1x9BDF = _mm_and_ps(vi1x9BDF, vmask_odd);
      vi2x8ACE = _mm_and_ps(vi2x8ACE, vmask_even);
      vi2x9BDF = _mm_and_ps(vi2x9BDF, vmask_odd);
      vi3x8ACE = _mm_and_ps(vi3x8ACE, vmask_even);
      vi3x9BDF = _mm_and_ps(vi3x9BDF, vmask_odd);
      vi4x8ACE = _mm_and_ps(vi4x8ACE, vmask_even);
      vi4x9BDF = _mm_and_ps(vi4x9BDF, vmask_odd);

      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x8ACE, vk02));
      __m128 vo0p1 = _mm_mul_ps(vi1x8ACE, vk12);
      __m128 vo0p2 = _mm_mul_ps(vi2x8ACE, vk22);
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x8ACE, vk32));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x8ACE, vk42));

      const __m128 vi0xE8AC = _mm_shuffle_ps(vi0x8ACE, vi0x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi1xE8AC = _mm_shuffle_ps(vi1x8ACE, vi1x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi2xE8AC = _mm_shuffle_ps(vi2x8ACE, vi2x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi3xE8AC = _mm_shuffle_ps(vi3x8ACE, vi3x8ACE, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi4xE8AC = _mm_shuffle_ps(vi4x8ACE, vi4x8ACE, _MM_SHUFFLE(2, 1, 0, 3));

      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0x9BDF, vk03));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x9BDF, vk13));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x9BDF, vk23));
      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3x9BDF, vk33));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4x9BDF, vk43));

      const __m128 vi0x68AC = _mm_move_ss(vi0xE8AC, vi0x6024);
      const __m128 vi1x68AC = _mm_move_ss(vi1xE8AC, vi1x6024);
      const __m128 vi2x68AC = _mm_move_ss(vi2xE8AC, vi2x6024);
      const __m128 vi3x68AC = _mm_move_ss(vi3xE8AC, vi3x6024);
      const __m128 vi4x68AC = _mm_move_ss(vi4xE8AC, vi4x6024);

      const __m128 vi0xF9BD = _mm_shuffle_ps(vi0x9BDF, vi0x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi1xF9BD = _mm_shuffle_ps(vi1x9BDF, vi1x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi2xF9BD = _mm_shuffle_ps(vi2x9BDF, vi2x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi3xF9BD = _mm_shuffle_ps(vi3x9BDF, vi3x9BDF, _MM_SHUFFLE(2, 1, 0, 3));
      const __m128 vi4xF9BD = _mm_shuffle_ps(vi4x9BDF, vi4x9BDF, _MM_SHUFFLE(2, 1, 0, 3));

      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi0x68AC, vk00));
      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi1x68AC, vk10));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x68AC, vk20));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi3x68AC, vk30));
      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi4x68AC, vk40));

      const __m128 vi0x79BD = _mm_move_ss(vi0xF9BD, vi0x7135);
      const __m128 vi1x79BD = _mm_move_ss(vi1xF9BD, vi1x7135);
      const __m128 vi2x79BD = _mm_move_ss(vi2xF9BD, vi2x7135);
      const __m128 vi3x79BD = _mm_move_ss(vi3xF9BD, vi3x7135);
      const __m128 vi4x79BD = _mm_move_ss(vi4xF9BD, vi4x7135);

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x79BD, vk01));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi1x79BD, vk11));
      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi2x79BD, vk21));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi3x79BD, vk31));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi4x79BD, vk41));

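      // No further columns are loaded here, so the low lane is filled with
      // zero instead of the next block's first even column before rotating
      // into the ACEG window for the column-4 taps.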
      const __m128 vzero = _mm_setzero_ps();
      const __m128 vi0xGACE = _mm_move_ss(vi0x8ACE, vzero);
      const __m128 vi1xGACE = _mm_move_ss(vi1x8ACE, vzero);
      const __m128 vi2xGACE = _mm_move_ss(vi2x8ACE, vzero);
      const __m128 vi3xGACE = _mm_move_ss(vi3x8ACE, vzero);
      const __m128 vi4xGACE = _mm_move_ss(vi4x8ACE, vzero);

      const __m128 vi0xACEG = _mm_shuffle_ps(vi0xGACE, vi0xGACE, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi1xACEG = _mm_shuffle_ps(vi1xGACE, vi1xGACE, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi2xACEG = _mm_shuffle_ps(vi2xGACE, vi2xGACE, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi3xACEG = _mm_shuffle_ps(vi3xGACE, vi3xGACE, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128 vi4xACEG = _mm_shuffle_ps(vi4xGACE, vi4xGACE, _MM_SHUFFLE(0, 3, 2, 1));

      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi0xACEG, vk04));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1xACEG, vk14));
      vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2xACEG, vk24));
      vo0p2 = _mm_add_ps(vo0p2, _mm_mul_ps(vi3xACEG, vk34));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi4xACEG, vk44));

      vo0p0 = _mm_add_ps(vo0p0, vo0p1);
      vo0p0 = _mm_add_ps(vo0p0, vo0p2);

      __m128 vo0 = _mm_max_ps(vo0p0, vmin);

      vo0 = _mm_min_ps(vo0, vmax);

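      // w is the remaining input width in bytes (1 to 8 floats); with stride
      // 2 this yields ceil(pixels / 2) valid output pixels to store.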
      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
      if XNN_LIKELY(w_tmp >= 4) {
        _mm_storeu_ps(o0, vo0);
        o0 += 4;
      } else {
        if (w_tmp & 2) {
          _mm_storel_pi((__m64*) o0, vo0);
          o0 += 2;

          vo0 = _mm_movehl_ps(vo0, vo0);
        }
        if (w_tmp & 1) {
          _mm_store_ss(o0, vo0);
          o0 += 1;
        }
      }
    }

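    // Advance to the next output row: with stride 2 the row roles shift down
    // by two, so the old i2..i4 become the new i0..i2. input_decrement rewinds
    // each pointer (advanced in rounded-up blocks) back to the start of its row.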
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);


    output_height -= 1;
    padded_input_height -= 2;
  } while (output_height != 0);
}