// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/dwconv.h>

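// Depthwise convolution microkernel: 25 taps (e.g. a 5x5 window), up to
// 8 channels per loop iteration ("up8"), SSE intrinsics, and two accumulator
// chains ("acc2") to expose more instruction-level parallelism.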
void xnn_f32_dwconv_minmax_ukernel_up8x25__sse_acc2(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(channels != 0);
  assert(output_width != 0);

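  // Output clamping bounds, loaded once and applied to every result below.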
  const __m128 vmax = _mm_load_ps(params->sse.max);
  const __m128 vmin = _mm_load_ps(params->sse.min);
  do {
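    // Gather the 25 input row pointers for this output pixel. Rows equal to
    // the shared `zero` buffer (implicit padding) are used as-is; all other
    // rows are shifted by `input_offset` bytes.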
    const float* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
    }
    const float* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
    }
    const float* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
    }
    const float* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
    }
    const float* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
    }
    const float* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
    }
    const float* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
    }
    const float* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const float*) ((uintptr_t) i7 + input_offset);
    }
    const float* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const float*) ((uintptr_t) i8 + input_offset);
    }
    const float* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const float*) ((uintptr_t) i9 + input_offset);
    }
    const float* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const float*) ((uintptr_t) i10 + input_offset);
    }
    const float* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const float*) ((uintptr_t) i11 + input_offset);
    }
    const float* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const float*) ((uintptr_t) i12 + input_offset);
    }
    const float* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const float*) ((uintptr_t) i13 + input_offset);
    }
    const float* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const float*) ((uintptr_t) i14 + input_offset);
    }
    const float* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const float*) ((uintptr_t) i15 + input_offset);
    }
    const float* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const float*) ((uintptr_t) i16 + input_offset);
    }
    const float* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const float*) ((uintptr_t) i17 + input_offset);
    }
    const float* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const float*) ((uintptr_t) i18 + input_offset);
    }
    const float* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const float*) ((uintptr_t) i19 + input_offset);
    }
    const float* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const float*) ((uintptr_t) i20 + input_offset);
    }
    const float* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const float*) ((uintptr_t) i21 + input_offset);
    }
    const float* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const float*) ((uintptr_t) i22 + input_offset);
    }
    const float* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const float*) ((uintptr_t) i23 + input_offset);
    }
    const float* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const float*) ((uintptr_t) i24 + input_offset);
    }
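    // Advance to the next output pixel's set of input row pointers.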
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
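    // Main loop: 8 channels per iteration. Partial products alternate between
    // two accumulator chains (...p0 and ...p1) that are summed at the end of
    // each iteration. Weights are packed per 8-channel group as 8 bias values
    // followed by 8 values per tap for each of the 25 taps (208 floats total).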
    for (; c >= 8; c -= 8) {
      __m128 vacc0123p0 = _mm_load_ps(w);
      __m128 vacc4567p0 = _mm_load_ps(w + 4);


      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      const __m128 vi0x4567 = _mm_loadu_ps(i0 + 4);
      i0 += 8;

      const __m128 vk0x0123 = _mm_load_ps(w + 8);
      const __m128 vk0x4567 = _mm_load_ps(w + 12);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi0x4567, vk0x4567));

      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      const __m128 vi1x4567 = _mm_loadu_ps(i1 + 4);
      i1 += 8;

      const __m128 vk1x0123 = _mm_load_ps(w + 16);
      const __m128 vk1x4567 = _mm_load_ps(w + 20);
      __m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
      __m128 vacc4567p1 = _mm_mul_ps(vi1x4567, vk1x4567);

      const __m128 vi2x0123 = _mm_loadu_ps(i2);
      const __m128 vi2x4567 = _mm_loadu_ps(i2 + 4);
      i2 += 8;

      const __m128 vk2x0123 = _mm_load_ps(w + 24);
      const __m128 vk2x4567 = _mm_load_ps(w + 28);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi2x4567, vk2x4567));

      const __m128 vi3x0123 = _mm_loadu_ps(i3);
      const __m128 vi3x4567 = _mm_loadu_ps(i3 + 4);
      i3 += 8;

      const __m128 vk3x0123 = _mm_load_ps(w + 32);
      const __m128 vk3x4567 = _mm_load_ps(w + 36);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));
      vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi3x4567, vk3x4567));

      const __m128 vi4x0123 = _mm_loadu_ps(i4);
      const __m128 vi4x4567 = _mm_loadu_ps(i4 + 4);
      i4 += 8;

      const __m128 vk4x0123 = _mm_load_ps(w + 40);
      const __m128 vk4x4567 = _mm_load_ps(w + 44);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi4x4567, vk4x4567));

      const __m128 vi5x0123 = _mm_loadu_ps(i5);
      const __m128 vi5x4567 = _mm_loadu_ps(i5 + 4);
      i5 += 8;

      const __m128 vk5x0123 = _mm_load_ps(w + 48);
      const __m128 vk5x4567 = _mm_load_ps(w + 52);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));
      vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi5x4567, vk5x4567));

      const __m128 vi6x0123 = _mm_loadu_ps(i6);
      const __m128 vi6x4567 = _mm_loadu_ps(i6 + 4);
      i6 += 8;

      const __m128 vk6x0123 = _mm_load_ps(w + 56);
      const __m128 vk6x4567 = _mm_load_ps(w + 60);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi6x4567, vk6x4567));

      const __m128 vi7x0123 = _mm_loadu_ps(i7);
      const __m128 vi7x4567 = _mm_loadu_ps(i7 + 4);
      i7 += 8;

      const __m128 vk7x0123 = _mm_load_ps(w + 64);
      const __m128 vk7x4567 = _mm_load_ps(w + 68);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));
      vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi7x4567, vk7x4567));

      const __m128 vi8x0123 = _mm_loadu_ps(i8);
      const __m128 vi8x4567 = _mm_loadu_ps(i8 + 4);
      i8 += 8;

      const __m128 vk8x0123 = _mm_load_ps(w + 72);
      const __m128 vk8x4567 = _mm_load_ps(w + 76);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi8x4567, vk8x4567));

      const __m128 vi9x0123 = _mm_loadu_ps(i9);
      const __m128 vi9x4567 = _mm_loadu_ps(i9 + 4);
      i9 += 8;

      const __m128 vk9x0123 = _mm_load_ps(w + 80);
      const __m128 vk9x4567 = _mm_load_ps(w + 84);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi9x0123, vk9x0123));
      vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi9x4567, vk9x4567));

      const __m128 vi10x0123 = _mm_loadu_ps(i10);
      const __m128 vi10x4567 = _mm_loadu_ps(i10 + 4);
      i10 += 8;

      const __m128 vk10x0123 = _mm_load_ps(w + 88);
      const __m128 vk10x4567 = _mm_load_ps(w + 92);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi10x0123, vk10x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi10x4567, vk10x4567));

      const __m128 vi11x0123 = _mm_loadu_ps(i11);
      const __m128 vi11x4567 = _mm_loadu_ps(i11 + 4);
      i11 += 8;

      const __m128 vk11x0123 = _mm_load_ps(w + 96);
      const __m128 vk11x4567 = _mm_load_ps(w + 100);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi11x0123, vk11x0123));
      vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi11x4567, vk11x4567));

      const __m128 vi12x0123 = _mm_loadu_ps(i12);
      const __m128 vi12x4567 = _mm_loadu_ps(i12 + 4);
      i12 += 8;

      const __m128 vk12x0123 = _mm_load_ps(w + 104);
      const __m128 vk12x4567 = _mm_load_ps(w + 108);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi12x0123, vk12x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi12x4567, vk12x4567));

      const __m128 vi13x0123 = _mm_loadu_ps(i13);
      const __m128 vi13x4567 = _mm_loadu_ps(i13 + 4);
      i13 += 8;

      const __m128 vk13x0123 = _mm_load_ps(w + 112);
      const __m128 vk13x4567 = _mm_load_ps(w + 116);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi13x0123, vk13x0123));
      vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi13x4567, vk13x4567));

      const __m128 vi14x0123 = _mm_loadu_ps(i14);
      const __m128 vi14x4567 = _mm_loadu_ps(i14 + 4);
      i14 += 8;

      const __m128 vk14x0123 = _mm_load_ps(w + 120);
      const __m128 vk14x4567 = _mm_load_ps(w + 124);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi14x0123, vk14x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi14x4567, vk14x4567));

      const __m128 vi15x0123 = _mm_loadu_ps(i15);
      const __m128 vi15x4567 = _mm_loadu_ps(i15 + 4);
      i15 += 8;

      const __m128 vk15x0123 = _mm_load_ps(w + 128);
      const __m128 vk15x4567 = _mm_load_ps(w + 132);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi15x0123, vk15x0123));
      vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi15x4567, vk15x4567));

      const __m128 vi16x0123 = _mm_loadu_ps(i16);
      const __m128 vi16x4567 = _mm_loadu_ps(i16 + 4);
      i16 += 8;

      const __m128 vk16x0123 = _mm_load_ps(w + 136);
      const __m128 vk16x4567 = _mm_load_ps(w + 140);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi16x0123, vk16x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi16x4567, vk16x4567));

      const __m128 vi17x0123 = _mm_loadu_ps(i17);
      const __m128 vi17x4567 = _mm_loadu_ps(i17 + 4);
      i17 += 8;

      const __m128 vk17x0123 = _mm_load_ps(w + 144);
      const __m128 vk17x4567 = _mm_load_ps(w + 148);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi17x0123, vk17x0123));
      vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi17x4567, vk17x4567));

      const __m128 vi18x0123 = _mm_loadu_ps(i18);
      const __m128 vi18x4567 = _mm_loadu_ps(i18 + 4);
      i18 += 8;

      const __m128 vk18x0123 = _mm_load_ps(w + 152);
      const __m128 vk18x4567 = _mm_load_ps(w + 156);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi18x0123, vk18x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi18x4567, vk18x4567));

      const __m128 vi19x0123 = _mm_loadu_ps(i19);
      const __m128 vi19x4567 = _mm_loadu_ps(i19 + 4);
      i19 += 8;

      const __m128 vk19x0123 = _mm_load_ps(w + 160);
      const __m128 vk19x4567 = _mm_load_ps(w + 164);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi19x0123, vk19x0123));
      vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi19x4567, vk19x4567));

      const __m128 vi20x0123 = _mm_loadu_ps(i20);
      const __m128 vi20x4567 = _mm_loadu_ps(i20 + 4);
      i20 += 8;

      const __m128 vk20x0123 = _mm_load_ps(w + 168);
      const __m128 vk20x4567 = _mm_load_ps(w + 172);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi20x0123, vk20x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi20x4567, vk20x4567));

      const __m128 vi21x0123 = _mm_loadu_ps(i21);
      const __m128 vi21x4567 = _mm_loadu_ps(i21 + 4);
      i21 += 8;

      const __m128 vk21x0123 = _mm_load_ps(w + 176);
      const __m128 vk21x4567 = _mm_load_ps(w + 180);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi21x0123, vk21x0123));
      vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi21x4567, vk21x4567));

      const __m128 vi22x0123 = _mm_loadu_ps(i22);
      const __m128 vi22x4567 = _mm_loadu_ps(i22 + 4);
      i22 += 8;

      const __m128 vk22x0123 = _mm_load_ps(w + 184);
      const __m128 vk22x4567 = _mm_load_ps(w + 188);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi22x0123, vk22x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi22x4567, vk22x4567));

      const __m128 vi23x0123 = _mm_loadu_ps(i23);
      const __m128 vi23x4567 = _mm_loadu_ps(i23 + 4);
      i23 += 8;

      const __m128 vk23x0123 = _mm_load_ps(w + 192);
      const __m128 vk23x4567 = _mm_load_ps(w + 196);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi23x0123, vk23x0123));
      vacc4567p1 = _mm_add_ps(vacc4567p1, _mm_mul_ps(vi23x4567, vk23x4567));

      const __m128 vi24x0123 = _mm_loadu_ps(i24);
      const __m128 vi24x4567 = _mm_loadu_ps(i24 + 4);
      i24 += 8;

      const __m128 vk24x0123 = _mm_load_ps(w + 200);
      const __m128 vk24x4567 = _mm_load_ps(w + 204);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi24x0123, vk24x0123));
      vacc4567p0 = _mm_add_ps(vacc4567p0, _mm_mul_ps(vi24x4567, vk24x4567));

      w += 208;

      // Add up all accumulators to vacc0123p0 and vacc4567p0
      vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);
      vacc4567p0 = _mm_add_ps(vacc4567p0, vacc4567p1);

      __m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
      __m128 vacc4567 = _mm_max_ps(vacc4567p0, vmin);
      vacc0123 = _mm_min_ps(vacc0123, vmax);
      vacc4567 = _mm_min_ps(vacc4567, vmax);

      _mm_storeu_ps(output, vacc0123);
      _mm_storeu_ps(output + 4, vacc4567);
      output += 8;
    }
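    // Process a remaining group of 4 channels, if any. Weight blocks stay
    // 8 floats apart (the packed layout rounds each channel group up to 8),
    // so only the base pointer w advances by 4 here.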
    for (; c >= 4; c -= 4) {
      __m128 vacc0123p0 = _mm_load_ps(w);

      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      i0 += 4;

      const __m128 vk0x0123 = _mm_load_ps(w + 8);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));

      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      i1 += 4;

      const __m128 vk1x0123 = _mm_load_ps(w + 16);
      __m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);

      const __m128 vi2x0123 = _mm_loadu_ps(i2);
      i2 += 4;

      const __m128 vk2x0123 = _mm_load_ps(w + 24);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));

      const __m128 vi3x0123 = _mm_loadu_ps(i3);
      i3 += 4;

      const __m128 vk3x0123 = _mm_load_ps(w + 32);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));

      const __m128 vi4x0123 = _mm_loadu_ps(i4);
      i4 += 4;

      const __m128 vk4x0123 = _mm_load_ps(w + 40);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));

      const __m128 vi5x0123 = _mm_loadu_ps(i5);
      i5 += 4;

      const __m128 vk5x0123 = _mm_load_ps(w + 48);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));

      const __m128 vi6x0123 = _mm_loadu_ps(i6);
      i6 += 4;

      const __m128 vk6x0123 = _mm_load_ps(w + 56);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));

      const __m128 vi7x0123 = _mm_loadu_ps(i7);
      i7 += 4;

      const __m128 vk7x0123 = _mm_load_ps(w + 64);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));

      const __m128 vi8x0123 = _mm_loadu_ps(i8);
      i8 += 4;

      const __m128 vk8x0123 = _mm_load_ps(w + 72);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));

      const __m128 vi9x0123 = _mm_loadu_ps(i9);
      i9 += 4;

      const __m128 vk9x0123 = _mm_load_ps(w + 80);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi9x0123, vk9x0123));

      const __m128 vi10x0123 = _mm_loadu_ps(i10);
      i10 += 4;

      const __m128 vk10x0123 = _mm_load_ps(w + 88);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi10x0123, vk10x0123));

      const __m128 vi11x0123 = _mm_loadu_ps(i11);
      i11 += 4;

      const __m128 vk11x0123 = _mm_load_ps(w + 96);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi11x0123, vk11x0123));

      const __m128 vi12x0123 = _mm_loadu_ps(i12);
      i12 += 4;

      const __m128 vk12x0123 = _mm_load_ps(w + 104);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi12x0123, vk12x0123));

      const __m128 vi13x0123 = _mm_loadu_ps(i13);
      i13 += 4;

      const __m128 vk13x0123 = _mm_load_ps(w + 112);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi13x0123, vk13x0123));

      const __m128 vi14x0123 = _mm_loadu_ps(i14);
      i14 += 4;

      const __m128 vk14x0123 = _mm_load_ps(w + 120);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi14x0123, vk14x0123));

      const __m128 vi15x0123 = _mm_loadu_ps(i15);
      i15 += 4;

      const __m128 vk15x0123 = _mm_load_ps(w + 128);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi15x0123, vk15x0123));

      const __m128 vi16x0123 = _mm_loadu_ps(i16);
      i16 += 4;

      const __m128 vk16x0123 = _mm_load_ps(w + 136);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi16x0123, vk16x0123));

      const __m128 vi17x0123 = _mm_loadu_ps(i17);
      i17 += 4;

      const __m128 vk17x0123 = _mm_load_ps(w + 144);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi17x0123, vk17x0123));

      const __m128 vi18x0123 = _mm_loadu_ps(i18);
      i18 += 4;

      const __m128 vk18x0123 = _mm_load_ps(w + 152);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi18x0123, vk18x0123));

      const __m128 vi19x0123 = _mm_loadu_ps(i19);
      i19 += 4;

      const __m128 vk19x0123 = _mm_load_ps(w + 160);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi19x0123, vk19x0123));

      const __m128 vi20x0123 = _mm_loadu_ps(i20);
      i20 += 4;

      const __m128 vk20x0123 = _mm_load_ps(w + 168);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi20x0123, vk20x0123));

      const __m128 vi21x0123 = _mm_loadu_ps(i21);
      i21 += 4;

      const __m128 vk21x0123 = _mm_load_ps(w + 176);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi21x0123, vk21x0123));

      const __m128 vi22x0123 = _mm_loadu_ps(i22);
      i22 += 4;

      const __m128 vk22x0123 = _mm_load_ps(w + 184);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi22x0123, vk22x0123));

      const __m128 vi23x0123 = _mm_loadu_ps(i23);
      i23 += 4;

      const __m128 vk23x0123 = _mm_load_ps(w + 192);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi23x0123, vk23x0123));

      const __m128 vi24x0123 = _mm_loadu_ps(i24);
      i24 += 4;

      const __m128 vk24x0123 = _mm_load_ps(w + 200);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi24x0123, vk24x0123));

      w += 4;

      // Add up all accumulators to vacc0123p0
      vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);

      __m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
      vacc0123 = _mm_min_ps(vacc0123, vmax);

      _mm_storeu_ps(output, vacc0123);
      output += 4;
    }
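    // Final 1-3 channels: compute a full 4-wide result and store only the
    // valid lanes (2 and/or 1, depending on c).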
    if XNN_UNLIKELY(c != 0) {
      __m128 vacc0123p0 = _mm_load_ps(w);

      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      const __m128 vk0x0123 = _mm_load_ps(w + 8);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi0x0123, vk0x0123));

      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      const __m128 vk1x0123 = _mm_load_ps(w + 16);
      __m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);

      const __m128 vi2x0123 = _mm_loadu_ps(i2);
      const __m128 vk2x0123 = _mm_load_ps(w + 24);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi2x0123, vk2x0123));

      const __m128 vi3x0123 = _mm_loadu_ps(i3);
      const __m128 vk3x0123 = _mm_load_ps(w + 32);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi3x0123, vk3x0123));

      const __m128 vi4x0123 = _mm_loadu_ps(i4);
      const __m128 vk4x0123 = _mm_load_ps(w + 40);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi4x0123, vk4x0123));

      const __m128 vi5x0123 = _mm_loadu_ps(i5);
      const __m128 vk5x0123 = _mm_load_ps(w + 48);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi5x0123, vk5x0123));

      const __m128 vi6x0123 = _mm_loadu_ps(i6);
      const __m128 vk6x0123 = _mm_load_ps(w + 56);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi6x0123, vk6x0123));

      const __m128 vi7x0123 = _mm_loadu_ps(i7);
      const __m128 vk7x0123 = _mm_load_ps(w + 64);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi7x0123, vk7x0123));

      const __m128 vi8x0123 = _mm_loadu_ps(i8);
      const __m128 vk8x0123 = _mm_load_ps(w + 72);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi8x0123, vk8x0123));

      const __m128 vi9x0123 = _mm_loadu_ps(i9);
      const __m128 vk9x0123 = _mm_load_ps(w + 80);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi9x0123, vk9x0123));

      const __m128 vi10x0123 = _mm_loadu_ps(i10);
      const __m128 vk10x0123 = _mm_load_ps(w + 88);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi10x0123, vk10x0123));

      const __m128 vi11x0123 = _mm_loadu_ps(i11);
      const __m128 vk11x0123 = _mm_load_ps(w + 96);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi11x0123, vk11x0123));

      const __m128 vi12x0123 = _mm_loadu_ps(i12);
      const __m128 vk12x0123 = _mm_load_ps(w + 104);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi12x0123, vk12x0123));

      const __m128 vi13x0123 = _mm_loadu_ps(i13);
      const __m128 vk13x0123 = _mm_load_ps(w + 112);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi13x0123, vk13x0123));

      const __m128 vi14x0123 = _mm_loadu_ps(i14);
      const __m128 vk14x0123 = _mm_load_ps(w + 120);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi14x0123, vk14x0123));

      const __m128 vi15x0123 = _mm_loadu_ps(i15);
      const __m128 vk15x0123 = _mm_load_ps(w + 128);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi15x0123, vk15x0123));

      const __m128 vi16x0123 = _mm_loadu_ps(i16);
      const __m128 vk16x0123 = _mm_load_ps(w + 136);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi16x0123, vk16x0123));

      const __m128 vi17x0123 = _mm_loadu_ps(i17);
      const __m128 vk17x0123 = _mm_load_ps(w + 144);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi17x0123, vk17x0123));

      const __m128 vi18x0123 = _mm_loadu_ps(i18);
      const __m128 vk18x0123 = _mm_load_ps(w + 152);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi18x0123, vk18x0123));

      const __m128 vi19x0123 = _mm_loadu_ps(i19);
      const __m128 vk19x0123 = _mm_load_ps(w + 160);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi19x0123, vk19x0123));

      const __m128 vi20x0123 = _mm_loadu_ps(i20);
      const __m128 vk20x0123 = _mm_load_ps(w + 168);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi20x0123, vk20x0123));

      const __m128 vi21x0123 = _mm_loadu_ps(i21);
      const __m128 vk21x0123 = _mm_load_ps(w + 176);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi21x0123, vk21x0123));

      const __m128 vi22x0123 = _mm_loadu_ps(i22);
      const __m128 vk22x0123 = _mm_load_ps(w + 184);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi22x0123, vk22x0123));

      const __m128 vi23x0123 = _mm_loadu_ps(i23);
      const __m128 vk23x0123 = _mm_load_ps(w + 192);
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(vi23x0123, vk23x0123));

      const __m128 vi24x0123 = _mm_loadu_ps(i24);
      const __m128 vk24x0123 = _mm_load_ps(w + 200);
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi24x0123, vk24x0123));

      // Add up all accumulators to vacc0123p0
      vacc0123p0 = _mm_add_ps(vacc0123p0, vacc0123p1);

      __m128 vacc0123 = _mm_max_ps(vacc0123p0, vmin);
      vacc0123 = _mm_min_ps(vacc0123, vmax);

      if (c & 2) {
        _mm_storel_pi((__m64*) output, vacc0123);
        vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
        output += 2;
      }
      if (c & 1) {
        _mm_store_ss(output, vacc0123);
        output += 1;
      }
    }

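    // Skip over the gap (if any) between this output pixel and the next.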
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}