// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/sse-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_5x8s4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (5 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

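  // Set up the output row pointers. Rows beyond mr alias the previous valid
  // row; the stores below write from c4 down to c0, so the real row's data
  // is written last and wins.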
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }

  do {
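    // Initialize all 5 rows of accumulators with the bias, packed as the
    // first 8 floats of each weight block.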
    __m128 vacc0x0123 = _mm_load_ps(w);
    __m128 vacc0x4567 = _mm_load_ps(w + 4);
    __m128 vacc1x0123 = vacc0x0123;
    __m128 vacc1x4567 = vacc0x4567;
    __m128 vacc2x0123 = vacc0x0123;
    __m128 vacc2x4567 = vacc0x4567;
    __m128 vacc3x0123 = vacc0x0123;
    __m128 vacc3x4567 = vacc0x4567;
    __m128 vacc4x0123 = vacc0x0123;
    __m128 vacc4x4567 = vacc0x4567;
    w += 8;

    size_t p = ks;
    do {
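      // Fetch the next 5 input row pointers from the indirection buffer.
      // Pointers that equal the zero buffer are padding rows and must not be
      // shifted by a_offset.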
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      const float* restrict a4 = a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const float*) ((uintptr_t) a4 + a_offset);
      }
      a += 5;

      size_t k = kc;
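      // Main loop: consume 4 elements of K per iteration. In the s4 scheme
      // each lane of a va register holds a different k element, and the
      // weights are packed so that rotating va by one lane lines it up with
      // the next block of packed weights.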
      while (k >= 4 * sizeof(float)) {
        __m128 va0 = _mm_loadu_ps(a0);
        a0 += 4;
        __m128 va1 = _mm_loadu_ps(a1);
        a1 += 4;
        __m128 va2 = _mm_loadu_ps(a2);
        a2 += 4;
        __m128 va3 = _mm_loadu_ps(a3);
        a3 += 4;
        __m128 va4 = _mm_loadu_ps(a4);
        a4 += 4;


        const __m128 vb0123c0 = _mm_load_ps(w + 0);
        const __m128 vb4567c0 = _mm_load_ps(w + 4);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c0));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c0));

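        // Rotate each va register one lane down (lane order 1,2,3,0) so
        // lane 0 picks up the next k element for the next weight block.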
        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c1 = _mm_load_ps(w + 8);
        const __m128 vb4567c1 = _mm_load_ps(w + 12);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c1));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c1));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c2 = _mm_load_ps(w + 16);
        const __m128 vb4567c2 = _mm_load_ps(w + 20);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c2));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c2));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c3 = _mm_load_ps(w + 24);
        const __m128 vb4567c3 = _mm_load_ps(w + 28);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c3));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c3));


        w += 32;
        k -= 4 * sizeof(float);
      }
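      // Remainder loop: handle the kc % 4 leftover elements one at a time by
      // broadcasting a single A value across all 8 output columns.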
      if XNN_UNLIKELY(k != 0) {
        do {
          const __m128 vb0123 = _mm_load_ps(w);
          const __m128 vb4567 = _mm_load_ps(w + 4);
          w += 8;

          const __m128 va0 = _mm_load1_ps(a0);
          a0 += 1;
          const __m128 va1 = _mm_load1_ps(a1);
          a1 += 1;
          const __m128 va2 = _mm_load1_ps(a2);
          a2 += 1;
          const __m128 va3 = _mm_load1_ps(a3);
          a3 += 1;
          const __m128 va4 = _mm_load1_ps(a4);
          a4 += 1;

          vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
          vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
          vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
          vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
          vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
          vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
          vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
          vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
          vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
          vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
          k -= sizeof(float);
        } while (k != 0);
      }
      p -= 5 * sizeof(void*);
    } while (p != 0);

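    // Clamp the accumulators to [min, max] per the minmax activation params.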
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);

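    // Fast path: store a full 5x8 tile, rows c4 down to c0, then rewind the
    // indirection pointer by ks so the same rows are reused for the next
    // block of 8 output columns.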
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_ps(c4, vacc4x0123);
      _mm_storeu_ps(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm_storeu_ps(c3, vacc3x0123);
      _mm_storeu_ps(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
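      // Tail: store the remaining 1-7 columns in 4/2/1 chunks, shifting the
      // already-stored lanes out of the accumulators after each step.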
      if (nc & 4) {
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}