// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>


void xnn_f32_gemm_minmax_ukernel_5x8s4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

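  // Set up per-row pointers into A and C. When mr is less than 5, out-of-range
  // rows alias the previous row: they redundantly compute the same result and
  // store it to the same place, which keeps the hot loop free of row branches.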
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

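  // Outer loop: produce the output in stripes of up to 8 columns. Each stripe
  // begins by loading the bias values at the start of the packed weights w
  // into every row's accumulators.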
  do {
    __m128 vacc0x0123 = _mm_load_ps(w + 0);
    __m128 vacc0x4567 = _mm_load_ps(w + 4);
    __m128 vacc1x0123 = vacc0x0123;
    __m128 vacc1x4567 = vacc0x4567;
    __m128 vacc2x0123 = vacc0x0123;
    __m128 vacc2x4567 = vacc0x4567;
    __m128 vacc3x0123 = vacc0x0123;
    __m128 vacc3x4567 = vacc0x4567;
    __m128 vacc4x0123 = vacc0x0123;
    __m128 vacc4x4567 = vacc0x4567;
    w += 8;

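    // Main loop: consume 4 K elements per iteration using the s4 shuffle
    // scheme. Each row loads 4 input values once; between the 4 multiply-add
    // blocks the vector is rotated one lane so the same register feeds all
    // 4 successive weight blocks.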
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;
      __m128 va1 = _mm_loadu_ps(a1);
      a1 += 4;
      __m128 va2 = _mm_loadu_ps(a2);
      a2 += 4;
      __m128 va3 = _mm_loadu_ps(a3);
      a3 += 4;
      __m128 va4 = _mm_loadu_ps(a4);
      a4 += 4;

      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c0));

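      // Rotate each va register by one lane (lane i takes lane i+1, lane 3
      // wraps to lane 0) so the next K element lines up with the next packed
      // weight block.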
      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c1));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c2));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
      vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
      vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c3));

      w += 32;
      k -= 4 * sizeof(float);
    }
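    // Remainder loop: handle the kc % 4 leftover K elements one at a time,
    // broadcasting a single A value per row against a full 8-wide weight block.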
    if XNN_UNLIKELY(k != 0) {
      do {
        const __m128 va0 = _mm_load1_ps(a0);
        a0 += 1;
        const __m128 va1 = _mm_load1_ps(a1);
        a1 += 1;
        const __m128 va2 = _mm_load1_ps(a2);
        a2 += 1;
        const __m128 va3 = _mm_load1_ps(a3);
        a3 += 1;
        const __m128 va4 = _mm_load1_ps(a4);
        a4 += 1;

        const __m128 vb0123 = _mm_load_ps(w);
        const __m128 vb4567 = _mm_load_ps(w + 4);
        w += 8;

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));

        k -= sizeof(float);
      } while (k != 0);
    }

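    // Apply the min-max activation: clamp every accumulator to [min, max].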
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);

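    // Write out the stripe. Full 8-column stripes take the fast path below;
    // the final, narrower stripe falls through to the nc < 8 tail, which
    // stores 4-, 2-, and 1-column chunks as needed.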
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_ps(c4, vacc4x0123);
      _mm_storeu_ps(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm_storeu_ps(c3, vacc3x0123);
      _mm_storeu_ps(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

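      // Rewind the A pointers by kc bytes so every row starts over at the
      // beginning of K for the next column stripe.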
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      if (nc & 4) {
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
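      // Store 2 columns, then shift the high lanes down with MOVHLPS so a
      // possible final 1-column store reads from lane 0.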
      if (nc & 2) {
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}