// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/argmaxpool.h>


void xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4(
    size_t output_pixels,
    size_t pooling_elements,
    size_t channels,
    const float** input,
    size_t input_offset,
    float* accumulation_buffer,
    uint32_t* index_buffer,
    float* output,
    uint32_t* index,
    size_t input_increment,
    size_t output_increment) XNN_DISABLE_TSAN
{
  assert(output_pixels != 0);
  assert(pooling_elements != 0);
  assert(pooling_elements > 9);
  assert(channels != 0);

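  // Each output pixel is processed in up to three phases:
  //   1. A first pass reduces the initial 9 pooling elements into
  //      accumulation_buffer (running maxima) and index_buffer (argmax
  //      indices), 4 channels per vector.
  //   2. Middle passes fold in 8 further elements at a time while more
  //      than 8 remain.
  //   3. A final pass folds in the last 1-8 elements and streams the
  //      results to output/index.
  // The scratch buffers are accessed with aligned loads/stores, so they
  // must be 16-byte aligned and hold at least channels rounded up to a
  // multiple of 4 entries.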
  do {
    {
      float* ab = accumulation_buffer;
      uint32_t* ib = index_buffer;

      const float* i0 = *input++;
      const float* i1 = *input++;
      const float* i2 = *input++;
      const float* i3 = *input++;
      const float* i4 = *input++;
      const float* i5 = *input++;
      const float* i6 = *input++;
      const float* i7 = *input++;
      const float* i8 = *input++;
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
      i7 = (const float*) ((uintptr_t) i7 + input_offset);
      i8 = (const float*) ((uintptr_t) i8 + input_offset);

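      // First pass: compute the elementwise maximum of the 9 input rows,
      // 4 channels at a time, remembering which row (0-8) supplied each
      // maximum.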
      for (size_t c = 0; c < channels; c += 4) {
        const __m128 vi0 = _mm_loadu_ps(i0);
        i0 += 4;
        const __m128 vi1 = _mm_loadu_ps(i1);
        i1 += 4;
        const __m128 vi2 = _mm_loadu_ps(i2);
        i2 += 4;
        const __m128 vi3 = _mm_loadu_ps(i3);
        i3 += 4;
        const __m128 vi4 = _mm_loadu_ps(i4);
        i4 += 4;
        const __m128 vi5 = _mm_loadu_ps(i5);
        i5 += 4;
        const __m128 vi6 = _mm_loadu_ps(i6);
        i6 += 4;
        const __m128 vi7 = _mm_loadu_ps(i7);
        i7 += 4;
        const __m128 vi8 = _mm_loadu_ps(i8);
        i8 += 4;

        __m128 vmax = vi0;
        __m128i vidx = _mm_setzero_si128();

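        // SSE2 has no vector blend instruction, so the per-lane select
        //   vidx = vmN ? N : vidx
        // is emulated as (vmN & N) | (~vmN & vidx), with vmN the all-ones
        // mask produced by _mm_cmpgt_ps in lanes where row N beats the
        // running maximum.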
        const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
        vmax = _mm_max_ps(vi1, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1)));

        const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
        vmax = _mm_max_ps(vi2, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2)));

        const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
        vmax = _mm_max_ps(vi3, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3)));

        const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
        vmax = _mm_max_ps(vi4, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, _mm_set1_epi32(4)));

        const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
        vmax = _mm_max_ps(vi5, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, _mm_set1_epi32(5)));

        const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
        vmax = _mm_max_ps(vi6, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, _mm_set1_epi32(6)));

        const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
        vmax = _mm_max_ps(vi7, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, _mm_set1_epi32(7)));

        const __m128i vm8 = _mm_castps_si128(_mm_cmpgt_ps(vi8, vmax));
        vmax = _mm_max_ps(vi8, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm8, vidx), _mm_and_si128(vm8, _mm_set1_epi32(8)));

        _mm_store_ps(ab, vmax);
        ab += 4;
        _mm_store_si128((__m128i*) ib, vidx);
        ib += 4;
      }
    }
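    // vidx0 is the pooling index of the first row consumed by the next
    // pass; the first pass handled rows 0-8, so it starts at 9.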
    const __m128i v1 = _mm_set1_epi32(1);
    const __m128i v8 = _mm_set1_epi32(8);
    __m128i vidx0 = _mm_add_epi32(v1, v8);

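    // Middle passes: merge 8 more rows into the scratch buffers per
    // iteration until no more than 8 rows remain for the final pass.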
    size_t k = pooling_elements;
    for (k -= 9; k > 8; k -= 8) {
      const float* i0 = *input++;
      const float* i1 = *input++;
      const float* i2 = *input++;
      const float* i3 = *input++;
      const float* i4 = *input++;
      const float* i5 = *input++;
      const float* i6 = *input++;
      const float* i7 = *input++;
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
      i7 = (const float*) ((uintptr_t) i7 + input_offset);

      float* ab = accumulation_buffer;
      uint32_t* ib = index_buffer;

      for (size_t c = 0; c < channels; c += 4) {
        const __m128 vi0 = _mm_loadu_ps(i0);
        i0 += 4;
        const __m128 vi1 = _mm_loadu_ps(i1);
        i1 += 4;
        const __m128 vi2 = _mm_loadu_ps(i2);
        i2 += 4;
        const __m128 vi3 = _mm_loadu_ps(i3);
        i3 += 4;
        const __m128 vi4 = _mm_loadu_ps(i4);
        i4 += 4;
        const __m128 vi5 = _mm_loadu_ps(i5);
        i5 += 4;
        const __m128 vi6 = _mm_loadu_ps(i6);
        i6 += 4;
        const __m128 vi7 = _mm_loadu_ps(i7);
        i7 += 4;

        __m128 vmax = _mm_load_ps(ab);
        __m128i vidx = _mm_load_si128((const __m128i*) ib);

        const __m128i vm0 = _mm_castps_si128(_mm_cmpgt_ps(vi0, vmax));
        vmax = _mm_max_ps(vi0, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm0, vidx), _mm_and_si128(vm0, vidx0));

        const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
        const __m128i vidx1 = _mm_add_epi32(vidx0, v1);
        vmax = _mm_max_ps(vi1, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, vidx1));

        const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
        const __m128i vidx2 = _mm_add_epi32(vidx1, v1);
        vmax = _mm_max_ps(vi2, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, vidx2));

        const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
        const __m128i vidx3 = _mm_add_epi32(vidx2, v1);
        vmax = _mm_max_ps(vi3, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, vidx3));

        const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
        const __m128i vidx4 = _mm_add_epi32(vidx3, v1);
        vmax = _mm_max_ps(vi4, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, vidx4));

        const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
        const __m128i vidx5 = _mm_add_epi32(vidx4, v1);
        vmax = _mm_max_ps(vi5, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, vidx5));

        const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
        const __m128i vidx6 = _mm_add_epi32(vidx5, v1);
        vmax = _mm_max_ps(vi6, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, vidx6));

        const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
        const __m128i vidx7 = _mm_add_epi32(vidx6, v1);
        vmax = _mm_max_ps(vi7, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, vidx7));

        _mm_store_ps(ab, vmax);
        ab += 4;
        _mm_store_si128((__m128i*) ib, vidx);
        ib += 4;
      }
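      // Advance the base pooling index past the 8 rows just consumed.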
      vidx0 = _mm_add_epi32(vidx0, v8);
    }

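    // Final pass: merge the last k rows (1 <= k <= 8) and write the
    // results directly to output/index instead of the scratch buffers.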
    float* o = output;
    uint32_t* i = index;
    {
      const float* i0 = input[0];
      const float* i1 = input[1];
      const float* i2 = input[2];
      const float* i3 = input[3];
      const float* i4 = input[4];
      const float* i5 = input[5];
      const float* i6 = input[6];
      const float* i7 = input[7];
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
      i7 = (const float*) ((uintptr_t) i7 + input_offset);
      input = (const float**) ((uintptr_t) input + input_increment);
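      // Alias the unused input rows to i0: after the vm0 step the running
      // maximum already satisfies vmax >= vi0, so a duplicated row can
      // never win the strict comparison and never corrupts the index.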
      if (k < 2) {
        i1 = i0;
      }
      if (k <= 2) {
        i2 = i0;
      }
      if (k < 4) {
        i3 = i0;
      }
      if (k <= 4) {
        i4 = i0;
      }
      if (k < 6) {
        i5 = i0;
      }
      if (k <= 6) {
        i6 = i0;
      }
      if (k != 8) {
        i7 = i0;
      }

      size_t c = channels;
      float* ab = accumulation_buffer;
      uint32_t* ib = index_buffer;
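      // Full groups of 4 channels: results go straight to output/index
      // with unaligned stores.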
      for (; c >= 4; c -= 4) {
        const __m128 vi0 = _mm_loadu_ps(i0);
        i0 += 4;
        const __m128 vi1 = _mm_loadu_ps(i1);
        i1 += 4;
        const __m128 vi2 = _mm_loadu_ps(i2);
        i2 += 4;
        const __m128 vi3 = _mm_loadu_ps(i3);
        i3 += 4;
        const __m128 vi4 = _mm_loadu_ps(i4);
        i4 += 4;
        const __m128 vi5 = _mm_loadu_ps(i5);
        i5 += 4;
        const __m128 vi6 = _mm_loadu_ps(i6);
        i6 += 4;
        const __m128 vi7 = _mm_loadu_ps(i7);
        i7 += 4;

        __m128 vmax = _mm_load_ps(ab);
        ab += 4;
        __m128i vidx = _mm_load_si128((const __m128i*) ib);
        ib += 4;

        const __m128i vm0 = _mm_castps_si128(_mm_cmpgt_ps(vi0, vmax));
        vmax = _mm_max_ps(vi0, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm0, vidx), _mm_and_si128(vm0, vidx0));

        const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
        const __m128i vidx1 = _mm_add_epi32(vidx0, v1);
        vmax = _mm_max_ps(vi1, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, vidx1));

        const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
        const __m128i vidx2 = _mm_add_epi32(vidx1, v1);
        vmax = _mm_max_ps(vi2, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, vidx2));

        const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
        const __m128i vidx3 = _mm_add_epi32(vidx2, v1);
        vmax = _mm_max_ps(vi3, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, vidx3));

        const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
        const __m128i vidx4 = _mm_add_epi32(vidx3, v1);
        vmax = _mm_max_ps(vi4, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, vidx4));

        const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
        const __m128i vidx5 = _mm_add_epi32(vidx4, v1);
        vmax = _mm_max_ps(vi5, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, vidx5));

        const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
        const __m128i vidx6 = _mm_add_epi32(vidx5, v1);
        vmax = _mm_max_ps(vi6, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, vidx6));

        const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
        const __m128i vidx7 = _mm_add_epi32(vidx6, v1);
        vmax = _mm_max_ps(vi7, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, vidx7));

        _mm_storeu_ps(o, vmax);
        o += 4;
        _mm_storeu_si128((__m128i*) i, vidx);
        i += 4;
      }
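      // Remainder of 1-3 channels: compute a full vector as above, then
      // store only the valid lanes.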
      if (c != 0) {
        const __m128 vi0 = _mm_loadu_ps(i0);
        const __m128 vi1 = _mm_loadu_ps(i1);
        const __m128 vi2 = _mm_loadu_ps(i2);
        const __m128 vi3 = _mm_loadu_ps(i3);
        const __m128 vi4 = _mm_loadu_ps(i4);
        const __m128 vi5 = _mm_loadu_ps(i5);
        const __m128 vi6 = _mm_loadu_ps(i6);
        const __m128 vi7 = _mm_loadu_ps(i7);

        __m128 vmax = _mm_load_ps(ab);
        __m128i vidx = _mm_load_si128((const __m128i*) ib);

        const __m128i vm0 = _mm_castps_si128(_mm_cmpgt_ps(vi0, vmax));
        vmax = _mm_max_ps(vi0, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm0, vidx), _mm_and_si128(vm0, vidx0));

        const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
        const __m128i vidx1 = _mm_add_epi32(vidx0, v1);
        vmax = _mm_max_ps(vi1, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, vidx1));

        const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
        const __m128i vidx2 = _mm_add_epi32(vidx1, v1);
        vmax = _mm_max_ps(vi2, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, vidx2));

        const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
        const __m128i vidx3 = _mm_add_epi32(vidx2, v1);
        vmax = _mm_max_ps(vi3, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, vidx3));

        const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
        const __m128i vidx4 = _mm_add_epi32(vidx3, v1);
        vmax = _mm_max_ps(vi4, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, vidx4));

        const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
        const __m128i vidx5 = _mm_add_epi32(vidx4, v1);
        vmax = _mm_max_ps(vi5, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, vidx5));

        const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
        const __m128i vidx6 = _mm_add_epi32(vidx5, v1);
        vmax = _mm_max_ps(vi6, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, vidx6));

        const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
        const __m128i vidx7 = _mm_add_epi32(vidx6, v1);
        vmax = _mm_max_ps(vi7, vmax);
        vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, vidx7));

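        // Store two lanes at a time: write the low half, then shift the
        // high lanes down for the final scalar store.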
        if (c & 2) {
          _mm_storel_pi((__m64*) o, vmax);
          _mm_storel_epi64((__m128i*) i, vidx);
          vmax = _mm_movehl_ps(vmax, vmax);
          vidx = _mm_unpackhi_epi64(vidx, vidx);
          o += 2;
          i += 2;
        }
        if (c & 1) {
          _mm_store_ss(o, vmax);
          *i = (uint32_t) _mm_cvtsi128_si32(vidx);
          o += 1;
          i += 1;
        }
      }
    }

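    // o already points past the last element written for this pixel;
    // output_increment accounts for any extra per-pixel stride
    // (presumably padding between output pixels).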
    output = (float*) ((uintptr_t) o + output_increment);
    index = (uint32_t*) i;
  } while (--output_pixels != 0);
}