// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/argmaxpool.h>

// f32 argmax-pooling micro-kernel (SSE2) for pooling windows of up to 4
// elements, processing 4 channels per SIMD iteration.
//
// For each output pixel, each channel's output is the maximum input value
// across the pooling elements, and the matching index output is the position
// (0..3) of the pooling element that supplied that maximum. The comparison is
// strict (>), so ties keep the earliest pooling element.
//
//   output_pixels    - number of output pixels to process (must be non-zero)
//   pooling_elements - pooling window size, 1..4
//   channels         - number of channels per pixel (must be non-zero)
//   input            - per-pixel array of pooling-element row pointers
//   input_offset     - byte offset added to every input row pointer
//   output           - destination for pooled maxima (channels per pixel)
//   index            - destination for argmax indices (channels per pixel)
//   input_increment  - byte stride advancing `input` between output pixels
//   output_increment - byte stride added to `output` after each pixel
void xnn_f32_argmaxpool_ukernel_4x__sse2_c4(
    size_t output_pixels,
    size_t pooling_elements,
    size_t channels,
    const float** input,
    size_t input_offset,
    float* output,
    uint32_t* index,
    size_t input_increment,
    size_t output_increment) XNN_DISABLE_TSAN
{
  assert(output_pixels != 0);
  assert(pooling_elements != 0);
  assert(pooling_elements <= 4);
  assert(channels != 0);

  do {
    const float* i0 = input[0];
    const float* i1 = input[1];
    const float* i2 = input[2];
    const float* i3 = input[3];
    i0 = (const float*) ((uintptr_t) i0 + input_offset);
    i1 = (const float*) ((uintptr_t) i1 + input_offset);
    i2 = (const float*) ((uintptr_t) i2 + input_offset);
    i3 = (const float*) ((uintptr_t) i3 + input_offset);
    // For short pooling windows, alias the unused rows to row 0: re-comparing
    // row 0 against itself can never win the strict > comparison, so both the
    // maxima and the indices are unaffected.
    if (pooling_elements < 2) {
      i1 = i0;
    }
    if (pooling_elements <= 2) {
      i2 = i0;
    }
    if (pooling_elements != 4) {
      i3 = i0;
    }

    size_t c = channels;
    for (; c >= 4; c -= 4) {
      const __m128 vi0 = _mm_loadu_ps(i0);
      i0 += 4;
      const __m128 vi1 = _mm_loadu_ps(i1);
      i1 += 4;
      const __m128 vi2 = _mm_loadu_ps(i2);
      i2 += 4;
      const __m128 vi3 = _mm_loadu_ps(i3);
      i3 += 4;

      // Running maximum starts at element 0, with index 0 in every lane.
      __m128 vmax = vi0;
      __m128i vidx = _mm_setzero_si128();

      // SSE2 has no blend instruction, so each step selects per lane with
      // (andnot(mask, old) | and(mask, new)) using the > comparison mask.
      const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
      vmax = _mm_max_ps(vi1, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1)));

      const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
      vmax = _mm_max_ps(vi2, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2)));

      const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
      vmax = _mm_max_ps(vi3, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3)));

      _mm_storeu_ps(output, vmax);
      output += 4;
      _mm_storeu_si128((__m128i*) index, vidx);
      index += 4;
    }
    if (c != 0) {
      // Remainder (1-3 channels): same reduction as above, but only the low
      // c lanes are stored. NOTE(review): these loads still read a full
      // 4-float vector per row — callers presumably guarantee the trailing
      // bytes are readable; confirm against the callers' buffer padding.
      const __m128 vi0 = _mm_loadu_ps(i0);
      const __m128 vi1 = _mm_loadu_ps(i1);
      const __m128 vi2 = _mm_loadu_ps(i2);
      const __m128 vi3 = _mm_loadu_ps(i3);

      __m128 vmax = vi0;
      __m128i vidx = _mm_setzero_si128();

      const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
      vmax = _mm_max_ps(vi1, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1)));

      const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
      vmax = _mm_max_ps(vi2, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2)));

      const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
      vmax = _mm_max_ps(vi3, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3)));

      if (c & 2) {
        // Store the low two lanes, then shift the high lanes down.
        _mm_storel_pi((__m64*) output, vmax);
        _mm_storel_epi64((__m128i*) index, vidx);
        vmax = _mm_movehl_ps(vmax, vmax);
        vidx = _mm_unpackhi_epi64(vidx, vidx);
        output += 2;
        index += 2;
      }
      if (c & 1) {
        _mm_store_ss(output, vmax);
        *index = (uint32_t) _mm_cvtsi128_si32(vidx);
        output += 1;
        index += 1;
      }
    }
    input = (const float**) ((uintptr_t) input + input_increment);
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}