// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/argmaxpool.h>


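// Argmax pooling microkernel: for each output pixel, reduces a window of up
// to 9 input rows to the per-channel maximum and the index of the row that
// produced it, processing channels 4 at a time with WebAssembly SIMD.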
void xnn_f32_argmaxpool_ukernel_9x__wasmsimd_c4(
    size_t output_pixels,
    size_t pooling_elements,
    size_t channels,
    const float** input,
    size_t input_offset,
    float* output,
    uint32_t* index_ptr,
    size_t input_increment,
    size_t output_increment) XNN_DISABLE_TSAN
{
  assert(output_pixels != 0);
  assert(pooling_elements != 0);
  assert(pooling_elements <= 9);
  assert(channels != 0);

  float* index = (float*) index_ptr;
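  // Indices are written through a float* alias so the SIMD stores and lane
  // extractions below can be shared between the maxima and their indices.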
  do {
    const float* i0 = input[0];
    const float* i1 = input[1];
    const float* i2 = input[2];
    const float* i3 = input[3];
    const float* i4 = input[4];
    const float* i5 = input[5];
    const float* i6 = input[6];
    const float* i7 = input[7];
    const float* i8 = input[8];
    i0 = (const float*) ((uintptr_t) i0 + input_offset);
    i1 = (const float*) ((uintptr_t) i1 + input_offset);
    i2 = (const float*) ((uintptr_t) i2 + input_offset);
    i3 = (const float*) ((uintptr_t) i3 + input_offset);
    i4 = (const float*) ((uintptr_t) i4 + input_offset);
    i5 = (const float*) ((uintptr_t) i5 + input_offset);
    i6 = (const float*) ((uintptr_t) i6 + input_offset);
    i7 = (const float*) ((uintptr_t) i7 + input_offset);
    i8 = (const float*) ((uintptr_t) i8 + input_offset);
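    // Windows with fewer than 9 elements: redirect the unused rows to row 0.
    // The comparisons below are strict (>), so a duplicated row 0 can never
    // change the maximum or the recorded index.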
    if (pooling_elements < 2) {
      i1 = i0;
    }
    if (pooling_elements <= 2) {
      i2 = i0;
    }
    if (pooling_elements < 4) {
      i3 = i0;
    }
    if (pooling_elements <= 4) {
      i4 = i0;
    }
    if (pooling_elements < 6) {
      i5 = i0;
    }
    if (pooling_elements <= 6) {
      i6 = i0;
    }
    if (pooling_elements < 8) {
      i7 = i0;
    }
    if (pooling_elements <= 8) {
      i8 = i0;
    }

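    // Main loop: 4 channels per iteration, full 128-bit loads and stores.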
    size_t c = channels;
    for (; c >= 4; c -= 4) {
      const v128_t vi0 = wasm_v128_load(i0);
      i0 += 4;
      const v128_t vi1 = wasm_v128_load(i1);
      i1 += 4;
      const v128_t vi2 = wasm_v128_load(i2);
      i2 += 4;
      const v128_t vi3 = wasm_v128_load(i3);
      i3 += 4;
      const v128_t vi4 = wasm_v128_load(i4);
      i4 += 4;
      const v128_t vi5 = wasm_v128_load(i5);
      i5 += 4;
      const v128_t vi6 = wasm_v128_load(i6);
      i6 += 4;
      const v128_t vi7 = wasm_v128_load(i7);
      i7 += 4;
      const v128_t vi8 = wasm_v128_load(i8);
      i8 += 4;

      v128_t vmax = vi0;
      v128_t vidx = wasm_i32x4_splat(0);

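      // Compare each remaining row against the running maximum; lanes where
      // the new row is strictly greater take its value and its row index.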
      const v128_t vm1 = wasm_f32x4_gt(vi1, vmax);
      vmax = wasm_v128_bitselect(vi1, vmax, vm1);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(1), vidx, vm1);

      const v128_t vm2 = wasm_f32x4_gt(vi2, vmax);
      vmax = wasm_v128_bitselect(vi2, vmax, vm2);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(2), vidx, vm2);

      const v128_t vm3 = wasm_f32x4_gt(vi3, vmax);
      vmax = wasm_v128_bitselect(vi3, vmax, vm3);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(3), vidx, vm3);

      const v128_t vm4 = wasm_f32x4_gt(vi4, vmax);
      vmax = wasm_v128_bitselect(vi4, vmax, vm4);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(4), vidx, vm4);

      const v128_t vm5 = wasm_f32x4_gt(vi5, vmax);
      vmax = wasm_v128_bitselect(vi5, vmax, vm5);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(5), vidx, vm5);

      const v128_t vm6 = wasm_f32x4_gt(vi6, vmax);
      vmax = wasm_v128_bitselect(vi6, vmax, vm6);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(6), vidx, vm6);

      const v128_t vm7 = wasm_f32x4_gt(vi7, vmax);
      vmax = wasm_v128_bitselect(vi7, vmax, vm7);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(7), vidx, vm7);

      const v128_t vm8 = wasm_f32x4_gt(vi8, vmax);
      vmax = wasm_v128_bitselect(vi8, vmax, vm8);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(8), vidx, vm8);

      wasm_v128_store(output, vmax);
      output += 4;
      wasm_v128_store(index, vidx);
      index += 4;
    }
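    // Remainder of 1-3 channels: load full vectors and compute the argmax as
    // above, then store only the lanes that belong to the remaining channels.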
    if (c != 0) {
      const v128_t vi0 = wasm_v128_load(i0);
      const v128_t vi1 = wasm_v128_load(i1);
      const v128_t vi2 = wasm_v128_load(i2);
      const v128_t vi3 = wasm_v128_load(i3);
      const v128_t vi4 = wasm_v128_load(i4);
      const v128_t vi5 = wasm_v128_load(i5);
      const v128_t vi6 = wasm_v128_load(i6);
      const v128_t vi7 = wasm_v128_load(i7);
      const v128_t vi8 = wasm_v128_load(i8);

      v128_t vmax = vi0;
      v128_t vidx = wasm_i32x4_splat(0);

      const v128_t vm1 = wasm_f32x4_gt(vi1, vmax);
      vmax = wasm_v128_bitselect(vi1, vmax, vm1);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(1), vidx, vm1);

      const v128_t vm2 = wasm_f32x4_gt(vi2, vmax);
      vmax = wasm_v128_bitselect(vi2, vmax, vm2);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(2), vidx, vm2);

      const v128_t vm3 = wasm_f32x4_gt(vi3, vmax);
      vmax = wasm_v128_bitselect(vi3, vmax, vm3);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(3), vidx, vm3);

      const v128_t vm4 = wasm_f32x4_gt(vi4, vmax);
      vmax = wasm_v128_bitselect(vi4, vmax, vm4);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(4), vidx, vm4);

      const v128_t vm5 = wasm_f32x4_gt(vi5, vmax);
      vmax = wasm_v128_bitselect(vi5, vmax, vm5);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(5), vidx, vm5);

      const v128_t vm6 = wasm_f32x4_gt(vi6, vmax);
      vmax = wasm_v128_bitselect(vi6, vmax, vm6);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(6), vidx, vm6);

      const v128_t vm7 = wasm_f32x4_gt(vi7, vmax);
      vmax = wasm_v128_bitselect(vi7, vmax, vm7);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(7), vidx, vm7);

      const v128_t vm8 = wasm_f32x4_gt(vi8, vmax);
      vmax = wasm_v128_bitselect(vi8, vmax, vm8);
      vidx = wasm_v128_bitselect(wasm_i32x4_splat(8), vidx, vm8);

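      // Store the low lanes that cover the remaining channels: two at a time
      // via a 64-bit copy, then the final lane if the channel count is odd.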
      if (c & 2) {
        *((double*) output) = wasm_f64x2_extract_lane(vmax, 0);
        *((double*) index) = wasm_f64x2_extract_lane(vidx, 0);
        vmax = wasm_v32x4_shuffle(vmax, vmax, 2, 3, 2, 3);
        vidx = wasm_v32x4_shuffle(vidx, vidx, 2, 3, 2, 3);
        output += 2;
        index += 2;
      }
      if (c & 1) {
        *output++ = wasm_f32x4_extract_lane(vmax, 0);
        *index++ = wasm_f32x4_extract_lane(vidx, 0);
      }
    }
    input = (const float**) ((uintptr_t) input + input_increment);
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}