// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5p2-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4_acc4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 2);

  const uint32x4_t vmask = vld1q_u32(params->neon.mask);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon.min);

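  // weights[0] is the bias; weights[1..25] are the 5x5 filter taps, loaded below into six q-registers and one d-register.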
  const float32x4_t vw0123 = vld1q_f32(weights);
  const float32x4_t vw4567 = vld1q_f32(weights + 4);
  const float32x4_t vw89AB = vld1q_f32(weights + 8);
  const float32x4_t vwCDEF = vld1q_f32(weights + 12);
  const float32x4_t vwGHIJ = vld1q_f32(weights + 16);
  const float32x4_t vwKLMN = vld1q_f32(weights + 20);
  const float32x2_t vwOP = vld1_f32(weights + 24);

  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

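  // padding_top == 2: the first two filter rows read from the zero row, so i2 is the first real input row.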
  const float* i0 = zero;
  const float* i1 = zero;
  const float* i2 = input;
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);

  float* o0 = output;

  size_t output_height = input_height;
  do {
    if XNN_UNPREDICTABLE(output_height < 2) {
      i3 = zero;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i4 = zero;
    }

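    // The two left-padding columns are implicit zeros carried in the x0123 vectors.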
    float32x4_t vi0x0123 = vmovq_n_f32(0.0f);
    float32x4_t vi1x0123 = vmovq_n_f32(0.0f);
    float32x4_t vi2x0123 = vmovq_n_f32(0.0f);
    float32x4_t vi3x0123 = vmovq_n_f32(0.0f);
    float32x4_t vi4x0123 = vmovq_n_f32(0.0f);

    float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
    float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
    float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
    float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
    float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;

    size_t w = input_width;
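    // Main loop: 4 output pixels per iteration, split across 4 accumulators, while more than 8 input columns remain.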
    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
      float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);

      const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
      const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
      const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;

      float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);

      float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);

      float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi4x4567, vget_high_f32(vwKLMN), 1);

      const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
      const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
      const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
      const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
      const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi0x3456, vget_high_f32(vw0123), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi3x3456, vget_low_f32(vwGHIJ), 1);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi4x3456, vget_high_f32(vwKLMN), 0);

      const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
      vi0x0123 = vi0x4567;
      const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
      vi1x0123 = vi1x4567;
      const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
      vi2x0123 = vi2x4567;
      const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
      vi3x0123 = vi3x4567;
      const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
      vi4x0123 = vi4x4567;

      vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi3x2345, vget_low_f32(vwGHIJ), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);

      const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
      const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
      const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
      const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
      const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi1x5678, vget_low_f32(vw89AB), 1);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi2x5678, vget_high_f32(vwCDEF), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);

      const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
      vi0x4567 = vi0x89AB;
      const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
      vi1x4567 = vi1x89AB;
      const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
      vi2x4567 = vi2x89AB;
      const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
      vi3x4567 = vi3x89AB;
      const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
      vi4x4567 = vi4x89AB;

      vo0p2 = vfmaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);

      vo0p0 = vaddq_f32(vo0p0, vo0p1);
      vo0p2 = vaddq_f32(vo0p2, vo0p3);
      vo0p0 = vaddq_f32(vo0p0, vo0p2);

      float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);

      vo0 = vminq_f32(vo0, vmax);

      vst1q_f32(o0, vo0); o0 += 4;
    }
    // Always process the last block of 5..8 pixels.
    if XNN_LIKELY(w > 4 * sizeof(float)) {
      float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);

      float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
      float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
      float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
      float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
      float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;

      vi0x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x89AB)));
      vi1x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x89AB)));
      vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB)));
      vi3x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x89AB)));
      vi4x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x89AB)));

      float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);

      float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);

      float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi4x4567, vget_high_f32(vwKLMN), 1);

      const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
      const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
      const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
      const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
      const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi0x3456, vget_high_f32(vw0123), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi3x3456, vget_low_f32(vwGHIJ), 1);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi4x3456, vget_high_f32(vwKLMN), 0);

      const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
      vi0x0123 = vi0x4567;
      const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
      vi1x0123 = vi1x4567;
      const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
      vi2x0123 = vi2x4567;
      const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
      vi3x0123 = vi3x4567;
      const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);
      vi4x0123 = vi4x4567;

      vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi3x2345, vget_low_f32(vwGHIJ), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);

      const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vi0x89AB, 1);
      const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vi1x89AB, 1);
      const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);
      const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vi3x89AB, 1);
      const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vi4x89AB, 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi1x5678, vget_low_f32(vw89AB), 1);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi2x5678, vget_high_f32(vwCDEF), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);

      const float32x4_t vi0x6789 = vextq_f32(vi0x4567, vi0x89AB, 2);
      vi0x4567 = vi0x89AB;
      const float32x4_t vi1x6789 = vextq_f32(vi1x4567, vi1x89AB, 2);
      vi1x4567 = vi1x89AB;
      const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);
      vi2x4567 = vi2x89AB;
      const float32x4_t vi3x6789 = vextq_f32(vi3x4567, vi3x89AB, 2);
      vi3x4567 = vi3x89AB;
      const float32x4_t vi4x6789 = vextq_f32(vi4x4567, vi4x89AB, 2);
      vi4x4567 = vi4x89AB;

      vo0p2 = vfmaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);

      vo0p0 = vaddq_f32(vo0p0, vo0p1);
      vo0p2 = vaddq_f32(vo0p2, vo0p3);
      vo0p0 = vaddq_f32(vo0p0, vo0p2);

      float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);

      vo0 = vminq_f32(vo0, vmax);

      vst1q_f32(o0, vo0); o0 += 4;

      w -= 4 * sizeof(float);
    }
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
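    // Final block of 1..4 columns: mask off out-of-bounds inputs, shift in zeros from the right, and store partially.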
    {
      float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);

      vi0x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0x4567)));
      vi1x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi1x4567)));
      vi2x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x4567)));
      vi3x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi3x4567)));
      vi4x4567 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi4x4567)));

      float32x4_t vo0p1 = vmulq_lane_f32(vi0x4567, vget_high_f32(vw0123), 1);

      float32x4_t vo0p2 = vmulq_lane_f32(vi1x4567, vget_low_f32(vw89AB), 0);

      float32x4_t vo0p3 = vmulq_lane_f32(vi2x4567, vget_low_f32(vwCDEF), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi3x4567, vget_high_f32(vwGHIJ), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi4x4567, vget_high_f32(vwKLMN), 1);

      const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3);
      const float32x4_t vi1x3456 = vextq_f32(vi1x0123, vi1x4567, 3);
      const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
      const float32x4_t vi3x3456 = vextq_f32(vi3x0123, vi3x4567, 3);
      const float32x4_t vi4x3456 = vextq_f32(vi4x0123, vi4x4567, 3);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi0x3456, vget_high_f32(vw0123), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi1x3456, vget_high_f32(vw4567), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_low_f32(vwCDEF), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi3x3456, vget_low_f32(vwGHIJ), 1);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi4x3456, vget_high_f32(vwKLMN), 0);

      const float32x4_t vi0x2345 = vextq_f32(vi0x0123, vi0x4567, 2);
      const float32x4_t vi1x2345 = vextq_f32(vi1x0123, vi1x4567, 2);
      const float32x4_t vi2x2345 = vextq_f32(vi2x0123, vi2x4567, 2);
      const float32x4_t vi3x2345 = vextq_f32(vi3x0123, vi3x4567, 2);
      const float32x4_t vi4x2345 = vextq_f32(vi4x0123, vi4x4567, 2);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi0x2345, vget_low_f32(vw0123), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi1x2345, vget_high_f32(vw4567), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi2x2345, vget_high_f32(vw89AB), 1);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi3x2345, vget_low_f32(vwGHIJ), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi4x2345, vget_low_f32(vwKLMN), 1);

      const float32x4_t vzero = vmovq_n_f32(0.0f);
      const float32x4_t vi0x5678 = vextq_f32(vi0x4567, vzero, 1);
      const float32x4_t vi1x5678 = vextq_f32(vi1x4567, vzero, 1);
      const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vzero, 1);
      const float32x4_t vi3x5678 = vextq_f32(vi3x4567, vzero, 1);
      const float32x4_t vi4x5678 = vextq_f32(vi4x4567, vzero, 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi0x5678, vget_low_f32(vw4567), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi1x5678, vget_low_f32(vw89AB), 1);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi2x5678, vget_high_f32(vwCDEF), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi3x5678, vget_high_f32(vwGHIJ), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi4x5678, vwOP, 0);

      const float32x4_t vi0x6789 = vextq_f32(vi0x5678, vzero, 1);
      const float32x4_t vi1x6789 = vextq_f32(vi1x5678, vzero, 1);
      const float32x4_t vi2x6789 = vextq_f32(vi2x5678, vzero, 1);
      const float32x4_t vi3x6789 = vextq_f32(vi3x5678, vzero, 1);
      const float32x4_t vi4x6789 = vextq_f32(vi4x5678, vzero, 1);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi0x6789, vget_low_f32(vw4567), 1);

      vo0p3 = vfmaq_lane_f32(vo0p3, vi1x6789, vget_high_f32(vw89AB), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi2x6789, vget_high_f32(vwCDEF), 1);

      vo0p1 = vfmaq_lane_f32(vo0p1, vi3x6789, vget_low_f32(vwKLMN), 0);

      vo0p2 = vfmaq_lane_f32(vo0p2, vi4x6789, vwOP, 1);

      vo0p0 = vaddq_f32(vo0p0, vo0p1);
      vo0p2 = vaddq_f32(vo0p2, vo0p3);
      vo0p0 = vaddq_f32(vo0p0, vo0p2);

      float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);

      vo0 = vminq_f32(vo0, vmax);

      if XNN_LIKELY(w & (4 * sizeof(float))) {
        vst1q_f32(o0, vo0); o0 += 4;
      } else {
        float32x2_t vo0_lo = vget_low_f32(vo0);
        if (w & (2 * sizeof(float))) {
          vst1_f32(o0, vo0_lo); o0 += 2;

          vo0_lo = vget_high_f32(vo0);
        }
        if (w & (1 * sizeof(float))) {
          vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
        }
      }
    }

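    // Rewind and rotate the row pointers: shift the 5-row input window down by one row for the next output row.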
    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);


  } while (--output_height != 0);
}