// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


void xnn_f32_dwconv_minmax_ukernel_up4x9__neonfma_acc2(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(channels != 0);
  assert(output_width != 0);

  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
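  // Outer loop: one iteration per output pixel. Each pixel reads 9 input rows
  // through the input pointer array; pointers that alias the shared zero
  // (padding) buffer are used as-is, all others are shifted by input_offset.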
  do {
    const float* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
    }
    const float* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
    }
    const float* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
    }
    const float* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
    }
    const float* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
    }
    const float* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
    }
    const float* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
    }
    const float* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const float*) ((uintptr_t) i7 + input_offset);
    }
    const float* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const float*) ((uintptr_t) i8 + input_offset);
    }

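    // Advance the pointer array to the entries for the next output pixel.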
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
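    // Main loop: 4 channels per iteration. The packed weights interleave the
    // bias with the 9 filter taps for each group of 4 channels; two running
    // sums (vacc0123p0/vacc0123p1) split the FMA dependency chain in half
    // (the "_acc2" variant) and are added together before clamping.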
    for (; c >= 4; c -= 4) {
      float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;


      const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);

      const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
      float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);

      const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);

      const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
      const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
      vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);

      const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
      const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);

      const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
      const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
      vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);

      const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
      const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);

      const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
      const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
      vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);

      const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
      const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);

      // Add up all accumulators to vacc0123p0
      vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);

      float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
      vacc0123 = vminq_f32(vacc0123, vmax);

      vst1q_f32(output, vacc0123); output += 4;
    }
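    // Remainder: 1-3 channels left. Compute a full 4-lane result as above and
    // store only the valid lanes (2 lanes, then 1 lane, as indicated by c).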
    if XNN_UNLIKELY(c != 0) {
      float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;


      const float32x4_t vi0x0123 = vld1q_f32(i0);
      const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123);

      const float32x4_t vi1x0123 = vld1q_f32(i1);
      const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
      float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);

      const float32x4_t vi2x0123 = vld1q_f32(i2);
      const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123);

      const float32x4_t vi3x0123 = vld1q_f32(i3);
      const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
      vacc0123p1 = vfmaq_f32(vacc0123p1, vi3x0123, vk3x0123);

      const float32x4_t vi4x0123 = vld1q_f32(i4);
      const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123);

      const float32x4_t vi5x0123 = vld1q_f32(i5);
      const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
      vacc0123p1 = vfmaq_f32(vacc0123p1, vi5x0123, vk5x0123);

      const float32x4_t vi6x0123 = vld1q_f32(i6);
      const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123);

      const float32x4_t vi7x0123 = vld1q_f32(i7);
      const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
      vacc0123p1 = vfmaq_f32(vacc0123p1, vi7x0123, vk7x0123);

      const float32x4_t vi8x0123 = vld1q_f32(i8);
      const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
      vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123);

      // Add up all accumulators to vacc0123p0
      vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);

      float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
      vacc0123 = vminq_f32(vacc0123, vmax);

      float32x2_t vacc01 = vget_low_f32(vacc0123);
      if (c & 2) {
        vst1_f32(output, vacc01); output += 2;
        vacc01 = vget_high_f32(vacc0123);
      }
      if (c & 1) {
        vst1_lane_f32(output, vacc01, 0); output += 1;
      }
    }

    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}