// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_4x8__neon_lane_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

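  // Compute a pointer to the start of each output row. When mr < 4, the
  // out-of-range row pointers are clamped to the previous row, so their
  // stores harmlessly overwrite an already-valid row.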
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

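  // Outer loop over output columns: each pass produces a tile of up to
  // 4 rows x 8 columns. The packed weights begin with 8 per-column bias
  // values, which initialize the accumulators of every row.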
  do {
    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;

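    // Loop over the ks bytes of the indirection buffer; each iteration
    // consumes 4 input-row pointers, one per output row. Pointers equal to
    // `zero` reference the zero buffer and are not adjusted by a_offset.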
    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

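      // Main loop over k, unrolled by 2: load 2 floats per input row
      // (the "ld64" in the kernel name) and multiply-accumulate them
      // against two groups of 8 packed weights via lane broadcasts.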
      size_t k = kc;
      for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
        const float32x2_t va0 = vld1_f32(a0); a0 += 2;
        const float32x2_t va1 = vld1_f32(a1); a1 += 2;
        const float32x2_t va2 = vld1_f32(a2); a2 += 2;
        const float32x2_t va3 = vld1_f32(a3); a3 += 2;

        const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
        const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;

        vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
        vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
        vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
        vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
        vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
        vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
        vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
        vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
        const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
        const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;

        vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
        vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
        vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
        vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
        vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
        vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
        vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
        vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);

      }
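      // Remainder: if kc is an odd number of floats, one element per row
      // is left; broadcast it and accumulate against one group of weights.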
      if XNN_UNLIKELY(k != 0) {
        const float32x4_t va0 = vld1q_dup_f32(a0);
        const float32x4_t va1 = vld1q_dup_f32(a1);
        const float32x4_t va2 = vld1q_dup_f32(a2);
        const float32x4_t va3 = vld1q_dup_f32(a3);

        const float32x4_t vb0123 = vld1q_f32(w); w += 4;
        const float32x4_t vb4567 = vld1q_f32(w); w += 4;

        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

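    // Clamp the accumulators to the [min, max] output range from params.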
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);

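    // Store the tile. The common case writes all 8 columns, advances the
    // output pointers by cn_stride, and rewinds a by ks bytes so the same
    // indirection entries are reused for the next column tile. Otherwise
    // the nc remainder is written in 4-, 2-, and 1-element pieces.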
    if XNN_LIKELY(nc >= 8) {
      vst1q_f32(c3, vacc3x0123);
      vst1q_f32(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1q_f32(c2, vacc2x0123);
      vst1q_f32(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1q_f32(c1, vacc1x0123);
      vst1q_f32(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
      if (nc & 4) {
        vst1q_f32(c3, vacc3x0123); c3 += 4;
        vst1q_f32(c2, vacc2x0123); c2 += 4;
        vst1q_f32(c1, vacc1x0123); c1 += 4;
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c3, vacc3x01); c3 += 2;
        vst1_f32(c2, vacc2x01); c2 += 2;
        vst1_f32(c1, vacc1x01); c1 += 2;
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc3x01 = vget_high_f32(vacc3x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c3, vacc3x01, 0);
        vst1_lane_f32(c2, vacc2x01, 0);
        vst1_lane_f32(c1, vacc1x01, 0);
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}