// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-avx512.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>

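// F32 depthwise-convolution microkernel with fused min/max clamping for AVX512F.
// "up32x4": the channel loop handles up to 32 channels per iteration, and the
// convolution kernel has 4 taps, i.e. four input rows are accumulated per output pixel.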
void xnn_f32_dwconv_minmax_ukernel_up32x4__avx512f(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

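  // Load the output clamping bounds once: the four floats in params->sse.max/min
  // (the scalar bound replicated across an SSE register) are broadcast to all
  // 16 lanes of a __m512.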
  const __m512 vmax = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));
  const __m512 vmin = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
  do {
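    // Set up the four input row pointers for this output pixel. Rows that point at
    // the shared `zero` buffer (implicit padding) are used as-is; all other rows are
    // shifted by input_offset.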
    const float* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
    }
    const float* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
    }
    const float* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
    }
    const float* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
    }
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
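    // Main loop: 32 channels per iteration. Each 32-channel weight block is packed as
    // 32 bias values followed by 32 weights per tap (160 floats total), so the taps are
    // loaded in 16-float halves at w+32 ... w+144 and w advances by 160 per iteration.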
    for (; c >= 32; c -= 32) {
      __m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);
      __m512 vaccGHIJKLMNOPQRSTUVp0 = _mm512_load_ps(w + 16);


      const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
      const __m512 vi0xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i0 + 16);
      i0 += 32;

      const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
      const __m512 vk0xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 48);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);
      vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi0xGHIJKLMNOPQRSTUV, vk0xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);

      const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
      const __m512 vi1xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i1 + 16);
      i1 += 32;

      const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
      const __m512 vk1xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 80);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);
      vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi1xGHIJKLMNOPQRSTUV, vk1xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);

      const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
      const __m512 vi2xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i2 + 16);
      i2 += 32;

      const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
      const __m512 vk2xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 112);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);
      vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi2xGHIJKLMNOPQRSTUV, vk2xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);

      const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
      const __m512 vi3xGHIJKLMNOPQRSTUV = _mm512_loadu_ps(i3 + 16);
      i3 += 32;

      const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 128);
      const __m512 vk3xGHIJKLMNOPQRSTUV = _mm512_load_ps(w + 144);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);
      vaccGHIJKLMNOPQRSTUVp0 = _mm512_fmadd_ps(vi3xGHIJKLMNOPQRSTUV, vk3xGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUVp0);

      w += 160;


      __m512 vacc0123456789ABCDEF = _mm512_max_ps(vacc0123456789ABCDEFp0, vmin);
      __m512 vaccGHIJKLMNOPQRSTUV = _mm512_max_ps(vaccGHIJKLMNOPQRSTUVp0, vmin);
      vacc0123456789ABCDEF = _mm512_min_ps(vacc0123456789ABCDEF, vmax);
      vaccGHIJKLMNOPQRSTUV = _mm512_min_ps(vaccGHIJKLMNOPQRSTUV, vmax);

      _mm512_storeu_ps(output, vacc0123456789ABCDEF);
      _mm512_storeu_ps(output + 16, vaccGHIJKLMNOPQRSTUV);
      output += 32;
    }
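    // Remainder loop for a group of 16 channels (runs at most once after the main loop).
    // Note that only w += 16: the taps stay addressed at +32, +64, +96 and +128 within the
    // current 32-channel weight block, so the masked tail below continues in the second
    // half of the same block.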
    for (; c >= 16; c -= 16) {
      __m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);

      const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
      i0 += 16;

      const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 32);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
      i1 += 16;

      const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 64);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
      i2 += 16;

      const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 96);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
      i3 += 16;

      const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 128);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      w += 16;


      __m512 vacc0123456789ABCDEF = _mm512_max_ps(vacc0123456789ABCDEFp0, vmin);
      vacc0123456789ABCDEF = _mm512_min_ps(vacc0123456789ABCDEF, vmax);

      _mm512_storeu_ps(output, vacc0123456789ABCDEF);
      output += 16;
    }
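    // Masked tail: process the remaining channels (fewer than 16) with AVX512 masking,
    // so no out-of-bounds loads or stores are issued.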
    if XNN_UNLIKELY(c != 0) {
      assert(c >= 1);
      assert(c <= 16);
      // Prepare mask for valid 32-bit elements (depends on c).
      const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));

      __m512 vacc0123456789ABCDEFp0 = _mm512_maskz_loadu_ps(vmask, w);

      const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
      const __m512 vk0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 32);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
      const __m512 vk1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 64);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
      const __m512 vk2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 96);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
      const __m512 vk3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, w + 128);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp0);


      __m512 vacc0123456789ABCDEF = _mm512_max_ps(vacc0123456789ABCDEFp0, vmin);
      vacc0123456789ABCDEF = _mm512_min_ps(vacc0123456789ABCDEF, vmax);

      _mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
      output += c;
    }

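    // Advance to the next output pixel: output_increment covers the remaining distance
    // from the channels written above to the start of the next output pixel's data.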
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}