1 // Auto-generated file. Do not edit!
2 //   Template: src/f32-vbinary/vop-avx.c.in
3 //   Generator: tools/xngen
4 //
5 // Copyright 2019 Google LLC
6 //
7 // This source code is licensed under the BSD-style license found in the
8 // LICENSE file in the root directory of this source tree.
9 
10 #include <assert.h>
11 
12 #include <immintrin.h>
13 
14 #include <xnnpack/common.h>
15 #include <xnnpack/vbinary.h>
16 
17 
// Per-lane masks for AVX masked tail loads (-1 = all bits set = load lane, 0 = skip lane).
// For a tail of `n` bytes (n/4 floats, 1..7), loading 8 ints starting at
// (uintptr_t) &mask_table[7] - n yields exactly n/4 leading -1 entries followed by zeros.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
19 
xnn_f32_vmul_minmax_ukernel__avx_x16(size_t n,const float * a,const float * b,float * y,const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS (1)])20 void xnn_f32_vmul_minmax_ukernel__avx_x16(
21     size_t n,
22     const float* a,
23     const float* b,
24     float* y,
25     const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
26 {
27   assert(n != 0);
28   assert(n % sizeof(float) == 0);
29   assert(a != NULL);
30   assert(b != NULL);
31   assert(y != NULL);
32 
33   const __m256 vy_min = _mm256_broadcast_ps((const __m128*) params->sse.min);
34   const __m256 vy_max = _mm256_broadcast_ps((const __m128*) params->sse.max);
35 
36   for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
37     const __m256 va01234567 = _mm256_loadu_ps(a);
38     const __m256 va89ABCDEF = _mm256_loadu_ps(a + 8);
39     a += 16;
40 
41     const __m256 vb01234567 = _mm256_loadu_ps(b);
42     const __m256 vb89ABCDEF = _mm256_loadu_ps(b + 8);
43     b += 16;
44 
45     __m256 vy01234567 = _mm256_mul_ps(va01234567, vb01234567);
46     __m256 vy89ABCDEF = _mm256_mul_ps(va89ABCDEF, vb89ABCDEF);
47 
48 
49     vy01234567 = _mm256_max_ps(vy01234567, vy_min);
50     vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
51 
52     vy01234567 = _mm256_min_ps(vy01234567, vy_max);
53     vy89ABCDEF = _mm256_min_ps(vy89ABCDEF, vy_max);
54 
55     _mm256_storeu_ps(y, vy01234567);
56     _mm256_storeu_ps(y + 8, vy89ABCDEF);
57     y += 16;
58   }
59   for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
60     const __m256 va = _mm256_loadu_ps(a);
61     a += 8;
62 
63     const __m256 vb = _mm256_loadu_ps(b);
64     b += 8;
65 
66     __m256 vy = _mm256_mul_ps(va, vb);
67     vy = _mm256_max_ps(vy, vy_min);
68     vy = _mm256_min_ps(vy, vy_max);
69     _mm256_storeu_ps(y, vy);
70     y += 8;
71   }
72   if XNN_UNLIKELY(n != 0) {
73     assert(n >= 1 * sizeof(float));
74     assert(n <= 7 * sizeof(float));
75     __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));
76 
77     const __m256 va = _mm256_maskload_ps(a, vmask);
78     const __m256 vb = _mm256_maskload_ps(b, vmask);
79 
80     __m256 vy = _mm256_mul_ps(va, vb);
81     vy = _mm256_max_ps(vy, vy_min);
82     vy = _mm256_min_ps(vy, vy_max);
83 
84     // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
85     __m128 vy_lo = _mm256_castps256_ps128(vy);
86     if (n & (4 * sizeof(float))) {
87       _mm_storeu_ps(y, vy_lo);
88       vy_lo = _mm256_extractf128_ps(vy, 1);
89       y += 4;
90     }
91     if (n & (2 * sizeof(float))) {
92       _mm_storel_pi((__m64*) y, vy_lo);
93       vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
94       y += 2;
95     }
96     if (n & (1 * sizeof(float))) {
97       _mm_store_ss(y, vy_lo);
98     }
99   }
100 }
101