// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
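// Slices of the ABC string above provide the lane-index suffixes for generated variable names (vx01234567, etc.).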
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

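// Mask table for the partially-loaded remainder: entries 0-6 are all-ones (lane enabled) and
// entries 7-13 are all-zeroes, so reading 8 consecutive entries starting at index 7 - k yields
// a mask with exactly the first k lanes set.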
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

void xnn_f32_vlrelu_ukernel__avx_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

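  // Broadcast the LeakyReLU slope to all 8 lanes of a 256-bit vector.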
  const __m256 vslope = _mm256_broadcast_ps((const __m128*) params->sse.slope);
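  // Main loop: process ${BATCH_TILE} elements per iteration.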
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const __m256 vx${ABC[0:8]} = _mm256_loadu_ps(x);
    $for N in range(8, BATCH_TILE, 8):
      const __m256 vx${ABC[N:N+8]} = _mm256_loadu_ps(x + ${N});
    x += ${BATCH_TILE};

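    // Multiply every element by the slope; the products are kept only for negative inputs.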
    $for N in range(0, BATCH_TILE, 8):
      __m256 vacc${ABC[N:N+8]} = _mm256_mul_ps(vx${ABC[N:N+8]}, vslope);

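    // blendv picks the scaled value where the sign bit of vx is set (vx < 0) and vx itself otherwise.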
    $for N in range(0, BATCH_TILE, 8):
      vacc${ABC[N:N+8]} = _mm256_blendv_ps(vx${ABC[N:N+8]}, vacc${ABC[N:N+8]}, vx${ABC[N:N+8]});

    _mm256_storeu_ps(y, vacc${ABC[0:8]});
    $for N in range(8, BATCH_TILE, 8):
      _mm256_storeu_ps(y + ${N}, vacc${ABC[N:N+8]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 8:
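    // Tail loop: process the remaining full 8-element vectors.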
    for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
      const __m256 vx = _mm256_loadu_ps(x);
      x += 8;
      __m256 vacc = _mm256_mul_ps(vx, vslope);
      vacc = _mm256_blendv_ps(vx, vacc, vx);
      _mm256_storeu_ps(y, vacc);
      y += 8;
    }
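  // Remainder of 1-7 elements: a masked load avoids reading past the end of x.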
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
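    // n counts bytes here, so &mask_table[7] - n starts the load n / sizeof(float) entries before
    // mask_table[7], setting exactly the first n / sizeof(float) lanes of vmask.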
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);
    __m256 vacc = _mm256_mul_ps(vx, vslope);
    vacc = _mm256_blendv_ps(vx, vacc, vx);

    // _mm256_maskstore_ps(y, vmask, vacc) could be used here, but triggers msan failures (probably an msan bug).
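    // Store the remaining 1-7 floats in chunks of 4, 2, and 1, selected by the bits of n.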
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vacc_lo);
    }
  }
}