// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/hswish.h>

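// h-swish: y = x * min(max(x / 6 + 1/2, 0), 1), evaluated on 16 floats at a
// time in 512-bit registers.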
void xnn_f32_hswish_ukernel__avx512f_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

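  // Broadcast the 1/6, 1/2, and 1.0 constants from the parameters structure
  // across all 16 lanes of 512-bit registers.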
  const __m512 vsixth = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.sixth));
  const __m512 vhalf = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.half));
  const __m512 vone = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.one));
  const __m512 vzero = _mm512_setzero_ps();

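  // Main loop: process ${BATCH_TILE} elements per iteration.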
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const __m512 vx${ABC[0:16]} = _mm512_loadu_ps(x);
    $for N in range(16, BATCH_TILE, 16):
      const __m512 vx${ABC[N:N+16]} = _mm512_loadu_ps(x + ${N});
    x += ${BATCH_TILE};

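    // Evaluate the linear part: vacc := vx * 1/6 + 1/2 (fused multiply-add).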
    $for N in range(0, BATCH_TILE, 16):
      __m512 vacc${ABC[N:N+16]} = _mm512_fmadd_ps(vx${ABC[N:N+16]}, vsixth, vhalf);

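    // Clamp below at 0.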
    $for N in range(0, BATCH_TILE, 16):
      vacc${ABC[N:N+16]} = _mm512_max_ps(vacc${ABC[N:N+16]}, vzero);

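    // Clamp above at 1.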
    $for N in range(0, BATCH_TILE, 16):
      vacc${ABC[N:N+16]} = _mm512_min_ps(vacc${ABC[N:N+16]}, vone);

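    // Multiply the clamped gate by the input.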
    $for N in range(0, BATCH_TILE, 16):
      vacc${ABC[N:N+16]} = _mm512_mul_ps(vacc${ABC[N:N+16]}, vx${ABC[N:N+16]});

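    // Write the ${BATCH_TILE} results back to memory.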
    _mm512_storeu_ps(y, vacc${ABC[0:16]});
    $for N in range(16, BATCH_TILE, 16):
      _mm512_storeu_ps(y + ${N}, vacc${ABC[N:N+16]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 16:
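    // Process leftover full groups of 16 elements.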
    for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
      const __m512 vx = _mm512_loadu_ps(x);
      x += 16;
      __m512 vacc = _mm512_fmadd_ps(vx, vsixth, vhalf);
      vacc = _mm512_max_ps(vacc, vzero);
      vacc = _mm512_min_ps(vacc, vone);
      vacc = _mm512_mul_ps(vacc, vx);
      _mm512_storeu_ps(y, vacc);
      y += 16;
    }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on n).
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

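    // The masked load zeroes the inactive lanes; the masked store below
    // writes only the valid ones.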
    const __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
    __m512 vacc = _mm512_fmadd_ps(vx, vsixth, vhalf);
    vacc = _mm512_max_ps(vacc, vzero);
    vacc = _mm512_min_ps(vacc, vone);
    vacc = _mm512_mul_ps(vacc, vx);
    _mm512_mask_storeu_ps(y, vmask, vacc);
  }
}