// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["ABS", "NEG", "SQR"]
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>

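// The tables below select the vector type, the load/store intrinsics, and the
// elementwise operation for each OP variant: ABS and NEG manipulate the sign
// bit with integer bitwise ops, while SQR multiplies each element by itself.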
$__M512 = {
$  "ABS": "__m512i",
$  "NEG": "__m512i",
$  "SQR": "__m512",
$}[OP]
$_MM512_LOADU = {
$  "ABS": "_mm512_loadu_si512",
$  "NEG": "_mm512_loadu_si512",
$  "SQR": "_mm512_loadu_ps",
$}[OP]
$_MM512_MASK_LOADU = {
$  "ABS": "_mm512_maskz_loadu_epi32",
$  "NEG": "_mm512_maskz_loadu_epi32",
$  "SQR": "_mm512_maskz_loadu_ps",
$}[OP]
$_MM512_STOREU = {
$  "ABS": "_mm512_storeu_si512",
$  "NEG": "_mm512_storeu_si512",
$  "SQR": "_mm512_storeu_ps",
$}[OP]
$_MM512_MASK_STOREU = {
$  "ABS": "_mm512_mask_storeu_epi32",
$  "NEG": "_mm512_mask_storeu_epi32",
$  "SQR": "_mm512_mask_storeu_ps",
$}[OP]
$_MM512_OP = {
$  "ABS": lambda x: "_mm512_and_epi32(%s, vnonsign_mask)" % x,
$  "NEG": lambda x: "_mm512_xor_epi32(%s, vsign_mask)" % x,
$  "SQR": lambda x: "_mm512_mul_ps(%s, %s)" % (x, x),
$}[OP]
$PARAMS = {
$  "ABS": "const union xnn_f32_abs_params params[restrict XNN_MIN_ELEMENTS(1)]",
$  "NEG": "const union xnn_f32_neg_params params[restrict XNN_MIN_ELEMENTS(1)]",
$  "SQR": "const void* params",
$}[OP]
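// ${OP} microkernel: applies the elementwise operation to n bytes of f32
// input (n must be a non-zero multiple of sizeof(float)), processing
// ${BATCH_TILE} elements per main-loop iteration with unaligned AVX512F
// loads and stores.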
void xnn_f32_v${OP.lower()}_ukernel__avx512f_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    ${PARAMS})
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

  $if OP == "ABS":
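    // ABS: clear the IEEE 754 sign bit; the 128-bit nonsign mask from params is broadcast to all four 128-bit lanes.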
    const __m512i vnonsign_mask = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) params->sse.nonsign_mask));
  $elif OP == "NEG":
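    // NEG: flip the IEEE 754 sign bit via XOR with a broadcast sign mask.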
    const __m512i vsign_mask = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) params->sse.sign_mask));
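  // Main loop: apply ${OP} to ${BATCH_TILE} elements per iteration.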
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const ${__M512} vx${ABC[0:16]} = ${_MM512_LOADU}(x);
    $for N in range(16, BATCH_TILE, 16):
      const ${__M512} vx${ABC[N:N+16]} = ${_MM512_LOADU}(x + ${N});
    x += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 16):
      const ${__M512} vy${ABC[N:N+16]} = ${_MM512_OP("vx" + ABC[N:N+16])};

    ${_MM512_STOREU}(y, vy${ABC[0:16]});
    $for N in range(16, BATCH_TILE, 16):
      ${_MM512_STOREU}(y + ${N}, vy${ABC[N:N+16]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 16:
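    // Drain any remaining full 16-element vectors one at a time.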
    for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
      const ${__M512} vx = ${_MM512_LOADU}(x);
      x += 16;

      const ${__M512} vy = ${_MM512_OP("vx")};

      ${_MM512_STOREU}(y, vy);
      y += 16;
    }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on n).
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
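    // E.g. 3 remaining elements yield vmask == 0x0007, enabling only lanes 0-2 of the masked load and store.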
    const ${__M512} vx = ${_MM512_MASK_LOADU}(vmask, x);
    const ${__M512} vy = ${_MM512_OP("vx")};
    ${_MM512_MASK_STOREU}(y, vmask, vy);
  }
}
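
// A minimal usage sketch (assumes a BATCH_TILE == 16, OP == ABS instantiation;
// `input`, `output`, `batch`, and `abs_params` are hypothetical caller-side names):
//
//   xnn_f32_vabs_ukernel__avx512f_x16(
//       batch * sizeof(float) /* byte count, non-zero multiple of 4 */,
//       input, output, &abs_params);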