// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/clamp.h>
#include <xnnpack/common.h>

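// Template for the f16 clamp microkernel using NEON FP16 arithmetic: clamps each
// element of x to [params->min, params->max], processing ${BATCH_TILE} elements
// per main-loop iteration.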
void xnn_f16_clamp_ukernel__neonfp16arith_x${BATCH_TILE}(
    size_t n,
    const void* restrict x_ptr,
    void* restrict y_ptr,
    const struct xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(__fp16) == 0);
  assert(x_ptr != NULL);
  assert(y_ptr != NULL);

  const __fp16* x = (const __fp16*) x_ptr;
  __fp16* y = (__fp16*) y_ptr;

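  // Broadcast the clamp bounds to all lanes of a NEON register.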
  const float16x8_t vy_min = vld1q_dup_f16(&params->min);
  const float16x8_t vy_max = vld1q_dup_f16(&params->max);

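  // Main loop: clamp ${BATCH_TILE} elements per iteration, 8 per NEON register.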
  for (; n >= ${BATCH_TILE} * sizeof(__fp16); n -= ${BATCH_TILE} * sizeof(__fp16)) {
    $for N in range(0, BATCH_TILE, 8):
      float16x8_t vacc${ABC[N:N+8]} = vld1q_f16(x); x += 8;

    $for N in range(0, BATCH_TILE, 8):
      vacc${ABC[N:N+8]} = vmaxq_f16(vacc${ABC[N:N+8]}, vy_min);

    $for N in range(0, BATCH_TILE, 8):
      vacc${ABC[N:N+8]} = vminq_f16(vacc${ABC[N:N+8]}, vy_max);

    $for N in range(0, BATCH_TILE, 8):
      vst1q_f16(y, vacc${ABC[N:N+8]}); y += 8;
  }
  $if BATCH_TILE > 8:
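    // Secondary loop: flush any full 8-element vectors left over from the main loop.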
    for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
      float16x8_t vacc = vld1q_f16(x); x += 8;
      vacc = vmaxq_f16(vacc, vy_min);
      vacc = vminq_f16(vacc, vy_max);
      vst1q_f16(y, vacc); y += 8;
    }
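  // Tail: clamp the final 1-7 elements. The full 8-element load may read past the
  // last valid element; XNNPACK assumes buffers carry XNN_EXTRA_BYTES of padding,
  // which makes the over-read safe.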
  if XNN_UNLIKELY(n != 0) {
    float16x8_t vacc = vld1q_f16(x);
    vacc = vmaxq_f16(vacc, vy_min);
    vacc = vminq_f16(vacc, vy_max);

    float16x4_t vacc_lo = vget_low_f16(vacc);
    if (n & (4 * sizeof(__fp16))) {
      vst1_f16(y, vacc_lo); y += 4;
      vacc_lo = vget_high_f16(vacc);
    }
    if (n & (2 * sizeof(__fp16))) {
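      // Store two FP16 lanes as one 32-bit lane; __builtin_assume_aligned(y, 1) stops
      // the compiler from assuming 4-byte alignment for the uint32_t store.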
      vst1_lane_u32(__builtin_assume_aligned(y, 1), vreinterpret_u32_f16(vacc_lo), 0); y += 2;
      vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
    }
    if (n & (1 * sizeof(__fp16))) {
      vst1_lane_f16(y, vacc_lo, 0);
    }
  }
}