// Auto-generated file. Do not edit!
//   Template: src/qs8-vaddc/neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vadd.h>

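// QS8 element-wise addition of a tensor and a broadcast scalar ("addc"),
// with requantization and output clamping. Processes 32 elements per
// main-loop iteration using 64-bit NEON loads.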
void xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32(
    size_t n,
    const int8_t* input_x,
    const int8_t* input_y,
    int8_t* output,
    const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
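  // Broadcast the quantization parameters across all NEON lanes: the x input
  // zero point, the fixed-point multiplier for x, the rounding right shift,
  // and the output zero point and clamping bounds.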
  const int8x8_t vx_zero_point = vld1_dup_s8(&params->neon.x_zero_point);
  const int32x4_t vx_multiplier = vld1q_dup_s32(&params->neon.x_multiplier);
  const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
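  // All-ones lanes where the shift amount is 0; used below to suppress the
  // sign-rounding adjustment when no shift (and thus no rounding) occurs.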
  const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);

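  // This is an add-with-constant kernel: input_y is a single scalar, so its
  // zero-point-adjusted, multiplied contribution can be folded into one
  // per-lane bias before the loop.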
  const int32_t vey = (int32_t) *input_y - (int32_t) params->neon.y_zero_point;
  const int32_t vy_multiplier = params->neon.y_multiplier;
  const int32x4_t vy_bias = vdupq_n_s32(vey * vy_multiplier);

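  // Main loop: process 32 elements per iteration.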
  for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
    const int8x8_t vx01234567 = vld1_s8(input_x); input_x += 8;
    const int8x8_t vx89ABCDEF = vld1_s8(input_x); input_x += 8;
    const int8x8_t vxGHIJKLMN = vld1_s8(input_x); input_x += 8;
    const int8x8_t vxOPQRSTUV = vld1_s8(input_x); input_x += 8;

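    // Widen x to 16 bits while subtracting its zero point.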
    const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point);
    const int16x8_t vex89ABCDEF = vsubl_s8(vx89ABCDEF, vx_zero_point);
    const int16x8_t vexGHIJKLMN = vsubl_s8(vxGHIJKLMN, vx_zero_point);
    const int16x8_t vexOPQRSTUV = vsubl_s8(vxOPQRSTUV, vx_zero_point);

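    // Widen to 32 bits, scale by the x multiplier, and accumulate onto the
    // precomputed y bias.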
    int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier);
    int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier);
    int32x4_t vacc89AB = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex89ABCDEF)), vx_multiplier);
    int32x4_t vaccCDEF = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex89ABCDEF)), vx_multiplier);
    int32x4_t vaccGHIJ = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vexGHIJKLMN)), vx_multiplier);
    int32x4_t vaccKLMN = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vexGHIJKLMN)), vx_multiplier);
    int32x4_t vaccOPQR = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vexOPQRSTUV)), vx_multiplier);
    int32x4_t vaccSTUV = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vexOPQRSTUV)), vx_multiplier);

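    // Pre-shift rounding adjustment: add the sign bit (-1 for negative
    // accumulators) so the rounding shift below rounds ties away from zero.
    // Lanes are masked off when the shift amount is 0, where the adjustment
    // would otherwise change the result.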
    vacc0123 = vsraq_n_s32(vacc0123, vbicq_s32(vacc0123, vzero_shift_mask), 31);
    vacc4567 = vsraq_n_s32(vacc4567, vbicq_s32(vacc4567, vzero_shift_mask), 31);
    vacc89AB = vsraq_n_s32(vacc89AB, vbicq_s32(vacc89AB, vzero_shift_mask), 31);
    vaccCDEF = vsraq_n_s32(vaccCDEF, vbicq_s32(vaccCDEF, vzero_shift_mask), 31);
    vaccGHIJ = vsraq_n_s32(vaccGHIJ, vbicq_s32(vaccGHIJ, vzero_shift_mask), 31);
    vaccKLMN = vsraq_n_s32(vaccKLMN, vbicq_s32(vaccKLMN, vzero_shift_mask), 31);
    vaccOPQR = vsraq_n_s32(vaccOPQR, vbicq_s32(vaccOPQR, vzero_shift_mask), 31);
    vaccSTUV = vsraq_n_s32(vaccSTUV, vbicq_s32(vaccSTUV, vzero_shift_mask), 31);

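    // Arithmetic shift right with rounding. vrshlq_s32 shifts left by a
    // signed per-lane amount, so the shift in the params is expected to be
    // stored negated to produce a right shift.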
    vacc0123 = vrshlq_s32(vacc0123, vright_shift);
    vacc4567 = vrshlq_s32(vacc4567, vright_shift);
    vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
    vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
    vaccGHIJ = vrshlq_s32(vaccGHIJ, vright_shift);
    vaccKLMN = vrshlq_s32(vaccKLMN, vright_shift);
    vaccOPQR = vrshlq_s32(vaccOPQR, vright_shift);
    vaccSTUV = vrshlq_s32(vaccSTUV, vright_shift);

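    // Narrow to 16 bits with saturation and add the output zero point.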
    const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
    const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
    const int16x8_t vaccGHIJKLMN = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN)), voutput_zero_point);
    const int16x8_t vaccOPQRSTUV = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV)), voutput_zero_point);

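    // Narrow to 8 bits with saturation, then clamp to [output_min, output_max]
    // and store two 16-byte vectors.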
    int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
    int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));

    vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
    voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);

    vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
    voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);

    vst1q_s8(output, vout0123456789ABCDEF); output += 16;
    vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
  }
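  // Remainder loop: handle the final 1-31 elements, 8 at a time.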
  if XNN_UNLIKELY(n != 0) {
    do {
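      // Note: this always loads a full 8 bytes, so when fewer than 8 elements
      // remain it reads past the last element; the trailing bytes are
      // presumably required to be addressable by the caller.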
      const int8x8_t vx01234567 = vld1_s8(input_x); input_x += 8;

      const int16x8_t vex01234567 = vsubl_s8(vx01234567, vx_zero_point);

      int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier);
      int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier);

      vacc0123 = vsraq_n_s32(vacc0123, vbicq_s32(vacc0123, vzero_shift_mask), 31);
      vacc4567 = vsraq_n_s32(vacc4567, vbicq_s32(vacc4567, vzero_shift_mask), 31);

      vacc0123 = vrshlq_s32(vacc0123, vright_shift);
      vacc4567 = vrshlq_s32(vacc4567, vright_shift);

      const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);

      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
      vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
      vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));

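      // Store all 8 results when at least 8 elements remain; otherwise store
      // the 4/2/1-byte tail with lane stores, rotating the vector after each
      // partial store.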
      if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
        vst1_s8(output, vout01234567); output += 8;
        n -= 8 * sizeof(int8_t);
      } else {
        if (n & (4 * sizeof(int8_t))) {
          vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_s8(vout01234567), 0); output += 4;
          vout01234567 = vext_s8(vout01234567, vout01234567, 4);
        }
        if (n & (2 * sizeof(int8_t))) {
          vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_s8(vout01234567), 0); output += 2;
          vout01234567 = vext_s8(vout01234567, vout01234567, 2);
        }
        if (n & (1 * sizeof(int8_t))) {
          vst1_lane_s8(output, vout01234567, 0);
        }
        n = 0;
      }
    } while (n != 0);
  }
}