// Auto-generated file. Do not edit!
//   Template: src/f32-prelu/sse.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>


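// PRELU (parametric ReLU) microkernel: each output element is the input where
// the input is non-negative, and input * per-channel weight where it is
// negative. Processes 2 rows and 4 channels (16 bytes) per inner iteration
// using the SSE4.1 BLENDVPS instruction. Note that `channels` is in bytes.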
void xnn_f32_prelu_ukernel__sse41_2x4(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride) XNN_DISABLE_TSAN
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

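  // Set up pointers for two rows at a time. If only one row remains, alias
  // row 1 onto row 0 so its loads and stores harmlessly repeat row 0's.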
  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = i0;
    o1 = o0;
  }

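  // Each iteration advances the row pointers by `channels` bytes, so these
  // increments move them the rest of the way to the next pair of rows.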
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;

  do {
    const float* w = weights;
    size_t c = channels;
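    // Main loop: 4 channels at a time across both rows.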
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const __m128 vw0123 = _mm_load_ps(w);
      w += 4;

      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      i0 += 4;
      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      i1 += 4;

      const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
      const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);

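      // BLENDVPS picks from its second source where the sign bit of the mask
      // (here the input itself) is set: negative lanes take input * weight,
      // non-negative lanes keep the input, which is exactly PRELU.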
      const __m128 vacc0x0123 = _mm_blendv_ps(vi0x0123, vprod0x0123, vi0x0123);
      const __m128 vacc1x0123 = _mm_blendv_ps(vi1x0123, vprod1x0123, vi1x0123);

      _mm_storeu_ps(o0, vacc0x0123);
      o0 += 4;
      _mm_storeu_ps(o1, vacc1x0123);
      o1 += 4;
    }
    if XNN_UNLIKELY(c != 0) {
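      // Remainder of 1-3 channels: full 16-byte vectors are still loaded from
      // the weights and both rows (the buffers are assumed to be padded so
      // this over-read is safe, as is conventional for XNNPACK microkernels),
      // but only the valid lanes are stored below.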
      const __m128 vw0123 = _mm_load_ps(w);
      w = (const float*) ((uintptr_t) w + c);

      const __m128 vi0x0123 = _mm_loadu_ps(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      const __m128 vi1x0123 = _mm_loadu_ps(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);

      const __m128 vprod0x0123 = _mm_mul_ps(vi0x0123, vw0123);
      const __m128 vprod1x0123 = _mm_mul_ps(vi1x0123, vw0123);

      __m128 vacc0x0123 = _mm_blendv_ps(vi0x0123, vprod0x0123, vi0x0123);
      __m128 vacc1x0123 = _mm_blendv_ps(vi1x0123, vprod1x0123, vi1x0123);

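      // Store the remainder piecewise: two floats when the 8-byte bit of `c`
      // is set, then shift the upper half down and store one more float when
      // the 4-byte bit is set.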
      if (c & (2 * sizeof(float))) {
        _mm_storel_pi((__m64*) o0, vacc0x0123);
        _mm_storel_pi((__m64*) o1, vacc1x0123);

        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        _mm_store_ss(o0, vacc0x0123);
        _mm_store_ss(o1, vacc1x0123);

        o0 += 1;
        o1 += 1;
      }
    }
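    // Advance all pointers to the next pair of rows.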
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
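    // If fewer than 2 rows will remain after this pair, alias row 1 onto
    // row 0 again for the final iteration.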
    if XNN_UNPREDICTABLE(rows < 4) {
      i1 = i0;
      o1 = o0;
    }
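    // doz() is difference-or-zero (saturating subtraction) from
    // <xnnpack/math.h>, so the row count cannot wrap below zero.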
    rows = doz(rows, 2);
  } while (rows != 0);
}