// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

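// This template generates f32 GEMM minmax micro-kernels for SSE with ${NR}
// output columns and a "c4" layout: each accumulator register keeps 4 partial
// sums along the K (reduction) dimension that are combined at the end.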
$assert NR == 2
$assert MR % 2 == 0
#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>


void xnn_f32_gemm_minmax_ukernel_${MR}x${NR}c4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

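  // Set up per-row pointers into A and C. When mr < ${MR}, pointers for the
  // out-of-range rows alias the previous row, so those rows redundantly
  // recompute the previous row's results instead of touching out-of-bounds
  // memory.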
  const float* a0 = a;
  float* c0 = c;
  $for M in range(1, MR):
    const float* a${M} = (const float*) ((uintptr_t) a${M-1} + a_stride);
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

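  // Outer loop over the N dimension: each iteration produces an ${MR}x${NR}
  // tile of C.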
  do {
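    // Initialize accumulators from the ${NR} bias values that lead each block
    // of packed weights (lane 0 holds the bias, the other lanes start at
    // zero), then copy row 0's accumulators to the remaining rows.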
    __m128 vacc0x0c4 = _mm_load_ss(w);
    $for N in range(1, NR):
      __m128 vacc0x${N}c4 = _mm_load_ss(w + ${N});
    $for M in range(1, MR):
      $for N in range(NR):
        __m128 vacc${M}x${N}c4 = vacc0x${N}c4;
    w += ${NR};

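    // Main K loop: multiply-accumulate 4 K elements per iteration; lane j of
    // each accumulator collects the partial sums for K offsets congruent to
    // j (mod 4).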
    size_t k = kc;
    for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
      $for M in range(MR):
        const __m128 va${M} = _mm_loadu_ps(a${M});
        a${M} += 4;

      const __m128 vb0 = _mm_loadu_ps(w);
      $for N in range(1, NR):
        const __m128 vb${N} = _mm_loadu_ps(w + ${N * 4});
      w += ${NR * 4};

      $for M in range(MR):
        $for N in range(NR):
          vacc${M}x${N}c4 = _mm_add_ps(vacc${M}x${N}c4, _mm_mul_ps(va${M}, vb${N}));
    }
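    // K remainder (1 to 3 floats): the packed weights are zero-padded, so
    // lanes where vb == 0 are masked out of va. This keeps garbage loaded
    // past the end of the A row (possibly Inf/NaN, where 0 * garbage != 0)
    // out of the accumulators.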
    if XNN_UNLIKELY(k != 0) {
      $for M in range(MR):
        const __m128 va${M} = _mm_loadu_ps(a${M});
        a${M} = (const float*) ((uintptr_t) a${M} + k);

      const __m128 vb0 = _mm_loadu_ps(w);
      $for N in range(1, NR):
        const __m128 vb${N} = _mm_loadu_ps(w + ${N * 4});
      w += ${NR * 4};

      $for N in range(NR):
        const __m128 vmask${N} = _mm_cmpeq_ps(_mm_setzero_ps(), vb${N});

      $for M in range(MR):
        $for N in range(NR):
          vacc${M}x${N}c4 = _mm_add_ps(vacc${M}x${N}c4, _mm_mul_ps(_mm_andnot_ps(vmask${N}, va${M}), vb${N}));
    }

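    // Reduction, stage 1: unpack-and-add pairs up the 4 K-partials so that
    // each vaccMx01c2 holds 2 partial sums for column 0 in its even lanes and
    // 2 partial sums for column 1 in its odd lanes.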
    $for M in range(MR):
      const __m128 vacc${M}x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc${M}x0c4, vacc${M}x1c4), _mm_unpackhi_ps(vacc${M}x0c4, vacc${M}x1c4));

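    // Reduction, stage 2: combine two rows at a time. After the shuffle-add,
    // each vaccMM+1x01 holds the finished dot products
    // [row M col 0, row M col 1, row M+1 col 0, row M+1 col 1].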
    $for M in range(0, MR, 2):
      __m128 vacc${M}${M+1}x01 = _mm_add_ps(_mm_movelh_ps(vacc${M}x01c2, vacc${M+1}x01c2), _mm_movehl_ps(vacc${M+1}x01c2, vacc${M}x01c2));

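    // Clamp the results to the output range [min, max] from params.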
    const __m128 vmax = _mm_load_ps(params->sse.max);
    $for M in range(0, MR, 2):
      vacc${M}${M+1}x01 = _mm_min_ps(vacc${M}${M+1}x01, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    $for M in range(0, MR, 2):
      vacc${M}${M+1}x01 = _mm_max_ps(vacc${M}${M+1}x01, vmin);

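    // Store the ${NR}-column tile, or fall through to the single-column tail.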
    if XNN_LIKELY(nc >= ${NR}) {
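      // Full tile: each vaccMM+1x01 holds results for two rows; the low half
      // goes to row M, the high half to row M+1. Rewind the A pointers by kc
      // so the next N tile re-reads the same rows of A.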
      $for M in reversed(range(0, MR, 2)):
        _mm_storel_pi((__m64*) c${M}, vacc${M}${M+1}x01);
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);
        a${M} = (const float*) ((uintptr_t) a${M} - kc);
        _mm_storeh_pi((__m64*) c${M+1}, vacc${M}${M+1}x01);
        c${M+1} = (float*) ((uintptr_t) c${M+1} + cn_stride);
        a${M+1} = (const float*) ((uintptr_t) a${M+1} - kc);

      nc -= ${NR};
    } else {
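      // Tail: with NR == 2, any leftover must be exactly one column; store
      // only column 0 of each row (for row M+1, _mm_movehl_ps brings its
      // column-0 result down to lane 0).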
      assert(nc == 1);
      $for M in reversed(range(0, MR, 2)):
        _mm_store_ss(c${M}, vacc${M}${M+1}x01);
        _mm_store_ss(c${M+1}, _mm_movehl_ps(vacc${M}${M+1}x01, vacc${M}${M+1}x01));

      nc = 0;
    }
  } while (nc != 0);
}