// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR % 4 == 0
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>


void xnn_f32_gemm${"inc" if INC else ""}_minmax_ukernel_${MR}x${NR}__sse_load1(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    $if INC:
      const float*restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  $if INC:
    assert(acc != NULL);

  // Set up per-row pointers into A and C. Rows beyond mr are redirected to
  // the previous row, so they compute (and store) redundant but in-bounds
  // results.
  const float* a0 = a;
  float* c0 = c;
  $for M in range(1, MR):
    const float* a${M} = (const float*) ((uintptr_t) a${M-1} + a_stride);
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

  do {
    // Initialize the MRxNR accumulator tile: from the accumulation buffer
    // for the GEMMINC variant, otherwise from the bias values packed at the
    // start of each column block of w.
    $if INC:
      $for M in range(MR):
        $for N in range(0, NR, 4):
          __m128 vacc${M}x${ABC[N:N+4]} = _mm_load_ps(acc + ${M*NR+N});
      acc += ${MR*NR};
    $else:
      $for N in range(0, NR, 4):
        __m128 vacc0x${ABC[N:N+4]} = _mm_load_ps(w + ${N});
      $for M in range(1, MR):
        $for N in range(0, NR, 4):
          __m128 vacc${M}x${ABC[N:N+4]} = vacc0x${ABC[N:N+4]};
      w += ${NR};

    // Main reduction loop: per k step, broadcast one A element per row
    // (LOAD1) and multiply it by NR packed B elements.
    size_t k = kc;
    do {
      $for M in range(MR):
        const __m128 va${M} = _mm_load1_ps(a${M});
        a${M} += 1;

      const __m128 vb${ABC[0:4]} = _mm_load_ps(w);
      $for N in range(4, NR, 4):
        const __m128 vb${ABC[N:N+4]} = _mm_load_ps(w + ${N});
      w += ${NR};

      $for N in range(0, NR, 4):
        $for M in range(MR):
          vacc${M}x${ABC[N:N+4]} = _mm_add_ps(vacc${M}x${ABC[N:N+4]}, _mm_mul_ps(va${M}, vb${ABC[N:N+4]}));

      k -= sizeof(float);
    } while (k != 0);

    // Clamp the accumulators to the [min, max] output range.
    const __m128 vmax = _mm_load_ps(params->sse.max);
    $for N in range(0, NR, 4):
      $for M in range(MR):
        vacc${M}x${ABC[N:N+4]} = _mm_min_ps(vacc${M}x${ABC[N:N+4]}, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    $for N in range(0, NR, 4):
      $for M in range(MR):
        vacc${M}x${ABC[N:N+4]} = _mm_max_ps(vacc${M}x${ABC[N:N+4]}, vmin);

    if XNN_LIKELY(nc >= ${NR}) {
      // Full tile: store all NR columns and rewind A for the next column
      // block.
      $for M in reversed(range(MR)):
        _mm_storeu_ps(c${M}, vacc${M}x${ABC[0:4]});
        $for N in range(4, NR, 4):
          _mm_storeu_ps(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);

      $for M in reversed(range(MR)):
        a${M} = (const float*) ((uintptr_t) a${M} - kc);

      nc -= ${NR};
    } else {
      // Partial tile: store the remaining nc (< NR) columns in
      // power-of-two-sized chunks, shifting the accumulators down after
      // each chunk.
      $for LOG2N in reversed(range(NR.bit_length())):
        $if NR != 1 << LOG2N:
          if (nc & ${1 << LOG2N}) {
            $if LOG2N >= 2:
              $for M in reversed(range(MR)):
                _mm_storeu_ps(c${M}, vacc${M}x${ABC[0:4]});
                $for N in range(4, 1 << LOG2N, 4):
                  _mm_storeu_ps(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});

              $for M in reversed(range(MR)):
                $for N in range(0, 1 << (LOG2N - 1), 4):
                  vacc${M}x${ABC[N:N+4]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+4]};

              $for M in reversed(range(MR)):
                c${M} += ${1 << LOG2N};
            $elif LOG2N == 1:
              $for M in reversed(range(MR)):
                _mm_storel_pi((__m64*) c${M}, vacc${M}x${ABC[0:4]});

              $for M in reversed(range(MR)):
                vacc${M}x${ABC[0:4]} = _mm_movehl_ps(vacc${M}x${ABC[0:4]}, vacc${M}x${ABC[0:4]});

              $for M in reversed(range(MR)):
                c${M} += 2;
            $elif LOG2N == 0:
              $for M in reversed(range(MR)):
                _mm_store_ss(c${M}, vacc${M}x${ABC[0:4]});
          }

      nc = 0;
    }
  } while (nc != 0);
}
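// ---------------------------------------------------------------------------
// Illustrative driver (not part of the template): a minimal sketch of how one
// generated instantiation could be exercised standalone, assuming the MR=1,
// NR=8 kernel xnn_f32_gemm_minmax_ukernel_1x8__sse_load1. The weights are
// hand-packed in the layout the kernel reads above (NR bias values, then NR
// B values per k step) instead of going through XNNPACK's packing helpers,
// and the sse.min/max fields of union xnn_f32_minmax_params are assumed to
// be 16-byte-aligned float[4] arrays, matching the _mm_load_ps calls above.
#include <math.h>
#include <stdio.h>

#include <xnnpack/gemm.h>

int main(void) {
  enum { kKC = 2, kNR = 8 };  // 2 reduction steps, one 8-column tile

  const float a[kKC] = { 1.0f, 2.0f };  // single row of A (mr == 1)

  // Packed weights: 8 bias values, then 8 B values for k=0 and k=1.
  // The kernel loads w with _mm_load_ps, so it must be 16-byte aligned.
  _Alignas(16) float w[kNR + kKC * kNR];
  for (int n = 0; n < kNR; n++) {
    w[n] = 0.5f;                       // bias
    w[kNR + 0 * kNR + n] = (float) n;  // B[0][n]
    w[kNR + 1 * kNR + n] = 10.0f * n;  // B[1][n]
  }

  float c[kNR];

  // Assumed params layout, consistent with the kernel body; infinities make
  // the min/max clamp a no-op.
  union xnn_f32_minmax_params params;
  for (int i = 0; i < 4; i++) {
    params.sse.min[i] = -INFINITY;
    params.sse.max[i] = +INFINITY;
  }

  // kc and all strides are byte counts, per the asserts in the kernel.
  xnn_f32_gemm_minmax_ukernel_1x8__sse_load1(
      /*mr=*/1, /*nc=*/kNR, /*kc=*/kKC * sizeof(float),
      a, /*a_stride=*/kKC * sizeof(float),
      w, c,
      /*cm_stride=*/kNR * sizeof(float), /*cn_stride=*/kNR * sizeof(float),
      &params);

  for (int n = 0; n < kNR; n++) {
    printf("c[%d] = %g\n", n, c[n]);  // expect 0.5 + 1*n + 2*10*n = 0.5 + 21*n
  }
  return 0;
}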