// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert CHANNEL_TILE % 4 == 0
$assert CHANNEL_TILE >= 4
$assert ROW_TILE >= 1
$assert SSE in [1, 2, 4]
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$SSE_HEADER = {1: "xmmintrin.h", 2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>


$ISA = {1: "sse", 2: "sse2", 4: "sse41"}[SSE]
void xnn_f32_prelu_ukernel__${ISA}_${ROW_TILE}x${CHANNEL_TILE}(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride) XNN_DISABLE_TSAN
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  const float* i0 = input;
  float* o0 = output;
  $for M in range(1, ROW_TILE):
    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_stride);
    float* o${M} = (float*) ((uintptr_t) o${M-1} + output_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(rows <= ${M}) {
        i${M} = i${M-1};
        o${M} = o${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(rows < ${M+1}) {
        i${M} = i${M-1};
        o${M} = o${M-1};
      }

  const size_t input_increment = input_stride * ${ROW_TILE} - channels;
  const size_t output_increment = output_stride * ${ROW_TILE} - channels;

  $if SSE == 1:
    const __m128 vzero = _mm_setzero_ps();
  do {
    const float* w = weights;
    size_t c = channels;
    for (; c >= ${CHANNEL_TILE} * sizeof(float); c -= ${CHANNEL_TILE} * sizeof(float)) {
      const __m128 vw${ABC[0:4]} = _mm_load_ps(w);
      $for C in range(4, CHANNEL_TILE, 4):
        const __m128 vw${ABC[C:C+4]} = _mm_load_ps(w + ${C});
      w += ${CHANNEL_TILE};

      $for M in range(ROW_TILE):
        $if SSE == 1:
          __m128 vi${M}x${ABC[0:4]} = _mm_loadu_ps(i${M});
          $for C in range(4, CHANNEL_TILE, 4):
            __m128 vi${M}x${ABC[C:C+4]} = _mm_loadu_ps(i${M} + ${C});
        $else:
          const __m128 vi${M}x${ABC[0:4]} = _mm_loadu_ps(i${M});
          $for C in range(4, CHANNEL_TILE, 4):
            const __m128 vi${M}x${ABC[C:C+4]} = _mm_loadu_ps(i${M} + ${C});
        i${M} += ${CHANNEL_TILE};

      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          $if SSE == 1:
            __m128 vacc${M}x${ABC[C:C+4]} = _mm_max_ps(_mm_setzero_ps(), vi${M}x${ABC[C:C+4]});
            vi${M}x${ABC[C:C+4]} = _mm_min_ps(vi${M}x${ABC[C:C+4]}, vzero);
          $else:
            const __m128 vprod${M}x${ABC[C:C+4]} = _mm_mul_ps(vi${M}x${ABC[C:C+4]}, vw${ABC[C:C+4]});
            $if SSE == 2:
              const __m128 vmask${M}x${ABC[C:C+4]} = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi${M}x${ABC[C:C+4]})));

      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          $if SSE == 1:
            vacc${M}x${ABC[C:C+4]} = _mm_add_ps(vacc${M}x${ABC[C:C+4]}, _mm_mul_ps(vi${M}x${ABC[C:C+4]}, vw${ABC[C:C+4]}));
          $elif SSE == 2:
            const __m128 vacc${M}x${ABC[C:C+4]} = _mm_or_ps(_mm_and_ps(vprod${M}x${ABC[C:C+4]}, vmask${M}x${ABC[C:C+4]}), _mm_andnot_ps(vmask${M}x${ABC[C:C+4]}, vi${M}x${ABC[C:C+4]}));
          $elif SSE == 4:
            const __m128 vacc${M}x${ABC[C:C+4]} = _mm_blendv_ps(vi${M}x${ABC[C:C+4]}, vprod${M}x${ABC[C:C+4]}, vi${M}x${ABC[C:C+4]});

      $for M in range(ROW_TILE):
        _mm_storeu_ps(o${M}, vacc${M}x${ABC[0:4]});
        $for C in range(4, CHANNEL_TILE, 4):
          _mm_storeu_ps(o${M} + ${C}, vacc${M}x${ABC[C:C+4]});
        o${M} += ${CHANNEL_TILE};
    }
    $if CHANNEL_TILE > 4:
      for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
        const __m128 vw0123 = _mm_load_ps(w);
        w += 4;

        $for M in range(ROW_TILE):
          $if SSE == 1:
            __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
          $else:
            const __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
          i${M} += 4;

        $for M in range(ROW_TILE):
          $if SSE == 1:
            __m128 vacc${M}x0123 = _mm_max_ps(_mm_setzero_ps(), vi${M}x0123);
            vi${M}x0123 = _mm_min_ps(vi${M}x0123, vzero);
          $else:
            const __m128 vprod${M}x0123 = _mm_mul_ps(vi${M}x0123, vw0123);
            $if SSE == 2:
              const __m128 vmask${M}x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi${M}x0123)));

        $for M in range(ROW_TILE):
          $if SSE == 1:
            vacc${M}x0123 = _mm_add_ps(vacc${M}x0123, _mm_mul_ps(vi${M}x0123, vw0123));
          $elif SSE == 2:
            __m128 vacc${M}x0123 = _mm_or_ps(_mm_and_ps(vprod${M}x0123, vmask${M}x0123), _mm_andnot_ps(vmask${M}x0123, vi${M}x0123));
          $elif SSE == 4:
            __m128 vacc${M}x0123 = _mm_blendv_ps(vi${M}x0123, vprod${M}x0123, vi${M}x0123);

        $for M in range(ROW_TILE):
          _mm_storeu_ps(o${M}, vacc${M}x0123);
          o${M} += 4;
      }
    if XNN_UNLIKELY(c != 0) {
      const __m128 vw0123 = _mm_load_ps(w);
      w = (const float*) ((uintptr_t) w + c);

      $for M in range(ROW_TILE):
        $if SSE == 1:
          __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
        $else:
          const __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
        i${M} = (const float*) ((uintptr_t) i${M} + c);

      $for M in range(ROW_TILE):
        $if SSE == 1:
          __m128 vacc${M}x0123 = _mm_max_ps(_mm_setzero_ps(), vi${M}x0123);
          vi${M}x0123 = _mm_min_ps(vi${M}x0123, vzero);
        $else:
          const __m128 vprod${M}x0123 = _mm_mul_ps(vi${M}x0123, vw0123);
          $if SSE == 2:
            const __m128 vmask${M}x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi${M}x0123)));

      $for M in range(ROW_TILE):
        $if SSE == 1:
          vacc${M}x0123 = _mm_add_ps(vacc${M}x0123, _mm_mul_ps(vi${M}x0123, vw0123));
        $elif SSE == 2:
          __m128 vacc${M}x0123 = _mm_or_ps(_mm_and_ps(vprod${M}x0123, vmask${M}x0123), _mm_andnot_ps(vmask${M}x0123, vi${M}x0123));
        $elif SSE == 4:
          __m128 vacc${M}x0123 = _mm_blendv_ps(vi${M}x0123, vprod${M}x0123, vi${M}x0123);

      if (c & (2 * sizeof(float))) {
        $for M in range(ROW_TILE):
          _mm_storel_pi((__m64*) o${M}, vacc${M}x0123);

        $for M in range(ROW_TILE):
          vacc${M}x0123 = _mm_movehl_ps(vacc${M}x0123, vacc${M}x0123);

        $for M in range(ROW_TILE):
          o${M} += 2;
      }
      if (c & (1 * sizeof(float))) {
        $for M in range(ROW_TILE):
          _mm_store_ss(o${M}, vacc${M}x0123);

        $for M in range(ROW_TILE):
          o${M} += 1;
      }
    }
    $for M in range(ROW_TILE):
      i${M} = (const float*) ((uintptr_t) i${M} + input_increment);
      o${M} = (float*) ((uintptr_t) o${M} + output_increment);
      $if M % 2 == 1:
        if XNN_UNPREDICTABLE(rows < ${ROW_TILE + M+1}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
      $elif M != 0:
        if XNN_UNPREDICTABLE(rows <= ${ROW_TILE + M}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
    rows = doz(rows, ${ROW_TILE});
  } while (rows != 0);
}
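
// Notes on the per-ISA PReLU selection used above (descriptive only; one
// illustrative instantiation of this template would be
// xnn_f32_prelu_ukernel__sse2_2x8, i.e. SSE=2, ROW_TILE=2, CHANNEL_TILE=8):
//   SSE:    no vector select exists, so the input is split into its positive
//           part max(0, x) and negative part min(x, 0), and only the negative
//           part is scaled by the per-channel weight before the two are summed.
//   SSE2:   _mm_cmpgt_epi32 against zero on the float bit pattern yields an
//           all-ones mask in lanes with the sign bit set; and/andnot/or then
//           combine x*w (negative lanes) with x (non-negative lanes).
//   SSE4.1: _mm_blendv_ps selects x*w where the sign bit of x is set and x
//           otherwise, collapsing the SSE2 mask-and-combine into one blend.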