// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert CHANNEL_TILE % 4 == 0
$assert CHANNEL_TILE >= 4
$assert ROW_TILE >= 1
$assert SSE in [1, 2, 4]
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$SSE_HEADER = {1: "xmmintrin.h", 2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>


$ISA = {1: "sse", 2: "sse2", 4: "sse41"}[SSE]
void xnn_f32_prelu_ukernel__${ISA}_${ROW_TILE}x${CHANNEL_TILE}(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride) XNN_DISABLE_TSAN
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  // Set up input/output pointers for ${ROW_TILE} rows; when the batch has
  // fewer rows, the extra pointers alias the previous row.
  const float* i0 = input;
  float* o0 = output;
  $for M in range(1, ROW_TILE):
    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_stride);
    float* o${M} = (float*) ((uintptr_t) o${M-1} + output_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(rows <= ${M}) {
        i${M} = i${M-1};
        o${M} = o${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(rows < ${M+1}) {
        i${M} = i${M-1};
        o${M} = o${M-1};
      }

  const size_t input_increment = input_stride * ${ROW_TILE} - channels;
  const size_t output_increment = output_stride * ${ROW_TILE} - channels;

  $if SSE == 1:
    const __m128 vzero = _mm_setzero_ps();
  do {
    const float* w = weights;
    size_t c = channels;
    // Main loop: process ${CHANNEL_TILE} channels per iteration.
    for (; c >= ${CHANNEL_TILE} * sizeof(float); c -= ${CHANNEL_TILE} * sizeof(float)) {
      const __m128 vw${ABC[0:4]} = _mm_load_ps(w);
      $for C in range(4, CHANNEL_TILE, 4):
        const __m128 vw${ABC[C:C+4]} = _mm_load_ps(w + ${C});
      w += ${CHANNEL_TILE};

      $for M in range(ROW_TILE):
        $if SSE == 1:
          __m128 vi${M}x${ABC[0:4]} = _mm_loadu_ps(i${M});
          $for C in range(4, CHANNEL_TILE, 4):
            __m128 vi${M}x${ABC[C:C+4]} = _mm_loadu_ps(i${M} + ${C});
        $else:
          const __m128 vi${M}x${ABC[0:4]} = _mm_loadu_ps(i${M});
          $for C in range(4, CHANNEL_TILE, 4):
            const __m128 vi${M}x${ABC[C:C+4]} = _mm_loadu_ps(i${M} + ${C});
        i${M} += ${CHANNEL_TILE};

      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          $if SSE == 1:
            __m128 vacc${M}x${ABC[C:C+4]} = _mm_max_ps(_mm_setzero_ps(), vi${M}x${ABC[C:C+4]});
            vi${M}x${ABC[C:C+4]} = _mm_min_ps(vi${M}x${ABC[C:C+4]}, vzero);
          $else:
            const __m128 vprod${M}x${ABC[C:C+4]} = _mm_mul_ps(vi${M}x${ABC[C:C+4]}, vw${ABC[C:C+4]});
            $if SSE == 2:
              const __m128 vmask${M}x${ABC[C:C+4]} = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi${M}x${ABC[C:C+4]})));

      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          $if SSE == 1:
            vacc${M}x${ABC[C:C+4]} = _mm_add_ps(vacc${M}x${ABC[C:C+4]}, _mm_mul_ps(vi${M}x${ABC[C:C+4]}, vw${ABC[C:C+4]}));
          $elif SSE == 2:
            const __m128 vacc${M}x${ABC[C:C+4]} = _mm_or_ps(_mm_and_ps(vprod${M}x${ABC[C:C+4]}, vmask${M}x${ABC[C:C+4]}), _mm_andnot_ps(vmask${M}x${ABC[C:C+4]}, vi${M}x${ABC[C:C+4]}));
          $elif SSE == 4:
            const __m128 vacc${M}x${ABC[C:C+4]} = _mm_blendv_ps(vi${M}x${ABC[C:C+4]}, vprod${M}x${ABC[C:C+4]}, vi${M}x${ABC[C:C+4]});

      $for M in range(ROW_TILE):
        _mm_storeu_ps(o${M}, vacc${M}x${ABC[0:4]});
        $for C in range(4, CHANNEL_TILE, 4):
          _mm_storeu_ps(o${M} + ${C}, vacc${M}x${ABC[C:C+4]});
        o${M} += ${CHANNEL_TILE};
    }
    $if CHANNEL_TILE > 4:
      // Remainder loop: process 4 channels per iteration.
      for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
        const __m128 vw0123 = _mm_load_ps(w);
        w += 4;

        $for M in range(ROW_TILE):
          $if SSE == 1:
            __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
          $else:
            const __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
          i${M} += 4;

        $for M in range(ROW_TILE):
          $if SSE == 1:
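            // SSE1 lacks the integer compare/blend used by the SSE2/SSE4.1
            // paths: decompose x into its non-negative part max(x, 0) and
            // non-positive part min(x, 0); the negative part is scaled by
            // the per-channel weight in the next pass.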
            __m128 vacc${M}x0123 = _mm_max_ps(_mm_setzero_ps(), vi${M}x0123);
            vi${M}x0123 = _mm_min_ps(vi${M}x0123, vzero);
          $else:
            const __m128 vprod${M}x0123 = _mm_mul_ps(vi${M}x0123, vw0123);
            $if SSE == 2:
              const __m128 vmask${M}x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi${M}x0123)));

        $for M in range(ROW_TILE):
          $if SSE == 1:
            vacc${M}x0123 = _mm_add_ps(vacc${M}x0123, _mm_mul_ps(vi${M}x0123, vw0123));
          $elif SSE == 2:
            __m128 vacc${M}x0123 = _mm_or_ps(_mm_and_ps(vprod${M}x0123, vmask${M}x0123), _mm_andnot_ps(vmask${M}x0123, vi${M}x0123));
          $elif SSE == 4:
            __m128 vacc${M}x0123 = _mm_blendv_ps(vi${M}x0123, vprod${M}x0123, vi${M}x0123);

        $for M in range(ROW_TILE):
          _mm_storeu_ps(o${M}, vacc${M}x0123);
          o${M} += 4;
      }
    // Handle the final 1-3 channels, if any.
    if XNN_UNLIKELY(c != 0) {
      const __m128 vw0123 = _mm_load_ps(w);
      w = (const float*) ((uintptr_t) w + c);

      $for M in range(ROW_TILE):
        $if SSE == 1:
          __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
        $else:
          const __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
        i${M} = (const float*) ((uintptr_t) i${M} + c);

      $for M in range(ROW_TILE):
        $if SSE == 1:
          __m128 vacc${M}x0123 = _mm_max_ps(_mm_setzero_ps(), vi${M}x0123);
          vi${M}x0123 = _mm_min_ps(vi${M}x0123, vzero);
        $else:
          const __m128 vprod${M}x0123 = _mm_mul_ps(vi${M}x0123, vw0123);
          $if SSE == 2:
            const __m128 vmask${M}x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi${M}x0123)));

      $for M in range(ROW_TILE):
        $if SSE == 1:
          vacc${M}x0123 = _mm_add_ps(vacc${M}x0123, _mm_mul_ps(vi${M}x0123, vw0123));
        $elif SSE == 2:
          __m128 vacc${M}x0123 = _mm_or_ps(_mm_and_ps(vprod${M}x0123, vmask${M}x0123), _mm_andnot_ps(vmask${M}x0123, vi${M}x0123));
        $elif SSE == 4:
          __m128 vacc${M}x0123 = _mm_blendv_ps(vi${M}x0123, vprod${M}x0123, vi${M}x0123);

      if (c & (2 * sizeof(float))) {
        $for M in range(ROW_TILE):
          _mm_storel_pi((__m64*) o${M}, vacc${M}x0123);

        $for M in range(ROW_TILE):
          vacc${M}x0123 = _mm_movehl_ps(vacc${M}x0123, vacc${M}x0123);

        $for M in range(ROW_TILE):
          o${M} += 2;
      }
      if (c & (1 * sizeof(float))) {
        $for M in range(ROW_TILE):
          _mm_store_ss(o${M}, vacc${M}x0123);

        $for M in range(ROW_TILE):
          o${M} += 1;
      }
    }
    // Advance to the next group of ${ROW_TILE} rows; pointers for rows past
    // the end of the batch alias the previous row.
    $for M in range(ROW_TILE):
      i${M} = (const float*) ((uintptr_t) i${M} + input_increment);
      o${M} = (float*) ((uintptr_t) o${M} + output_increment);
      $if M % 2 == 1:
        if XNN_UNPREDICTABLE(rows < ${ROW_TILE + M+1}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
      $elif M != 0:
        if XNN_UNPREDICTABLE(rows <= ${ROW_TILE + M}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }

    // doz() is saturating subtraction: max(rows - ${ROW_TILE}, 0).
    rows = doz(rows, ${ROW_TILE});
  } while (rows != 0);
}
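
// This template is expanded by XNNPACK's tools/xngen generator. A typical
// invocation (an assumed example, mirroring scripts/generate-f32-prelu.sh)
// would be:
//   tools/xngen src/f32-prelu/sse.c.in -D SSE=2 -D ROW_TILE=2 -D CHANNEL_TILE=8 -o src/f32-prelu/gen/sse2-2x8.c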