// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>


// AVX-512F micro-kernel template for f32 LeakyReLU:
//   y[i] = x[i] < 0 ? x[i] * slope : x[i]
// This file is a generator template: ${BATCH_TILE} (asserted above to be a
// multiple of 16, at least 16) is the number of float elements processed per
// main-loop iteration; the $for directives unroll the body in 16-lane steps.
//
// n      - size of the input in BYTES; must be non-zero and a multiple of
//          sizeof(float) (asserted below).
// x      - input array of floats.
// y      - output array of floats.
// params - micro-kernel parameters; the slope is read from params->sse.slope.
//          NOTE(review): _mm_load_ps reads 4 floats there and the 128-bit group
//          is broadcast to all four lanes of the zmm register — presumably the
//          slope is pre-replicated across those 4 floats (and the pointer is
//          16-byte aligned, as _mm_load_ps requires); confirm against the
//          params-initialization code.
void xnn_f32_vlrelu_ukernel__avx512f_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  // Broadcast the 4-float slope group to all 512 bits; vzero is the
  // comparison threshold for detecting negative inputs.
  const __m512 vslope = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.slope));
  const __m512 vzero = _mm512_setzero_ps();

  // Main loop: process ${BATCH_TILE} elements (BATCH_TILE/16 zmm registers)
  // per iteration.
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    __m512 vacc${ABC[0:16]} = _mm512_loadu_ps(x);
    $for N in range(16, BATCH_TILE, 16):
      __m512 vacc${ABC[N:N+16]} = _mm512_loadu_ps(x + ${N});
    x += ${BATCH_TILE};

    // Per-lane mask of negative inputs (_CMP_LT_OQ: ordered, non-signaling
    // "less than", so NaN lanes compare false and pass through unscaled).
    $for N in range(0, BATCH_TILE, 16):
      const __mmask16 vsign${ABC[N:N+16]} = _mm512_cmp_ps_mask(vacc${ABC[N:N+16]}, vzero, _CMP_LT_OQ);

    // Masked multiply: only the negative lanes are scaled by slope; the
    // remaining lanes keep their original value (merge-masking semantics).
    $for N in range(0, BATCH_TILE, 16):
      vacc${ABC[N:N+16]} = _mm512_mask_mul_ps(vacc${ABC[N:N+16]}, vsign${ABC[N:N+16]}, vacc${ABC[N:N+16]}, vslope);

    _mm512_storeu_ps(y, vacc${ABC[0:16]});
    $for N in range(16, BATCH_TILE, 16):
      _mm512_storeu_ps(y + ${N}, vacc${ABC[N:N+16]});
    y += ${BATCH_TILE};
  }
  // Secondary loop (only generated when the main loop is unrolled beyond one
  // register): drain remaining full 16-element vectors.
  $if BATCH_TILE > 16:
    for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
      __m512 vacc = _mm512_loadu_ps(x);
      x += 16;
      const __mmask16 vsign = _mm512_cmp_ps_mask(vacc, vzero, _CMP_LT_OQ);
      vacc = _mm512_mask_mul_ps(vacc, vsign, vacc, vslope);
      _mm512_storeu_ps(y, vacc);
      y += 16;
    }
  // Tail: 1..15 leftover elements, handled with AVX-512 masking so no
  // out-of-bounds memory is touched.
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on n).
    // Convert the remaining byte count to an element count...
    n >>= 2 /* log2(sizeof(float)) */;
    // ...then set the low n bits of the lane mask (n is in [1, 15] here, so
    // the shift cannot overflow the 32-bit operand).
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

    // Masked load zeroes the invalid lanes; the compare is additionally
    // restricted to valid lanes, and the masked store writes only those lanes.
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, x);
    const __mmask16 vsign = _mm512_mask_cmp_ps_mask(vmask, vacc, vzero, _CMP_LT_OQ);
    vacc = _mm512_mask_mul_ps(vacc, vsign, vacc, vslope);
    _mm512_mask_storeu_ps(y, vmask, vacc);
  }
}