// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$SIMD_TILE = BATCH_TILE // 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>


void xnn_f32_vsqrt_ukernel__avx512f_nr1fma1adj_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  // Approximate sqrt(x) as x * rsqrt(x): refine the _mm512_rsqrt14_ps estimate
  // with one Newton-Raphson iteration (via FMA), then apply one final
  // adjustment step, y = sqrtx + halfrsqrtx * (x - sqrtx * sqrtx).
  const __m512 vhalf = _mm512_set1_ps(params->fma.half);
  $if BATCH_TILE > 16:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const __m512 vx${ABC[0]} = _mm512_loadu_ps(x);
      $for N in range(1, SIMD_TILE):
        const __m512 vx${ABC[N]} = _mm512_loadu_ps(x + ${N * 16});
      x += ${BATCH_TILE};

      $for N in range(SIMD_TILE):
        const __m512 vrsqrtx${ABC[N]} = _mm512_rsqrt14_ps(vx${ABC[N]});

      $for N in range(SIMD_TILE):
        __m512 vsqrtx${ABC[N]} = _mm512_mul_ps(vrsqrtx${ABC[N]}, vx${ABC[N]});
        __m512 vhalfrsqrtx${ABC[N]} = _mm512_mul_ps(vrsqrtx${ABC[N]}, vhalf);

      $for N in range(SIMD_TILE):
        const __m512 vresidual${ABC[N]} = _mm512_fnmadd_ps(vsqrtx${ABC[N]}, vhalfrsqrtx${ABC[N]}, vhalf);

      $for N in range(SIMD_TILE):
        vhalfrsqrtx${ABC[N]} = _mm512_fmadd_ps(vhalfrsqrtx${ABC[N]}, vresidual${ABC[N]}, vhalfrsqrtx${ABC[N]});
        vsqrtx${ABC[N]} = _mm512_fmadd_ps(vsqrtx${ABC[N]}, vresidual${ABC[N]}, vsqrtx${ABC[N]});

      $for N in range(SIMD_TILE):
        const __m512 vadjustment${ABC[N]} = _mm512_fnmadd_ps(vsqrtx${ABC[N]}, vsqrtx${ABC[N]}, vx${ABC[N]});

      $for N in range(SIMD_TILE):
        const __m512 vy${ABC[N]} = _mm512_fmadd_ps(vhalfrsqrtx${ABC[N]}, vadjustment${ABC[N]}, vsqrtx${ABC[N]});

      _mm512_storeu_ps(y, vy${ABC[0]});
      $for N in range(1, SIMD_TILE):
        _mm512_storeu_ps(y + ${N * 16}, vy${ABC[N]});
      y += ${BATCH_TILE};
    }
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(x);
    x += 16;

    const __m512 vrsqrtx = _mm512_rsqrt14_ps(vx);
    __m512 vsqrtx = _mm512_mul_ps(vrsqrtx, vx);
    __m512 vhalfrsqrtx = _mm512_mul_ps(vrsqrtx, vhalf);
    const __m512 vresidual = _mm512_fnmadd_ps(vsqrtx, vhalfrsqrtx, vhalf);
    vhalfrsqrtx = _mm512_fmadd_ps(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = _mm512_fmadd_ps(vsqrtx, vresidual, vsqrtx);
    const __m512 vadjustment = _mm512_fnmadd_ps(vsqrtx, vsqrtx, vx);
    const __m512 vy = _mm512_fmadd_ps(vhalfrsqrtx, vadjustment, vsqrtx);

    _mm512_storeu_ps(y, vy);
    y += 16;
  }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on n).
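    // n counts the remaining bytes at this point; shifting right by 2 converts
    // it to an element count (1..15), and ((1 << count) - 1) sets one mask bit
    // per valid lane.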
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

    const __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
    const __m512 vrsqrtx = _mm512_rsqrt14_ps(vx);
    __m512 vsqrtx = _mm512_mul_ps(vrsqrtx, vx);
    __m512 vhalfrsqrtx = _mm512_mul_ps(vrsqrtx, vhalf);
    const __m512 vresidual = _mm512_fnmadd_ps(vsqrtx, vhalfrsqrtx, vhalf);
    vhalfrsqrtx = _mm512_fmadd_ps(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = _mm512_fmadd_ps(vsqrtx, vresidual, vsqrtx);
    const __m512 vadjustment = _mm512_fnmadd_ps(vsqrtx, vsqrtx, vx);
    const __m512 vy = _mm512_fmadd_ps(vhalfrsqrtx, vadjustment, vsqrtx);

    _mm512_mask_storeu_ps(y, vmask, vy);
  }
}