// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["RNDNE", "RNDZ", "RNDU", "RNDD"]
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


// Mask table for the partial-vector tail: loading 8 consecutive int32 values
// starting n bytes before &mask_table[7] yields a vector whose first
// n / sizeof(float) lanes are all-ones (-1) and whose remaining lanes are zero.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

$_MM_FROUND_TO_FLAG = {
$  "RNDNE": "_MM_FROUND_TO_NEAREST_INT",
$  "RNDZ": "_MM_FROUND_TO_ZERO",
$  "RNDU": "_MM_FROUND_TO_POS_INF",
$  "RNDD": "_MM_FROUND_TO_NEG_INF",
$}[OP]
void xnn_f32_v${OP.lower()}_ukernel__avx_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  // Main loop: round ${BATCH_TILE} elements per iteration.
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const __m256 vx${ABC[0:8]} = _mm256_loadu_ps(x);
    $for N in range(8, BATCH_TILE, 8):
      const __m256 vx${ABC[N:N+8]} = _mm256_loadu_ps(x + ${N});
    x += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 8):
      const __m256 vy${ABC[N:N+8]} = _mm256_round_ps(vx${ABC[N:N+8]}, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);

    _mm256_storeu_ps(y, vy${ABC[0:8]});
    $for N in range(8, BATCH_TILE, 8):
      _mm256_storeu_ps(y + ${N}, vy${ABC[N:N+8]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 8:
    // Remainder loop: round 8 elements at a time.
    for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
      const __m256 vx = _mm256_loadu_ps(x);
      x += 8;

      const __m256 vy = _mm256_round_ps(vx, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);

      _mm256_storeu_ps(y, vy);
      y += 8;
    }
  // Tail: handle the final 1-7 elements with a masked load and partial stores.
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    // Load a mask with the first n / sizeof(float) lanes set (see mask_table above).
    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);
    const __m256 vy = _mm256_round_ps(vx, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);

    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
    // Instead, store the result in 4-, 2-, and 1-element pieces according to the bits of n.
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}