// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert MR % 8 == 0
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/spmm.h>


// SpMM microkernel: multiplies a sparse __fp16 weight matrix by a dense input.
// For each output channel, `weights` holds a bias followed by the non-zero
// weights, `nidx_nnzmap` holds the non-zero count, and `widx_dmap` holds the
// byte offsets between consecutive non-zero input rows. `mc` is given in bytes.
void xnn_f16_spmm_minmax_ukernel_${MR}x${NR}__neonfp16arith${"_x%d" % UNROLL if UNROLL > 1 else ""}(
    size_t mc,
    size_t nc,
    const void*restrict input,
    const void*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    void*restrict output,
    size_t output_stride,
    const struct xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(__fp16) == 0);
  assert(nc != 0);

  const __fp16*restrict i = (const __fp16*) input;
  __fp16*restrict o = (__fp16*) output;

  const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale);
  const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max);
  const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min);

  // After the channel loop, rewind `o` to the start of the next pixel block.
  size_t output_decrement = output_stride * nc - ${MR} * sizeof(__fp16);
  // Main loop: process ${MR} input pixels per iteration.
  while XNN_LIKELY(mc >= ${MR} * sizeof(__fp16)) {
    const __fp16*restrict w = (const __fp16*) weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      $if UNROLL > 1:
        // Accumulator 0 starts from the bias; the extra unroll accumulators
        // start at zero and are reduced into it after the unrolled loop.
        float16x8_t vacc01234567x0 = vld1q_dup_f16(w); w += 1;
        $for K in range(1, UNROLL):
          float16x8_t vacc01234567x${K} = vmovq_n_f16(0.0f);
        $for M in range(8, MR, 8):
          float16x8_t vacc${ABC[M:M+8]}x0 = vacc01234567x0;
          $for K in range(1, UNROLL):
            float16x8_t vacc${ABC[M:M+8]}x${K} = vmovq_n_f16(0.0f);
        for (; nnz >= ${UNROLL}; nnz -= ${UNROLL}) {
          $for K in range(UNROLL):
            const intptr_t diff${K} = dmap[${K}];
          dmap += ${UNROLL};
          $for K in range(UNROLL):
            const float16x8_t va01234567x${K} = vld1q_f16(i);
            $for M in range(8, MR, 8):
              const float16x8_t va${ABC[M:M+8]}x${K} = vld1q_f16(i + ${M});
            i = (const __fp16*restrict) ((uintptr_t) i + (uintptr_t) diff${K});
            const float16x8_t vb${K} = vld1q_dup_f16(w); w += 1;
            $for M in range(0, MR, 8):
              vacc${ABC[M:M+8]}x${K} = vfmaq_f16(vacc${ABC[M:M+8]}x${K}, va${ABC[M:M+8]}x${K}, vb${K});
        }
        $for M in range(0, MR, 8):
          float16x8_t vacc${ABC[M:M+8]} = vacc${ABC[M:M+8]}x0;
        $for K in range(1, UNROLL):
          $for M in range(0, MR, 8):
            vacc${ABC[M:M+8]} = vaddq_f16(vacc${ABC[M:M+8]}, vacc${ABC[M:M+8]}x${K});
      $else:
        float16x8_t vacc01234567 = vld1q_dup_f16(w); w += 1;
        $for M in range(8, MR, 8):
          float16x8_t vacc${ABC[M:M+8]} = vacc01234567;
      if XNN_LIKELY(nnz != 0) {
        do {
          const intptr_t diff = *dmap++;
          const float16x8_t va01234567 = vld1q_f16(i);
          $for M in range(8, MR, 8):
            const float16x8_t va${ABC[M:M+8]} = vld1q_f16(i + ${M});
          i = (const __fp16*restrict) ((uintptr_t) i + (uintptr_t) diff);
          const float16x8_t vb = vld1q_dup_f16(w); w += 1;
          $for M in range(0, MR, 8):
            vacc${ABC[M:M+8]} = vfmaq_f16(vacc${ABC[M:M+8]}, va${ABC[M:M+8]}, vb);
        } while (--nnz != 0);
      }
      // Scale the accumulators, then clamp to [min, max].
      $for M in range(0, MR, 8):
        float16x8_t vout${ABC[M:M+8]} = vmulq_f16(vacc${ABC[M:M+8]}, vscale);
      $for M in range(0, MR, 8):
        vout${ABC[M:M+8]} = vminq_f16(vout${ABC[M:M+8]}, vmax);
      $for M in range(0, MR, 8):
        vout${ABC[M:M+8]} = vmaxq_f16(vout${ABC[M:M+8]}, vmin);
      vst1q_f16(o, vout01234567);
      $for M in range(8, MR, 8):
        vst1q_f16(o + ${M}, vout${ABC[M:M+8]});
      o = (__fp16*restrict) ((uintptr_t) o + output_stride);
    } while (--n != 0);
    o = (__fp16*restrict) ((uintptr_t) o - output_decrement);
    i += ${MR};
    mc -= ${MR} * sizeof(__fp16);
  }
  if XNN_UNLIKELY(mc != 0) {
    // Remainder: handle the last (up to ${MR} - 1) pixels in power-of-2 tiles.
    $for LOG2M in reversed(range((MR - 1).bit_length())):
      $SUBMR = 1 << LOG2M
      $if SUBMR * 2 >= MR:
        output_decrement += ${MR - SUBMR} * sizeof(__fp16);
      $else:
        output_decrement += ${SUBMR} * sizeof(__fp16);
      if (mc & (${SUBMR} * sizeof(__fp16))) {
        const __fp16*restrict w = (const __fp16*) weights;
        const int32_t* dmap = widx_dmap;
        const uint32_t* nnzmap = nidx_nnzmap;
        size_t n = nc;
        do {
          uint32_t nnz = *nnzmap++;
          $if SUBMR <= 4:
            float16x4_t vacc${ABC[0:SUBMR]} = vld1_dup_f16(w); w += 1;
          $else:
            float16x8_t vacc01234567 = vld1q_dup_f16(w); w += 1;
          $for M in range(8, SUBMR, 8):
            float16x8_t vacc${ABC[M:M+8]} = vacc01234567;
          if XNN_LIKELY(nnz != 0) {
            do {
              const intptr_t diff = *dmap++;
              $if SUBMR == 1:
                const float16x4_t va0 = vld1_dup_f16(i);
              $elif SUBMR == 2:
                const float16x4_t va01 = vreinterpret_f16_f32(vld1_dup_f32(__builtin_assume_aligned(i, 1)));
              $elif SUBMR == 4:
                const float16x4_t va0123 = vld1_f16(i);
              $else:
                const float16x8_t va01234567 = vld1q_f16(i);
                $for M in range(8, SUBMR, 8):
                  const float16x8_t va${ABC[M:M+8]} = vld1q_f16(i + ${M});
              i = (const __fp16*restrict) ((uintptr_t) i + (uintptr_t) diff);
              $if SUBMR <= 4:
                const float16x4_t vb = vld1_dup_f16(w); w += 1;
              $else:
                const float16x8_t vb = vld1q_dup_f16(w); w += 1;
              $if SUBMR <= 4:
                vacc${ABC[0:SUBMR]} = vfma_f16(vacc${ABC[0:SUBMR]}, va${ABC[0:SUBMR]}, vb);
              $else:
                $for M in range(0, SUBMR, 8):
                  vacc${ABC[M:M+8]} = vfmaq_f16(vacc${ABC[M:M+8]}, va${ABC[M:M+8]}, vb);
            } while (--nnz != 0);
          }
          // Scale, then clamp, to match the main loop's output processing.
          $if SUBMR <= 4:
            float16x4_t vout${ABC[0:SUBMR]} = vmul_f16(vacc${ABC[0:SUBMR]}, vget_low_f16(vscale));
            vout${ABC[0:SUBMR]} = vmin_f16(vout${ABC[0:SUBMR]}, vget_low_f16(vmax));
            vout${ABC[0:SUBMR]} = vmax_f16(vout${ABC[0:SUBMR]}, vget_low_f16(vmin));
            $if SUBMR == 1:
              vst1_lane_f16(o, vout${ABC[0]}, 0);
            $elif SUBMR == 2:
              vst1_lane_f32(__builtin_assume_aligned(o, 1), vreinterpret_f32_f16(vout${ABC[0:SUBMR]}), 0);
            $else:
              vst1_f16(o, vout${ABC[0:SUBMR]});
          $else:
            $for M in range(0, SUBMR, 8):
              float16x8_t vout${ABC[M:M+8]} = vmulq_f16(vacc${ABC[M:M+8]}, vscale);
            $for M in range(0, SUBMR, 8):
              vout${ABC[M:M+8]} = vminq_f16(vout${ABC[M:M+8]}, vmax);
            $for M in range(0, SUBMR, 8):
              vout${ABC[M:M+8]} = vmaxq_f16(vout${ABC[M:M+8]}, vmin);
            vst1q_f16(o, vout01234567);
            $for M in range(8, SUBMR, 8):
              vst1q_f16(o + ${M}, vout${ABC[M:M+8]});
          o = (__fp16*restrict) ((uintptr_t) o + output_stride);
        } while (--n != 0);
        o = (__fp16*restrict) ((uintptr_t) o - output_decrement);
        i += ${SUBMR};
      }
  }
}
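
// Illustrative expansion (not part of the template): with the hypothetical
// xngen parameters MR=16, NR=1, UNROLL=2 -- values chosen for this sketch --
// the template generates a kernel named
// xnn_f16_spmm_minmax_ukernel_16x1__neonfp16arith_x2, and the accumulator
// setup at the top of the channel loop expands to:
//
//   float16x8_t vacc01234567x0 = vld1q_dup_f16(w); w += 1;  // bias
//   float16x8_t vacc01234567x1 = vmovq_n_f16(0.0f);
//   float16x8_t vacc89ABCDEFx0 = vacc01234567x0;
//   float16x8_t vacc89ABCDEFx1 = vmovq_n_f16(0.0f);
//
// The "89ABCDEF" register-name suffix comes from the ${ABC[M:M+8]} slice with
// M=8, which is why the template asserts MR % 8 == 0.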