/external/XNNPACK/src/f32-dwconv/gen/
References to vk1x0123 in the generated f32 depthwise-convolution micro-kernels under this directory. Each entry lists the file, the containing function, and the matching source lines; [local] marks the lines where vk1x0123 is declared as a local.

up8x4-wasmsimd.c, in xnn_f32_dwconv_ukernel_up8x4__wasmsimd():
   75  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
   77  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));
  120  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  121  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));
  151  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  152  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));

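All of the up8x4 wasmsimd matches above show the same step: load four tap-1 weights from the packed weight block and accumulate them against four inputs. A minimal sketch of that step, assuming the packed layout implied by the offsets (8 biases, then 8 weights per tap, so w + 16 is the first half of tap 1); the helper name and parameters are illustrative, not taken from the generated kernel:

    #include <wasm_simd128.h>

    // Accumulate kernel tap 1 for channels 0..3 into the running sum.
    // Baseline Wasm SIMD has no fused multiply-add, hence separate mul + add.
    static inline v128_t accumulate_tap1(v128_t vacc0123p0,
                                         const float* i1,   // input row for tap 1
                                         const float* w) {  // packed weights (bias first)
      const v128_t vi1x0123 = wasm_v128_load(i1);       // 4 inputs for tap 1
      const v128_t vk1x0123 = wasm_v128_load(w + 16);   // 4 tap-1 weights, lanes 0..3
      return wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));
    }
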
up8x4-minmax-sse-acc2.c, in xnn_f32_dwconv_minmax_ukernel_up8x4__sse_acc2():
   77  const __m128 vk1x0123 = _mm_load_ps(w + 16);   [local]
   79  __m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
  127  const __m128 vk1x0123 = _mm_load_ps(w + 16);   [local]
  128  __m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
  161  const __m128 vk1x0123 = _mm_load_ps(w + 16);   [local]
  162  __m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);

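The _acc2 variants split the per-pixel sum across two accumulators: tap 1 starts vacc0123p1 instead of being added into vacc0123p0, and the two partial sums are merged once at the end, which shortens the add dependency chain. A rough sketch of that structure for a 4-tap kernel, lanes 0..3 only; the helper name and the surrounding loads are illustrative, and w is assumed to be the 16-byte-aligned packed block (8 biases, then 8 weights per tap) that the w + 16 offset suggests:

    #include <xmmintrin.h>

    // Two-accumulator (acc2) accumulation for one 4-lane channel group.
    static inline __m128 dwconv4_acc2_lanes0to3(const float* i0, const float* i1,
                                                const float* i2, const float* i3,
                                                const float* w) {
      __m128 vacc0123p0 = _mm_load_ps(w);  // bias
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(_mm_loadu_ps(i0), _mm_load_ps(w + 8)));
      __m128 vacc0123p1 = _mm_mul_ps(_mm_loadu_ps(i1), _mm_load_ps(w + 16));  // start 2nd chain
      vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(_mm_loadu_ps(i2), _mm_load_ps(w + 24)));
      vacc0123p1 = _mm_add_ps(vacc0123p1, _mm_mul_ps(_mm_loadu_ps(i3), _mm_load_ps(w + 32)));
      return _mm_add_ps(vacc0123p0, vacc0123p1);  // merge the two partial sums
    }
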
up8x4-minmax-neon.c, in xnn_f32_dwconv_minmax_ukernel_up8x4__neon():
   74  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
   76  vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
  111  const float32x4_t vk1x0123 = vld1q_f32(w + 12);   [local]
  112  vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
  137  const float32x4_t vk1x0123 = vld1q_f32(w + 16);   [local]
  138  vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);

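The NEON kernel folds the multiply and add into a single vmlaq_f32 multiply-accumulate; the main loop consumes weights through a post-incremented pointer, while the remainder paths reload at fixed offsets (w + 12, w + 16). A small illustrative sketch of the main-loop step, with hypothetical helper and parameter names:

    #include <arm_neon.h>

    // Accumulate 4 channels of kernel tap 1: vacc += vi * vk.
    static inline float32x4_t accumulate_tap1_neon(float32x4_t vacc0123p0,
                                                   const float* i1,
                                                   const float** w) {
      const float32x4_t vi1x0123 = vld1q_f32(i1);
      const float32x4_t vk1x0123 = vld1q_f32(*w); *w += 4;  // consume 4 weights
      return vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
    }
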
up8x4-minmax-wasmsimd-arm-acc2.c, in xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm_acc2():
   77  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
   79  v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
  128  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  129  v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
  162  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  163  v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);

up8x4-minmax-neonfma.c, in xnn_f32_dwconv_minmax_ukernel_up8x4__neonfma():
   74  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
   76  vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
  111  const float32x4_t vk1x0123 = vld1q_f32(w + 12);   [local]
  112  vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
  137  const float32x4_t vk1x0123 = vld1q_f32(w + 16);   [local]
  138  vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);

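The neonfma variant is the same step with a true fused multiply-add: vfmaq_f32 rounds once, so its results can differ from the vmlaq_f32 kernel in the last bit, and it needs FMA support (e.g. AArch64). Sketch, same caveats as above:

    #include <arm_neon.h>

    // Fused multiply-accumulate: vacc += vi * vk with a single rounding.
    static inline float32x4_t accumulate_tap1_neonfma(float32x4_t vacc0123p0,
                                                      float32x4_t vi1x0123,
                                                      float32x4_t vk1x0123) {
      return vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
    }
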
up8x4-minmax-neon-acc2.c, in xnn_f32_dwconv_minmax_ukernel_up8x4__neon_acc2():
   74  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
   76  float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
  114  const float32x4_t vk1x0123 = vld1q_f32(w + 12);   [local]
  115  float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
  142  const float32x4_t vk1x0123 = vld1q_f32(w + 16);   [local]
  143  float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);

up8x4-minmax-neonfma-acc2.c, in xnn_f32_dwconv_minmax_ukernel_up8x4__neonfma_acc2():
   74  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
   76  float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
  114  const float32x4_t vk1x0123 = vld1q_f32(w + 12);   [local]
  115  float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
  142  const float32x4_t vk1x0123 = vld1q_f32(w + 16);   [local]
  143  float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);

up8x4-minmax-wasmsimd-arm.c, in xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_arm():
   77  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
   79  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));
  125  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  126  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));
  157  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  158  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));

up8x4-minmax-wasmsimd-x86.c, in xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86():
   77  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
   79  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));
  125  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  126  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));
  157  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  158  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));

up8x4-minmax-sse.c, in xnn_f32_dwconv_minmax_ukernel_up8x4__sse():
   77  const __m128 vk1x0123 = _mm_load_ps(w + 16);   [local]
   79  vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
  124  const __m128 vk1x0123 = _mm_load_ps(w + 16);   [local]
  125  vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
  156  const __m128 vk1x0123 = _mm_load_ps(w + 16);   [local]
  157  vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));

up8x4-minmax-wasmsimd-x86-acc2.c, in xnn_f32_dwconv_minmax_ukernel_up8x4__wasmsimd_x86_acc2():
   77  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
   79  v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
  128  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  129  v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
  162  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  163  v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);

up4x4-wasmsimd.c, in xnn_f32_dwconv_ukernel_up4x4__wasmsimd():
   70  const v128_t vk1x0123 = wasm_v128_load(w + 8);   [local]
   71  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));
  101  const v128_t vk1x0123 = wasm_v128_load(w + 8);   [local]
  102  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));

up4x4-minmax-sse.c, in xnn_f32_dwconv_minmax_ukernel_up4x4__sse():
   72  const __m128 vk1x0123 = _mm_load_ps(w + 8);   [local]
   73  vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
  104  const __m128 vk1x0123 = _mm_load_ps(w + 8);   [local]
  105  vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));

up4x4-minmax-wasmsimd-arm.c, in xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm():
   72  const v128_t vk1x0123 = wasm_v128_load(w + 8);   [local]
   73  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));
  105  const v128_t vk1x0123 = wasm_v128_load(w + 8);   [local]
  106  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));

up4x4-minmax-wasmsimd-arm-acc2.c, in xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_arm_acc2():
   72  const v128_t vk1x0123 = wasm_v128_load(w + 8);   [local]
   73  v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
  107  const v128_t vk1x0123 = wasm_v128_load(w + 8);   [local]
  108  v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);

up4x4-minmax-sse-acc2.c, in xnn_f32_dwconv_minmax_ukernel_up4x4__sse_acc2():
   72  const __m128 vk1x0123 = _mm_load_ps(w + 8);   [local]
   73  __m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);
  106  const __m128 vk1x0123 = _mm_load_ps(w + 8);   [local]
  107  __m128 vacc0123p1 = _mm_mul_ps(vi1x0123, vk1x0123);

up4x4-minmax-neon.c, in xnn_f32_dwconv_minmax_ukernel_up4x4__neon():
   69  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
   70  vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
   95  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
   96  vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);

up4x4-minmax-neonfma-acc2.c, in xnn_f32_dwconv_minmax_ukernel_up4x4__neonfma_acc2():
   69  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
   70  float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
   97  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
   98  float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);

up4x4-minmax-wasmsimd-x86.c, in xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86():
   72  const v128_t vk1x0123 = wasm_v128_load(w + 8);   [local]
   73  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));
  105  const v128_t vk1x0123 = wasm_v128_load(w + 8);   [local]
  106  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));

up4x4-minmax-neonfma.c, in xnn_f32_dwconv_minmax_ukernel_up4x4__neonfma():
   69  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
   70  vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);
   95  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
   96  vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123);

up4x4-minmax-neon-acc2.c, in xnn_f32_dwconv_minmax_ukernel_up4x4__neon_acc2():
   69  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
   70  float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
   97  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
   98  float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);

up4x4-minmax-wasmsimd-x86-acc2.c, in xnn_f32_dwconv_minmax_ukernel_up4x4__wasmsimd_x86_acc2():
   72  const v128_t vk1x0123 = wasm_v128_load(w + 8);   [local]
   73  v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);
  107  const v128_t vk1x0123 = wasm_v128_load(w + 8);   [local]
  108  v128_t vacc0123p1 = wasm_f32x4_mul(vi1x0123, vk1x0123);

up8x9-minmax-sse.c, in xnn_f32_dwconv_minmax_ukernel_up8x9__sse():
  102  const __m128 vk1x0123 = _mm_load_ps(w + 16);   [local]
  104  vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
  194  const __m128 vk1x0123 = _mm_load_ps(w + 16);   [local]
  195  vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));
  256  const __m128 vk1x0123 = _mm_load_ps(w + 16);   [local]
  257  vacc0123p0 = _mm_add_ps(vacc0123p0, _mm_mul_ps(vi1x0123, vk1x0123));

up8x9-wasmsimd.c, in xnn_f32_dwconv_ukernel_up8x9__wasmsimd():
  100  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  102  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));
  190  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  191  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));
  251  const v128_t vk1x0123 = wasm_v128_load(w + 16);   [local]
  252  vacc0123p0 = wasm_f32x4_add(vacc0123p0, wasm_f32x4_mul(vi1x0123, vk1x0123));

up8x9-minmax-neon.c, in xnn_f32_dwconv_minmax_ukernel_up8x9__neon():
   99  const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;   [local]
  101  vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
  171  const float32x4_t vk1x0123 = vld1q_f32(w + 12);   [local]
  172  vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);
  217  const float32x4_t vk1x0123 = vld1q_f32(w + 16);   [local]
  218  vacc0123p0 = vmlaq_f32(vacc0123p0, vi1x0123, vk1x0123);