/external/XNNPACK/src/f32-dwconv2d-chw/gen/
D  3x3p1-minmax-neonfma-3x4.c
    105  const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_3x4() local
    111  vo2p0 = vfmaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_3x4()
    114  vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_3x4()
    117  vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_3x4()
    192  const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_3x4() local
    198  vo2p0 = vfmaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_3x4()
    201  vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_3x4()
    204  vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_3x4()

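The NEON/NEONFMA kernels listed here form the vi2x3456 window with vextq_f32 and consume it through lane-indexed multiply-accumulates. The following standalone sketch shows that idiom in isolation; the helper name, the row2 pointer, and the vw_hi weight pair are illustrative, not XNNPACK code.

#include <arm_neon.h>

// Hypothetical helper, not from XNNPACK: loads eight floats of one input row,
// forms the {i3, i4, i5, i6} window, and folds one kernel tap into vo0p0.
static float32x4_t accumulate_tap_neon(const float* row2, float32x4_t vo0p0, float32x2_t vw_hi)
{
  const float32x4_t vi2x0123 = vld1q_f32(row2);      // elements 0..3
  const float32x4_t vi2x4567 = vld1q_f32(row2 + 4);  // elements 4..7
  // vextq_f32(a, b, 3) keeps lanes 3..6 of the concatenation a:b,
  // i.e. the shifted window {i2[3], i2[4], i2[5], i2[6]}.
  const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);
#if defined(__ARM_FEATURE_FMA)
  return vfmaq_lane_f32(vo0p0, vi2x3456, vw_hi, 1);  // fused multiply-add (NEONFMA kernels)
#else
  return vmlaq_lane_f32(vo0p0, vi2x3456, vw_hi, 1);  // separate multiply-add (NEON kernels)
#endif
}
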
D  3x3p1-minmax-wasmsimd-arm-loadsplat-3x4.c
    123  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4() local
    129  vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4()
    131  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4()
    133  vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4()
    202  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4() local
    208  vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4()
    210  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4()
    212  vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_3x4()

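The WebAssembly SIMD loadsplat kernels build the same window with wasm_v32x4_shuffle and multiply by pre-broadcast kernel taps (vk00, vk10, vk20). A minimal sketch of that pattern, with made-up names for the inputs, could look like this:

#include <wasm_simd128.h>

// Hypothetical helper, not from XNNPACK: same window via a 4-lane shuffle,
// with the kernel tap broadcast once ("loadsplat" style).
static v128_t accumulate_tap_wasm(const float* row2, v128_t vo1p0, float k10)
{
  const v128_t vi2x0123 = wasm_v128_load(row2);
  const v128_t vi2x4567 = wasm_v128_load(row2 + 4);
  // Select lanes 3,4,5,6 of the eight-lane concatenation: {i3, i4, i5, i6}.
  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
  const v128_t vk10 = wasm_f32x4_splat(k10);  // one 3x3 tap in all four lanes
  return wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
}
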
D  3x3p1-minmax-neon-3x4.c
    105  const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_3x4() local
    111  vo2p0 = vmlaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_3x4()
    114  vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_3x4()
    117  vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_3x4()
    192  const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_3x4() local
    198  vo2p0 = vmlaq_lane_f32(vo2p0, vi2x3456, vget_low_f32(vw0123), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_3x4()
    201  vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_3x4()
    204  vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_3x4()

D  3x3p1-minmax-ssse3-3x4.c
    116  …const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_3x4() local
    122  vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_3x4()
    124  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_3x4()
    126  vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_3x4()
    196  …const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_3x4() local
    202  vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_3x4()
    204  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_3x4()
    206  vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_3x4()

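The SSSE3 kernels splice the two input quads with PALIGNR, casting through __m128i because that instruction operates on integer vectors. The sketch below illustrates the technique under the assumption of a 12-byte (three-float) shift; it is not a reconstruction of the truncated lines above.

#include <tmmintrin.h>

// Hypothetical helper, not from XNNPACK: forms {i3, i4, i5, i6} with PALIGNR.
static __m128 accumulate_tap_ssse3(const float* row2, __m128 vo0p0, __m128 vk20)
{
  const __m128 vi2x0123 = _mm_loadu_ps(row2);
  const __m128 vi2x4567 = _mm_loadu_ps(row2 + 4);
  // PALIGNR shifts the byte concatenation vi2x4567:vi2x0123 right by 12 bytes
  // (three floats), leaving {i3, i4, i5, i6}; the casts switch float/int views.
  const __m128 vi2x3456 = _mm_castsi128_ps(
      _mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
  return _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
}
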
D  3x3p1-minmax-wasmsimd-x86-loadsplat-3x4.c
    123  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4() local
    129  vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4()
    131  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4()
    133  vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4()
    202  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4() local
    208  vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4()
    210  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4()
    212  vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_3x4()

D  3x3p1-minmax-neon-2x4-acc2.c
    92   const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4_acc2() local
    99   vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4_acc2()
    101  vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4_acc2()
    163  const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4_acc2() local
    170  vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4_acc2()
    172  vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4_acc2()

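The *-acc2 variants keep two accumulators per output row (here vo0p0 and vo0p1) so that successive multiply-accumulates do not serialize on a single register. A rough sketch with hypothetical window and weight names and an assumed final combination step; only the vo0p0/vo0p1 split is taken from the listing:

#include <arm_neon.h>

// Hypothetical two-accumulator sketch; vi2x2345 and the closing vaddq_f32 are
// illustrative assumptions, not XNNPACK code.
static float32x4_t row_taps_acc2(float32x4_t vi2x2345, float32x4_t vi2x3456,
                                 float32x2_t vw_lo, float32x2_t vw_hi)
{
  float32x4_t vo0p0 = vdupq_n_f32(0.0f);
  float32x4_t vo0p1 = vdupq_n_f32(0.0f);
  vo0p0 = vmlaq_lane_f32(vo0p0, vi2x2345, vw_lo, 0);  // one tap into accumulator 0
  vo0p1 = vmlaq_lane_f32(vo0p1, vi2x3456, vw_hi, 1);  // next tap into accumulator 1
  return vaddq_f32(vo0p0, vo0p1);                     // combine the partial sums
}
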
D  3x3p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c
    109  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2() local
    115  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2()
    116  vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2()
    172  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2() local
    178  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2()
    179  vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4_acc2()

D  3x3p1-minmax-neonfma-2x4-acc2.c
    92   const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4_acc2() local
    99   vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4_acc2()
    101  vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4_acc2()
    163  const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4_acc2() local
    170  vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4_acc2()
    172  vo0p1 = vfmaq_lane_f32(vo0p1, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4_acc2()

D  3x3p1-minmax-wasmsimd-x86-loadsplat-2x4.c
    109  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4() local
    115  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4()
    116  vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4()
    170  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4() local
    176  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4()
    177  vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4()

D  3x3p1-minmax-neon-2x4.c
    92   const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4() local
    99   vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4()
    101  vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4()
    161  const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4() local
    168  vo1p0 = vmlaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4()
    170  vo0p0 = vmlaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4()

D  3x3p1-minmax-ssse3-2x4.c
    102  …const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4() local
    108  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4()
    109  vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4()
    164  …const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4() local
    170  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4()
    171  vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4()

D  3x3p1-minmax-wasmsimd-arm-loadsplat-2x4.c
    109  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4() local
    115  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4()
    116  vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4()
    170  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4() local
    176  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4()
    177  vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_2x4()

D  3x3p1-minmax-neonfma-2x4.c
    92   const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4() local
    99   vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4()
    101  vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4()
    161  const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4() local
    168  vo1p0 = vfmaq_lane_f32(vo1p0, vi2x3456, vget_low_f32(vw4567), 0);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4()
    170  vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_2x4()

D  3x3p1-minmax-sse-3x4.c
    140  const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4() local
    148  vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4()
    150  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4()
    152  vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4()
    252  const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4() local
    260  vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4()
    262  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4()
    264  vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_3x4()

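The plain SSE kernels, which lack PALIGNR, stitch the window together from two rotated quads with MOVSS. Only the final _mm_move_ss step appears in the listing; the shuffle constants below are an assumption about how vi2x7456 and vi2x3012 are produced.

#include <xmmintrin.h>

// Hypothetical helper, not from XNNPACK: window via two rotations plus MOVSS.
static __m128 accumulate_tap_sse(const float* row2, __m128 vo0p0, __m128 vk20)
{
  const __m128 vi2x0123 = _mm_loadu_ps(row2);
  const __m128 vi2x4567 = _mm_loadu_ps(row2 + 4);
  // Assumed rotations: {i3, i0, i1, i2} and {i7, i4, i5, i6}.
  const __m128 vi2x3012 = _mm_shuffle_ps(vi2x0123, vi2x0123, _MM_SHUFFLE(2, 1, 0, 3));
  const __m128 vi2x7456 = _mm_shuffle_ps(vi2x4567, vi2x4567, _MM_SHUFFLE(2, 1, 0, 3));
  // MOVSS takes lane 0 from the second operand, yielding {i3, i4, i5, i6}.
  const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);
  return _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
}
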
D  3x3p1-minmax-wasmsimd-x86-splat-3x4.c
    108  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4() local
    114  …vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4()
    117  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4()
    120  …vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4()
    194  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4() local
    200  …vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4()
    203  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4()
    206  …vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_3x4()

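The *-splat-* kernels differ from the loadsplat ones only in how the weight reaches the multiply: the tap is re-broadcast from the packed weight vector at each use instead of being splatted once up front. A small sketch of that step; the lane choice mirrors the truncated lines above, while the helper itself is illustrative.

#include <wasm_simd128.h>

// Hypothetical helper, not from XNNPACK: "splat" weight handling, broadcasting
// one tap from the packed weight vector at the point of use.
static v128_t accumulate_tap_splat(v128_t vo0p0, v128_t vi2x3456, v128_t vw4567)
{
  // Broadcast lane 3 of vw4567 to all four lanes (shuffle pattern 3,3,3,3),
  // then multiply-accumulate into the output row accumulator.
  const v128_t vk = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  return wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk));
}
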
D  3x3p1-minmax-wasmsimd-arm-splat-3x4.c
    108  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4() local
    114  …vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4()
    117  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4()
    120  …vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4()
    194  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4() local
    200  …vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4()
    203  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4()
    206  …vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_3x4()

D  3x3p1-minmax-wasmsimd-x86-splat-2x4-acc2.c
    95   const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2() local
    102  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2()
    104  …vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2()
    165  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2() local
    172  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2()
    174  …vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4_acc2()

D  3x3p1-minmax-sse-2x4.c
    122  const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4() local
    129  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4()
    130  vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4()
    210  const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4() local
    217  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4()
    218  vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4()

D  3x3p1-minmax-sse-2x4-acc2.c
    122  const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2() local
    129  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2()
    130  vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2()
    212  const __m128 vi2x3456 = _mm_move_ss(vi2x7456, vi2x3012);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2() local
    219  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2()
    220  vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__sse_2x4_acc2()

D  3x3p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c
    109  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2() local
    115  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2()
    116  vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2()
    172  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2() local
    178  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2()
    179  vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_2x4_acc2()

D  3x3p1-minmax-wasmsimd-arm-splat-2x4-acc2.c
    95   const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2() local
    102  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2()
    104  …vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2()
    165  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2() local
    172  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2()
    174  …vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4_acc2()

D  3x3p1-minmax-wasmsimd-arm-splat-2x4.c
    95   const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4() local
    102  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4()
    104  …vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4()
    163  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4() local
    170  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4()
    172  …vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_2x4()

D  3x3p1-minmax-wasmsimd-x86-splat-2x4.c
    95   const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4() local
    102  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4()
    104  …vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4()
    163  const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4() local
    170  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4()
    172  …vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_2x4()

D  3x3p1-minmax-ssse3-2x4-acc2.c
    102  …const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4_acc2() local
    108  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4_acc2()
    109  vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4_acc2()
    166  …const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si…  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4_acc2() local
    172  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4_acc2()
    173  vo0p1 = _mm_add_ps(vo0p1, _mm_mul_ps(vi2x3456, vk20));  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_2x4_acc2()

D  3x3p1-minmax-neonfma-1x4.c
    79   const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4() local
    85   vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4()
    130  const float32x4_t vi2x3456 = vextq_f32(vi2x0123, vi2x4567, 3);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4() local
    136  vo0p0 = vfmaq_lane_f32(vo0p0, vi2x3456, vget_high_f32(vw4567), 1);  in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4()