/external/XNNPACK/src/f32-dwconv2d-chw/gen/ |
D | 3x3s2p1-minmax-neonfma-3x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_3x4():
    124  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);   (local)
    133  vo2p0 = vfmaq_lane_f32(vo2p0, vi4x7BDF, vget_low_f32(vw0123), 1);
    140  vo1p0 = vfmaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
    214  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x9BDF, 3);   (local)
    220  vo2p0 = vfmaq_lane_f32(vo2p0, vi4x7BDF, vget_low_f32(vw0123), 1);
    227  vo1p0 = vfmaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
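All of the hits in this listing are the same construction: the generated 3x3, stride-2, padding-1 CHW depthwise-convolution micro-kernels build a shifted odd-column vector vi4x7BDF from the previous block's odd columns (vi4x1357) and the current block's odd columns (vi4x9BDF, or .val[1] of a two-vector deinterleaved load in the main loop), then feed it to two lane-indexed multiply-accumulates against the packed weight vectors. The sketch below is a minimal standalone illustration of the NEON form, not taken from the kernel: the input and weight values and the plain (non-deinterleaved) loads are made up; only the intrinsics and the lane arithmetic mirror the lines above.

  /* Standalone sketch of the vi4x7BDF construction; values are invented so
     that lane contents equal column indices, names follow the listing. */
  #include <arm_neon.h>
  #include <stdio.h>

  int main(void) {
    const float prev[4] = {1.0f, 3.0f, 5.0f, 7.0f};     /* odd columns, previous block */
    const float curr[4] = {9.0f, 11.0f, 13.0f, 15.0f};  /* odd columns, current block  */
    const float32x4_t vi4x1357 = vld1q_f32(prev);
    const float32x4_t vi4x9BDF = vld1q_f32(curr);

    /* vextq_f32(a, b, 3) -> { a[3], b[0], b[1], b[2] }: the last odd column of
       the previous block followed by the first three odd columns of the
       current block, i.e. what appears to be one column operand of each
       stride-2 3x3 window. */
    const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x9BDF, 3);

    /* Multiply-accumulate against one broadcast weight lane, matching the
       vfmaq_lane_f32(..., vget_low_f32(vw0123), 1) lines of the listing. */
    const float vw[4] = {0.5f, 2.0f, 0.25f, 1.0f};  /* dummy weights */
    const float32x4_t vw0123 = vld1q_f32(vw);
    float32x4_t vo2p0 = vdupq_n_f32(0.0f);
    vo2p0 = vfmaq_lane_f32(vo2p0, vi4x7BDF, vget_low_f32(vw0123), 1);

    float out[4];
    vst1q_f32(out, vo2p0);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 14 18 22 26 */
    return 0;
  }

The plain NEON variants further down do exactly the same thing with vmlaq_lane_f32 (separate multiply and add) in place of the fused vfmaq_lane_f32.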
|
D | 3x3s2p1-minmax-neon-3x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_3x4():
    124  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);   (local)
    133  vo2p0 = vmlaq_lane_f32(vo2p0, vi4x7BDF, vget_low_f32(vw0123), 1);
    140  vo1p0 = vmlaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
    214  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x9BDF, 3);   (local)
    220  vo2p0 = vmlaq_lane_f32(vo2p0, vi4x7BDF, vget_low_f32(vw0123), 1);
    227  vo1p0 = vmlaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
|
D | 3x3s2p1-minmax-wasmsimd-x86-loadsplat-3x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_3x4():
    166  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    175  vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
    182  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
    262  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    268  vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
    275  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
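The WAsm SIMD loadsplat variants produce the same vector with wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6), i.e. lane 3 of the first operand followed by lanes 0..2 of the second, and multiply it by pre-broadcast weight vectors vk00/vk20 using a separate mul and add, as in the lines above. A minimal sketch with made-up values; the build command and the final print are illustrative only.

  /* Standalone sketch of the wasm_v32x4_shuffle construction used by the
     loadsplat kernels. Build with a WebAssembly SIMD toolchain, e.g.:
       clang --target=wasm32-wasi -msimd128 -O2 shuffle.c            */
  #include <wasm_simd128.h>
  #include <stdio.h>

  int main(void) {
    const float prev[4] = {1.0f, 3.0f, 5.0f, 7.0f};     /* odd columns, previous block */
    const float curr[4] = {9.0f, 11.0f, 13.0f, 15.0f};  /* odd columns, current block  */
    const v128_t vi4x1357 = wasm_v128_load(prev);
    const v128_t vi4x9BDF = wasm_v128_load(curr);

    /* Indices 3,4,5,6 select lane 3 of the first operand and lanes 0..2 of
       the second (indices 4..7 address the second operand): {7, 9, 11, 13}. */
    const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);

    /* "loadsplat" flavour: the weight was broadcast once up front (vk00). */
    const v128_t vk00 = wasm_f32x4_splat(2.0f);  /* dummy weight */
    v128_t vo2p0 = wasm_f32x4_splat(0.0f);
    vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));

    float out[4];
    wasm_v128_store(out, vo2p0);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 14 18 22 26 */
    return 0;
  }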
|
D | 3x3s2p1-minmax-wasmsimd-arm-loadsplat-3x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4():
    166  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    175  vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
    182  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
    262  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    268  vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
    275  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
|
D | 3x3s2p1-minmax-wasmsimd-arm-splat-3x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_3x4():
    156  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    165  …vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1,…
    172  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
    252  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    258  …vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1,…
    265  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
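The splat (as opposed to loadsplat) flavour does not keep one broadcast vector per weight; the truncated lines above multiply vi4x7BDF by a lane of a packed weight vector that is broadcast on the fly with a self-shuffle of vw0123 or vw4567. A small sketch of that broadcast, under two assumptions not shown in this listing: the self-shuffle repeats a single lane index, and vw0123 packs the bias and the first kernel-row weights.

  /* Sketch of the "splat" flavour's on-the-fly weight broadcast. */
  #include <wasm_simd128.h>
  #include <stdio.h>

  int main(void) {
    /* Assumed layout: lane 1 of vw0123 holds the weight we want. */
    const float w[4] = {0.0f, 2.0f, -1.0f, 0.5f};  /* dummy packed weights */
    const v128_t vw0123 = wasm_v128_load(w);

    /* Self-shuffle with indices 1,1,1,1 broadcasts lane 1 across the vector,
       the on-demand equivalent of a pre-split vk00. */
    const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);

    float out[4];
    wasm_v128_store(out, vk00);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 2 2 2 2 */
    return 0;
  }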
|
D | 3x3s2p1-minmax-neonfma-2x4-acc2.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_2x4_acc2():
    106  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);   (local)
    116  vo1p1 = vfmaq_lane_f32(vo1p1, vi4x7BDF, vget_high_f32(vw4567), 1);
    175  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x9BDF, 3);   (local)
    184  vo1p1 = vfmaq_lane_f32(vo1p1, vi4x7BDF, vget_high_f32(vw4567), 1);
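The *_acc2 kernels split each output row's accumulation across two registers, which is why the use above lands in vo1p1 rather than vo1p0; presumably the two partial sums are added together after the last multiply-accumulate, but that line is outside this listing. A schematic NEON sketch of the idea, with a hypothetical helper name and made-up values:

  #include <arm_neon.h>
  #include <stdio.h>

  /* Hypothetical helper (not from XNNPACK): accumulate four operand vectors
     against four weight lanes on two independent chains, then merge. */
  static float32x4_t accumulate_two_chains(
      float32x4_t va, float32x4_t vb, float32x4_t vc, float32x4_t vd,
      float32x2_t vw_lo, float32x2_t vw_hi) {
    float32x4_t vo1p0 = vmulq_lane_f32(va, vw_lo, 0);   /* chain 0 */
    float32x4_t vo1p1 = vmulq_lane_f32(vb, vw_lo, 1);   /* chain 1 */
    vo1p0 = vfmaq_lane_f32(vo1p0, vc, vw_hi, 0);
    vo1p1 = vfmaq_lane_f32(vo1p1, vd, vw_hi, 1);        /* e.g. the vi4x7BDF term above */
    return vaddq_f32(vo1p0, vo1p1);                     /* merge the partial sums */
  }

  int main(void) {
    const float x[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    const float w[4] = {1.0f, 1.0f, 1.0f, 1.0f};
    const float32x4_t v = vld1q_f32(x);
    const float32x4_t vr =
        accumulate_two_chains(v, v, v, v, vld1_f32(w), vld1_f32(w + 2));
    float out[4];
    vst1q_f32(out, vr);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 4 8 12 16 */
    return 0;
  }

Splitting the nine taps across two accumulators shortens the FMA dependency chain at the cost of one extra register and a final add.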
|
D | 3x3s2p1-minmax-neon-2x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_2x4():
    106  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);   (local)
    116  vo1p0 = vmlaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
    173  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x9BDF, 3);   (local)
    182  vo1p0 = vmlaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
|
D | 3x3s2p1-minmax-neonfma-2x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_2x4():
    106  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);   (local)
    116  vo1p0 = vfmaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
    173  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x9BDF, 3);   (local)
    182  vo1p0 = vfmaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
|
D | 3x3s2p1-minmax-wasmsimd-x86-splat-3x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_3x4():
    156  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    165  …vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1,…
    172  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
    252  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    258  …vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1,…
    265  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
|
D | 3x3s2p1-minmax-neonfma-4x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_4x4():
    142  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);   (local)
    155  vo2p0 = vfmaq_lane_f32(vo2p0, vi4x7BDF, vget_low_f32(vw0123), 1);
    164  vo1p0 = vfmaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
    255  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x9BDF, 3);   (local)
    263  vo2p0 = vfmaq_lane_f32(vo2p0, vi4x7BDF, vget_low_f32(vw0123), 1);
    272  vo1p0 = vfmaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
|
D | 3x3s2p1-minmax-neon-4x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neon_4x4():
    142  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);   (local)
    155  vo2p0 = vmlaq_lane_f32(vo2p0, vi4x7BDF, vget_low_f32(vw0123), 1);
    164  vo1p0 = vmlaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
    255  const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x9BDF, 3);   (local)
    263  vo2p0 = vmlaq_lane_f32(vo2p0, vi4x7BDF, vget_low_f32(vw0123), 1);
    272  vo1p0 = vmlaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
|
D | 3x3s2p1-minmax-sse-3x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_3x4():
    168  const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);   (local)
    182  vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x7BDF, vk00));
    187  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
    271  const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);   (local)
    285  vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x7BDF, vk00));
    290  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
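The SSE kernels reach the same result without an arbitrary two-operand lane shuffle: _mm_move_ss(a, b) replaces lane 0 of a with lane 0 of b. Reading the names, vi4xF9BD looks like the current block's odd columns rotated right by one lane and vi4x7531 like the previous block's odd columns in reversed order; both are built earlier in the kernel and do not appear in this listing, so that reading is an assumption. A standalone sketch with made-up values:

  /* Standalone sketch of the SSE construction; lane contents are invented so
     that they equal column indices, names follow the listing. */
  #include <xmmintrin.h>
  #include <stdio.h>

  int main(void) {
    const float f9bd[4] = {15.0f, 9.0f, 11.0f, 13.0f};  /* lanes 0..3: F,9,B,D (assumed layout) */
    const float x7531[4] = {7.0f, 5.0f, 3.0f, 1.0f};    /* lanes 0..3: 7,5,3,1 (assumed layout) */
    const __m128 vi4xF9BD = _mm_loadu_ps(f9bd);
    const __m128 vi4x7531 = _mm_loadu_ps(x7531);

    /* lane 0 <- vi4x7531 lane 0 (= 7), lanes 1..3 <- vi4xF9BD lanes 1..3 (= 9, 11, 13) */
    const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);

    /* Multiply by a broadcast weight and accumulate, as in the listing. */
    const __m128 vk00 = _mm_set1_ps(2.0f);  /* dummy weight */
    __m128 vo2p0 = _mm_setzero_ps();
    vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x7BDF, vk00));

    float out[4];
    _mm_storeu_ps(out, vo2p0);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 14 18 22 26 */
    return 0;
  }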
|
D | 3x3s2p1-minmax-wasmsimd-x86-loadsplat-2x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4():
    140  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    150  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
    211  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    220  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
|
D | 3x3s2p1-minmax-wasmsimd-x86-loadsplat-2x4-acc2.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2():
    140  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    150  vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, vk20));
    213  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    222  vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, vk20));
|
D | 3x3s2p1-minmax-wasmsimd-arm-loadsplat-2x4-acc2.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4_acc2():
    140  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    150  vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, vk20));
    213  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    222  vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, vk20));
|
D | 3x3s2p1-minmax-wasmsimd-arm-splat-2x4-acc2.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4_acc2():
    130  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    140  …vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
    203  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    212  …vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
|
D | 3x3s2p1-minmax-wasmsimd-arm-splat-2x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_2x4():
    130  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    140  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
    201  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    210  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
|
D | 3x3s2p1-minmax-wasmsimd-arm-loadsplat-2x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_2x4():
    140  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    150  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
    211  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    220  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
|
D | 3x3s2p1-minmax-wasmsimd-arm-loadsplat-4x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4():
    192  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    205  vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
    214  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
    313  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    321  vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
    330  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
|
D | 3x3s2p1-minmax-sse-2x4-acc2.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4_acc2():
    138  const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);   (local)
    151  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
    216  const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);   (local)
    229  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
|
D | 3x3s2p1-minmax-sse-2x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_2x4():
    138  const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);   (local)
    151  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
    214  const __m128 vi4x7BDF = _mm_move_ss(vi4xF9BD, vi4x7531);   (local)
    227  vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi4x7BDF, vk20));
|
D | 3x3s2p1-minmax-wasmsimd-x86-splat-2x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4():
    130  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    140  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
    201  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    210  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
|
D | 3x3s2p1-minmax-wasmsimd-x86-splat-2x4-acc2.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4_acc2():
    130  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    140  …vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
    203  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    212  …vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
|
D | 3x3s2p1-minmax-wasmsimd-x86-loadsplat-4x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4():
    192  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    205  vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
    214  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
    313  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    321  vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
    330  vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
|
D | 3x3s2p1-minmax-wasmsimd-arm-splat-4x4.c | in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4():
    182  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    195  …vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1,…
    204  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
    303  const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);   (local)
    311  …vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1,…
    320  …vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3,…
|