/external/XNNPACK/src/f32-dwconv2d-chw/gen/

D | 3x3s2p1-minmax-wasmsimd-arm-loadsplat-4x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_4x4()
    143  const v128_t vi7x89AB = wasm_v128_load(i7);  (local)
    164  const v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
    165  const v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
    270  const v128_t vi7x89AB = wasm_v128_load(i7);  (local)
    289  const v128_t vi7x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6));
    290  const v128_t vi7x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7));

D | 3x3s2p1-minmax-wasmsimd-x86-loadsplat-4x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4()
    143  const v128_t vi7x89AB = wasm_v128_load(i7);  (local)
    164  const v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
    165  const v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
    270  const v128_t vi7x89AB = wasm_v128_load(i7);  (local)
    289  const v128_t vi7x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6));
    290  const v128_t vi7x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7));

D | 3x3s2p1-minmax-wasmsimd-arm-splat-4x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_4x4()
    133  const v128_t vi7x89AB = wasm_v128_load(i7);  (local)
    154  const v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
    155  const v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
    260  const v128_t vi7x89AB = wasm_v128_load(i7);  (local)
    279  const v128_t vi7x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6));
    280  const v128_t vi7x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7));

D | 3x3s2p1-minmax-wasmsimd-x86-splat-4x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4()
    133  const v128_t vi7x89AB = wasm_v128_load(i7);  (local)
    154  const v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
    155  const v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
    260  const v128_t vi7x89AB = wasm_v128_load(i7);  (local)
    279  const v128_t vi7x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6));
    280  const v128_t vi7x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7));

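The four 3x3s2p1 WAsm SIMD variants above all perform the same stride-2 column split: two wasm_v32x4_shuffle calls deinterleave eight consecutive row pixels into even (8 A C E) and odd (9 B D F) lanes. A minimal standalone sketch of that step, assuming clang's <wasm_simd128.h>; the function and buffer names here are illustrative, not XNNPACK's:

    #include <wasm_simd128.h>

    // Split eight consecutive floats into even- and odd-indexed lanes, the same
    // pattern the shuffles above apply to vi7x89AB/vi7xCDEF.
    static void deinterleave_stride2(const float* row, v128_t* even, v128_t* odd) {
      const v128_t lo = wasm_v128_load(row);      // lanes 8 9 A B
      const v128_t hi = wasm_v128_load(row + 4);  // lanes C D E F
      *even = wasm_v32x4_shuffle(lo, hi, 0, 2, 4, 6);  // 8 A C E
      *odd  = wasm_v32x4_shuffle(lo, hi, 1, 3, 5, 7);  // 9 B D F
    }
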
D | 3x3s2p1-minmax-sse-4x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_4x4()
    132  const __m128 vi7x89AB = _mm_loadu_ps(i7);  (local)
    153  const __m128 vi7x8ACE = _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    154  const __m128 vi7x9BDF = _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
    264  const __m128 vi7x89AB = _mm_loadu_ps(i7);  (local)
    283  const __m128 vi7x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
    284  const __m128 vi7x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));

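The SSE kernel does the identical split with _mm_shuffle_ps: _MM_SHUFFLE(2, 0, 2, 0) selects lanes {a0, a2, b0, b2} (the even columns) and _MM_SHUFFLE(3, 1, 3, 1) selects {a1, a3, b1, b3} (the odd ones). A sketch under the same assumptions as above:

    #include <xmmintrin.h>

    // SSE version of the even/odd split used by the 3x3s2p1 kernels above.
    static void deinterleave_stride2_sse(const float* row, __m128* even, __m128* odd) {
      const __m128 lo = _mm_loadu_ps(row);      // lanes 8 9 A B
      const __m128 hi = _mm_loadu_ps(row + 4);  // lanes C D E F
      *even = _mm_shuffle_ps(lo, hi, _MM_SHUFFLE(2, 0, 2, 0));  // 8 A C E
      *odd  = _mm_shuffle_ps(lo, hi, _MM_SHUFFLE(3, 1, 3, 1));  // 9 B D F
    }
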
D | 5x5p2-minmax-neon-4x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_4x4()
    114  const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;  (local)
    224  const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
    265  const float32x4_t vi7x6789 = vextq_f32(vi7x4567, vi7x89AB, 2);
    266  vi7x4567 = vi7x89AB;
    323  float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;  (local)
    332  vi7x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi7x89AB)));
    442  const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
    483  const float32x4_t vi7x6789 = vextq_f32(vi7x4567, vi7x89AB, 2);
    484  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-neon-4x4-acc2.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_4x4_acc2()
    114  const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;  (local)
    224  const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
    265  const float32x4_t vi7x6789 = vextq_f32(vi7x4567, vi7x89AB, 2);
    266  vi7x4567 = vi7x89AB;
    327  float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;  (local)
    336  vi7x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi7x89AB)));
    446  const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
    487  const float32x4_t vi7x6789 = vextq_f32(vi7x4567, vi7x89AB, 2);
    488  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-neonfma-4x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_4x4()
    114  const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;  (local)
    224  const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
    265  const float32x4_t vi7x6789 = vextq_f32(vi7x4567, vi7x89AB, 2);
    266  vi7x4567 = vi7x89AB;
    323  float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;  (local)
    332  vi7x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi7x89AB)));
    442  const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
    483  const float32x4_t vi7x6789 = vextq_f32(vi7x4567, vi7x89AB, 2);
    484  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-neonfma-4x4-acc2.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_4x4_acc2()
    114  const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;  (local)
    224  const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
    265  const float32x4_t vi7x6789 = vextq_f32(vi7x4567, vi7x89AB, 2);
    266  vi7x4567 = vi7x89AB;
    327  float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;  (local)
    336  vi7x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi7x89AB)));
    446  const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
    487  const float32x4_t vi7x6789 = vextq_f32(vi7x4567, vi7x89AB, 2);
    488  vi7x4567 = vi7x89AB;

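Unlike the stride-2 kernels, the 5x5p2 NEON variants above keep a sliding four-wide window over each padded row: vextq_f32(a, b, n) concatenates a:b and extracts four lanes starting at lane n, and the trailing vector is then rotated into place for the next iteration. A sketch of just that window step (names illustrative):

    #include <arm_neon.h>

    // Form the shifted windows x5..x8 and x6..x9 from x4..x7 and x8..xB,
    // then rotate the register pair, as in the vextq_f32 lines above.
    static void slide_window(float32x4_t* v4567, const float* i7,
                             float32x4_t* v5678, float32x4_t* v6789) {
      const float32x4_t v89AB = vld1q_f32(i7);  // next four inputs
      *v5678 = vextq_f32(*v4567, v89AB, 1);     // lanes 5 6 7 8
      *v6789 = vextq_f32(*v4567, v89AB, 2);     // lanes 6 7 8 9
      *v4567 = v89AB;                           // rotate for the next iteration
    }
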
D | 5x5p2-minmax-wasmsimd-x86-loadsplat-4x4-acc2.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2()
    143  const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    253  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    294  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    295  vi7x4567 = vi7x89AB;
    355  v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    364  vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
    474  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    515  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    516  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-wasmsimd-arm-loadsplat-4x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4()
    143  const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    253  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    294  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    295  vi7x4567 = vi7x89AB;
    351  v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    360  vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
    470  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    511  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    512  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-wasmsimd-arm-loadsplat-4x4-acc2.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_4x4_acc2()
    143  const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    253  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    294  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    295  vi7x4567 = vi7x89AB;
    355  v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    364  vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
    474  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    515  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    516  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-wasmsimd-x86-loadsplat-4x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4()
    143  const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    253  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    294  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    295  vi7x4567 = vi7x89AB;
    351  v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    360  vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
    470  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    511  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    512  vi7x4567 = vi7x89AB;

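The WAsm SIMD 5x5p2 variants express the same window with two-input shuffles: indices 1, 2, 3, 4 slide the four-lane window by one element and 2, 3, 4, 5 by two, matching the NEON vextq_f32 calls exactly. A sketch under the same assumptions as the earlier WAsm example:

    #include <wasm_simd128.h>

    // WAsm SIMD analogue of the NEON vext windows above.
    static void slide_window_wasm(v128_t* v4567, const float* i7,
                                  v128_t* v5678, v128_t* v6789) {
      const v128_t v89AB = wasm_v128_load(i7);
      *v5678 = wasm_v32x4_shuffle(*v4567, v89AB, 1, 2, 3, 4);  // lanes 5 6 7 8
      *v6789 = wasm_v32x4_shuffle(*v4567, v89AB, 2, 3, 4, 5);  // lanes 6 7 8 9
      *v4567 = v89AB;                                          // rotate
    }
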
D | 3x3s2p1-minmax-sse-5x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_5x4()
    144  const __m128 vi7x89AB = _mm_loadu_ps(i7);  (local)
    171  const __m128 vi7x8ACE = _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    172  const __m128 vi7x9BDF = _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
    305  const __m128 vi7x89AB = _mm_loadu_ps(i7);  (local)
    328  const __m128 vi7x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
    329  const __m128 vi7x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));

D | 3x3s2p1-minmax-sse-6x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_6x4()
    156  const __m128 vi7x89AB = _mm_loadu_ps(i7);  (local)
    189  const __m128 vi7x8ACE = _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    190  const __m128 vi7x9BDF = _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
    346  const __m128 vi7x89AB = _mm_loadu_ps(i7);  (local)
    373  const __m128 vi7x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
    374  const __m128 vi7x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi7x89AB, vi7xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));

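In every file listed here the second occurrence group handles the remainder columns: the freshly shuffled lanes are ANDed with precomputed all-ones/all-zeros masks so that pixels past the row edge contribute 0.0f to the convolution. A sketch of the SSE form, assuming vmask_even and vmask_odd were built from the remainder width beforehand:

    #include <xmmintrin.h>

    // Masked variant of the even/odd split for the final partial columns.
    static void deinterleave_masked(const float* row,
                                    __m128 vmask_even, __m128 vmask_odd,
                                    __m128* even, __m128* odd) {
      const __m128 lo = _mm_loadu_ps(row);
      const __m128 hi = _mm_loadu_ps(row + 4);
      *even = _mm_and_ps(vmask_even, _mm_shuffle_ps(lo, hi, _MM_SHUFFLE(2, 0, 2, 0)));
      *odd  = _mm_and_ps(vmask_odd,  _mm_shuffle_ps(lo, hi, _MM_SHUFFLE(3, 1, 3, 1)));
    }
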
D | 5x5p2-minmax-wasmsimd-arm-splat-4x4-acc2.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4_acc2()
    117  const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    227  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    268  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    269  vi7x4567 = vi7x89AB;
    329  v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    338  vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
    448  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    489  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    490  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-wasmsimd-x86-loadsplat-5x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4()
    152  const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    281  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    328  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    329  vi7x4567 = vi7x89AB;
    396  v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    406  vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
    535  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    582  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    583  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-neonfma-5x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_5x4()
    123  const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;  (local)
    252  const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
    299  const float32x4_t vi7x6789 = vextq_f32(vi7x4567, vi7x89AB, 2);
    300  vi7x4567 = vi7x89AB;
    368  float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;  (local)
    378  vi7x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi7x89AB)));
    507  const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
    554  const float32x4_t vi7x6789 = vextq_f32(vi7x4567, vi7x89AB, 2);
    555  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-neon-5x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_5x4()
    123  const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;  (local)
    252  const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
    299  const float32x4_t vi7x6789 = vextq_f32(vi7x4567, vi7x89AB, 2);
    300  vi7x4567 = vi7x89AB;
    368  float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;  (local)
    378  vi7x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi7x89AB)));
    507  const float32x4_t vi7x5678 = vextq_f32(vi7x4567, vi7x89AB, 1);
    554  const float32x4_t vi7x6789 = vextq_f32(vi7x4567, vi7x89AB, 2);
    555  vi7x4567 = vi7x89AB;

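NEON has no bitwise AND on float vectors, so the neon/neonfma 5x5p2 kernels apply the remainder mask by reinterpreting through uint32x4_t, as at source line 332 and its siblings above; the WAsm SIMD variants need only a single wasm_v128_and because v128_t is typeless. A one-function sketch:

    #include <arm_neon.h>

    // Zero the out-of-bounds lanes of v: round-trip through u32 for the bitwise AND.
    static float32x4_t mask_remainder(float32x4_t v, uint32x4_t vmask) {
      return vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(v)));
    }
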
D | 5x5p2-minmax-wasmsimd-arm-splat-4x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_4x4()
    117  const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    227  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    268  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    269  vi7x4567 = vi7x89AB;
    325  v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    334  vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
    444  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    485  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    486  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-wasmsimd-arm-loadsplat-5x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4()
    152  const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    281  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    328  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    329  vi7x4567 = vi7x89AB;
    396  v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    406  vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
    535  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    582  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    583  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-wasmsimd-x86-splat-4x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4()
    117  const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    227  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    268  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    269  vi7x4567 = vi7x89AB;
    325  v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    334  vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
    444  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    485  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    486  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-wasmsimd-x86-splat-4x4-acc2.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_4x4_acc2()
    117  const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    227  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    268  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    269  vi7x4567 = vi7x89AB;
    329  v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    338  vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
    448  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    489  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    490  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-wasmsimd-x86-splat-5x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4()
    126  const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    255  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    302  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    303  vi7x4567 = vi7x89AB;
    370  v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    380  vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
    509  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    556  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    557  vi7x4567 = vi7x89AB;

D | 5x5p2-minmax-wasmsimd-arm-splat-5x4.c | all occurrences in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
    126  const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    255  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    302  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    303  vi7x4567 = vi7x89AB;
    370  v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;  (local)
    380  vi7x89AB = wasm_v128_and(vmask, vi7x89AB);
    509  const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);
    556  const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
    557  vi7x4567 = vi7x89AB;