
Searched refs:vi0xCDEF (Results 1 – 25 of 101) sorted by relevance

In XNNPACK's kernel naming convention, vi0xCDEF denotes the vector register holding input row 0, lanes 0xC–0xF; the matches below come from generated f32 PReLU and depthwise-convolution micro-kernels that load and process that group of four floats.


/external/XNNPACK/src/f32-prelu/gen/
wasmsimd-minmax-1x16.c, in xnn_f32_prelu_ukernel__wasmsimd_minmax_1x16():
    51  v128_t vi0xCDEF = wasm_v128_load(i0 + 12);  [local]
    60  v128_t vacc0xCDEF = wasm_i32x4_max(vi0xCDEF, vzero);
    61  vi0xCDEF = wasm_i32x4_min(vi0xCDEF, vzero);
    66  vacc0xCDEF = wasm_f32x4_add(vacc0xCDEF, wasm_f32x4_mul(vi0xCDEF, vwCDEF));
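The minmax variant exploits the identity PReLU(x) = max(x, 0) + w * min(x, 0); note that the snippet applies integer i32x4 min/max directly to the float lanes, which works because comparing raw IEEE-754 bits against +0 as signed integers is a sign test. A minimal scalar sketch of the identity (illustrative only, not XNNPACK code; names are hypothetical):

    #include <assert.h>

    /* PReLU via the min/max identity the kernels above vectorize:
       the positive half passes through, the negative half is scaled by w. */
    static float prelu_minmax(float x, float w) {
      const float pos = x > 0.0f ? x : 0.0f;  /* max(x, 0), the vacc term   */
      const float neg = x < 0.0f ? x : 0.0f;  /* min(x, 0), the clamped x   */
      return pos + w * neg;                   /* vacc += w * min(x, 0)      */
    }

    int main(void) {
      assert(prelu_minmax( 2.0f, 0.25f) ==  2.0f);
      assert(prelu_minmax(-2.0f, 0.25f) == -0.5f);
      return 0;
    }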
wasmsimd-bitselect-1x16.c, in xnn_f32_prelu_ukernel__wasmsimd_bitselect_1x16():
    51  const v128_t vi0xCDEF = wasm_v128_load(i0 + 12);  [local]
    60  v128_t vacc0xCDEF = wasm_f32x4_mul(vi0xCDEF, vwCDEF);
    61  const v128_t vmask0xCDEF = wasm_i32x4_lt(vi0xCDEF, vzero);
    66  vacc0xCDEF = wasm_v128_bitselect(vacc0xCDEF, vi0xCDEF, vmask0xCDEF);
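The bitselect variant instead computes w * x unconditionally and selects per lane. A sketch under the assumption of a wasm32 target compiled with -msimd128 (function and pointer names are hypothetical):

    #include <wasm_simd128.h>

    /* One 4-lane PReLU step in the style of the snippet above: vmask has
       all bits set in lanes where x < 0 (a signed integer compare of the
       float bit patterns against zero is a sign test), and bitselect then
       picks w*x for those lanes and x for the rest. */
    static v128_t prelu4_bitselect(const float *x, v128_t vw) {
      const v128_t vx    = wasm_v128_load(x);
      const v128_t vzero = wasm_f32x4_splat(0.0f);
      const v128_t vprod = wasm_f32x4_mul(vx, vw);
      const v128_t vmask = wasm_i32x4_lt(vx, vzero);
      return wasm_v128_bitselect(vprod, vx, vmask);  /* mask ? w*x : x */
    }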
neon-1x16.c, in xnn_f32_prelu_ukernel__neon_1x16():
    49  const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;  [local]
    57  float32x4_t vacc0xCDEF = vmulq_f32(vi0xCDEF, vwCDEF);
    58  const uint32x4_t vm0xCDEF = vcltq_s32(vreinterpretq_s32_f32(vi0xCDEF), vmovq_n_s32(0));
    63  vacc0xCDEF = vbslq_f32(vm0xCDEF, vacc0xCDEF, vi0xCDEF);
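The NEON kernel uses the same mask-and-select shape with vbslq_f32. A sketch assuming an ARM target with NEON available (names hypothetical):

    #include <arm_neon.h>

    /* 4-lane PReLU in the style of the neon-1x16 snippet: reinterpret the
       float lanes as signed ints, compare against zero to get a sign mask,
       then blend w*x (negative lanes) with x (the rest). */
    static float32x4_t prelu4_neon(const float *x, float32x4_t vw) {
      const float32x4_t vx    = vld1q_f32(x);
      const float32x4_t vprod = vmulq_f32(vx, vw);
      const uint32x4_t  vneg  = vcltq_s32(vreinterpretq_s32_f32(vx),
                                          vmovq_n_s32(0));
      return vbslq_f32(vneg, vprod, vx);  /* vneg ? w*x : x */
    }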
wasmsimd-minmax-2x16.c, in xnn_f32_prelu_ukernel__wasmsimd_minmax_2x16():
    57  v128_t vi0xCDEF = wasm_v128_load(i0 + 12);  [local]
    71  v128_t vacc0xCDEF = wasm_i32x4_max(vi0xCDEF, vzero);
    72  vi0xCDEF = wasm_i32x4_min(vi0xCDEF, vzero);
    85  vacc0xCDEF = wasm_f32x4_add(vacc0xCDEF, wasm_f32x4_mul(vi0xCDEF, vwCDEF));
wasmsimd-bitselect-2x16.c, in xnn_f32_prelu_ukernel__wasmsimd_bitselect_2x16():
    57  const v128_t vi0xCDEF = wasm_v128_load(i0 + 12);  [local]
    71  v128_t vacc0xCDEF = wasm_f32x4_mul(vi0xCDEF, vwCDEF);
    72  const v128_t vmask0xCDEF = wasm_i32x4_lt(vi0xCDEF, vzero);
    85  vacc0xCDEF = wasm_v128_bitselect(vacc0xCDEF, vi0xCDEF, vmask0xCDEF);
/external/XNNPACK/src/f32-dwconv2d-chw/gen/
3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4():
    83  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    92  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    93  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   136  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   142  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   143  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
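These 3x3, stride-2 kernels all share one load/shuffle pattern: two adjacent 4-lane loads cover input elements 8..F, one shuffle gathers the even-indexed elements into vi0x8ACE and another the odd ones into vi0x9BDF, so each output column sees its strided taps contiguously; the second hit cluster in each file additionally ANDs the shuffles with vmask_even/vmask_odd to zero lanes past the row edge. A sketch of the deinterleave (wasm32 with -msimd128; current headers spell the snippets' wasm_v32x4_shuffle as wasm_i32x4_shuffle; names hypothetical):

    #include <wasm_simd128.h>

    /* Split eight consecutive floats into even- and odd-indexed halves,
       as the 3x3s2p1 kernels do before applying the stride-2 filter taps. */
    static void deinterleave2(const float *i0, v128_t *even, v128_t *odd) {
      const v128_t vlo = wasm_v128_load(i0);      /* lanes 8 9 A B */
      const v128_t vhi = wasm_v128_load(i0 + 4);  /* lanes C D E F */
      *even = wasm_i32x4_shuffle(vlo, vhi, 0, 2, 4, 6);  /* 8 A C E */
      *odd  = wasm_i32x4_shuffle(vlo, vhi, 1, 3, 5, 7);  /* 9 B D F */
    }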
3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2():
    83  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    92  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    93  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   137  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   143  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   144  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-wasmsimd-x86-splat-1x4.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4():
    73  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    82  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    83  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   126  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   132  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   133  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-wasmsimd-arm-splat-1x4.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4():
    73  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    82  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    83  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   126  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   132  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   133  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc3.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3():
    73  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    82  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    83  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   128  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   134  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   135  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc2.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2():
    73  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    82  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    83  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   127  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   133  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   134  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc2.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2():
    73  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    82  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    83  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   127  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   133  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   134  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc4.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4():
    73  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    82  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    83  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   129  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   135  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   136  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4():
    83  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    92  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    93  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   139  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   145  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   146  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3():
    83  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    92  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    93  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   138  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   144  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   145  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2():
    83  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    92  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    93  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   137  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   143  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   144  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc4.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4():
    73  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    82  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    83  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   129  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   135  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   136  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-sse-1x4.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4():
    75  const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);  [local]
    84  const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    85  const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
   127  const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);  [local]
   133  const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
   134  const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
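The SSE files implement the same even/odd split with _mm_shuffle_ps: _MM_SHUFFLE(2, 0, 2, 0) picks lanes 0 and 2 from each source, yielding elements 8 A C E, and _MM_SHUFFLE(3, 1, 3, 1) picks lanes 1 and 3, yielding 9 B D F. A sketch (x86 with SSE; function and pointer names are hypothetical):

    #include <xmmintrin.h>

    /* SSE counterpart of the wasm deinterleave sketched earlier. */
    static void deinterleave2_sse(const float *i0, __m128 *even, __m128 *odd) {
      const __m128 vlo = _mm_loadu_ps(i0);      /* lanes 8 9 A B */
      const __m128 vhi = _mm_loadu_ps(i0 + 4);  /* lanes C D E F */
      *even = _mm_shuffle_ps(vlo, vhi, _MM_SHUFFLE(2, 0, 2, 0));  /* 8 A C E */
      *odd  = _mm_shuffle_ps(vlo, vhi, _MM_SHUFFLE(3, 1, 3, 1));  /* 9 B D F */
    }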
3x3s2p1-minmax-sse-1x4-acc3.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3():
    75  const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);  [local]
    84  const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    85  const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
   129  const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);  [local]
   135  const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
   136  const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc3.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3():
    73  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    82  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    83  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   128  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   134  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   135  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4():
    83  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    92  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    93  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   136  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   142  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   143  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4():
    83  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    92  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    93  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   139  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   145  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   146  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-sse-1x4-acc2.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2():
    75  const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);  [local]
    84  const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    85  const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
   128  const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);  [local]
   134  const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
   135  const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3():
    83  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
    92  const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    93  const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
   138  const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);  [local]
   144  const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
   145  const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
3x3s2p1-minmax-sse-1x4-acc4.c, in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc4():
    75  const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);  [local]
    84  const __m128 vi0x8ACE = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    85  const __m128 vi0x9BDF = _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
   130  const __m128 vi0xCDEF = _mm_loadu_ps(i0 + 4);  [local]
   136  const __m128 vi0x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(2, 0, 2, 0)));
   137  const __m128 vi0x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi0x89AB, vi0xCDEF, _MM_SHUFFLE(3, 1, 3, 1)));
