References to vk10c2 under /external/XNNPACK/src/f32-conv-hwc2chw/:

3x3s2p1c3x4-neonfma-2x2.c, all in xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neonfma_2x2():
  172  const float32x4_t vk10c2 = vld1q_f32(w + 32);   (local declaration)
  174  vo0x0 = vfmaq_laneq_f32(vo0x0, vk10c2, vi1x0, 3);
  175  vo1x0 = vfmaq_laneq_f32(vo1x0, vk10c2, vi3x0, 3);
  176  vo0x1 = vfmaq_laneq_f32(vo0x1, vk10c2, vi1x2, 1);
  177  vo1x1 = vfmaq_laneq_f32(vo1x1, vk10c2, vi3x2, 1);
  434  const float32x4_t vk10c2 = vld1q_f32(w + 32);   (local declaration)
  436  vo0x0 = vfmaq_laneq_f32(vo0x0, vk10c2, vi1x0, 3);
  437  vo1x0 = vfmaq_laneq_f32(vo1x0, vk10c2, vi3x0, 3);
  438  vo0x1 = vfmaq_laneq_f32(vo0x1, vk10c2, vi1x2, 1);
  439  vo1x1 = vfmaq_laneq_f32(vo1x1, vk10c2, vi3x2, 1);

3x3s2p1c3x4-neon-2x2.c, all in xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2():
  172  const float32x4_t vk10c2 = vld1q_f32(w + 32);   (local declaration)
  174  vo0x0 = vmlaq_lane_f32(vo0x0, vk10c2, vget_high_f32(vi1x0), 1);
  175  vo1x0 = vmlaq_lane_f32(vo1x0, vk10c2, vget_high_f32(vi3x0), 1);
  176  vo0x1 = vmlaq_lane_f32(vo0x1, vk10c2, vget_low_f32(vi1x2), 1);
  177  vo1x1 = vmlaq_lane_f32(vo1x1, vk10c2, vget_low_f32(vi3x2), 1);
  432  const float32x4_t vk10c2 = vld1q_f32(w + 32);   (local declaration)
  434  vo0x0 = vmlaq_lane_f32(vo0x0, vk10c2, vget_high_f32(vi1x0), 1);
  435  vo1x0 = vmlaq_lane_f32(vo1x0, vk10c2, vget_high_f32(vi3x0), 1);
  436  vo0x1 = vmlaq_lane_f32(vo0x1, vk10c2, vget_low_f32(vi1x2), 1);
  437  vo1x1 = vmlaq_lane_f32(vo1x1, vk10c2, vget_low_f32(vi3x2), 1);

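The two NEON entries spell the same lane-broadcast multiply-accumulate differently: the neonfma kernel uses vfmaq_laneq_f32, which can index any lane of a 128-bit vector directly, while the plain neon kernel's vmlaq_lane_f32 only indexes a 64-bit half, so it first splits the input with vget_high_f32/vget_low_f32 (lane 3 becomes lane 1 of the high half; lane 1 stays lane 1 of the low half). A minimal sketch of that equivalence, reusing the listing's variable names outside of XNNPACK (the __aarch64__ gate is an assumption of the sketch, not how the real kernels are selected):

  #include <arm_neon.h>

  /* Accumulate vk10c2 scaled by lane 3 of vi1x0 into vo0x0, as on line 174
   * of both NEON kernels above.  Illustration only, not XNNPACK source. */
  static float32x4_t acc_vi1x0_lane3(float32x4_t vo0x0, float32x4_t vk10c2, float32x4_t vi1x0) {
  #if defined(__aarch64__)
    /* neonfma variant: fused multiply-add, lane indexed on the full 128-bit vector. */
    return vfmaq_laneq_f32(vo0x0, vk10c2, vi1x0, 3);
  #else
    /* neon variant: the multiply-accumulate indexes a 64-bit half, so take the
     * high half and use its lane 1 (the original lane 3). */
    return vmlaq_lane_f32(vo0x0, vk10c2, vget_high_f32(vi1x0), 1);
  #endif
  }
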
3x3s2p1c3x4-wasmsimd-2x2.c, all in xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__wasmsimd_2x2():
  173  const v128_t vk10c2 = wasm_v128_load(w + 32);   (local declaration)
  175  …vo0x0 = wasm_f32x4_add(vo0x0, wasm_f32x4_mul(vk10c2, wasm_v32x4_shuffle(vi1x0, vi1x0, 3, 3, 3, 3))…
  176  …vo1x0 = wasm_f32x4_add(vo1x0, wasm_f32x4_mul(vk10c2, wasm_v32x4_shuffle(vi3x0, vi3x0, 3, 3, 3, 3))…
  177  …vo0x1 = wasm_f32x4_add(vo0x1, wasm_f32x4_mul(vk10c2, wasm_v32x4_shuffle(vi1x2, vi1x2, 1, 1, 1, 1))…
  178  …vo1x1 = wasm_f32x4_add(vo1x1, wasm_f32x4_mul(vk10c2, wasm_v32x4_shuffle(vi3x2, vi3x2, 1, 1, 1, 1))…
  434  const v128_t vk10c2 = wasm_v128_load(w + 32);   (local declaration)
  436  …vo0x0 = wasm_f32x4_add(vo0x0, wasm_f32x4_mul(vk10c2, wasm_v32x4_shuffle(vi1x0, vi1x0, 3, 3, 3, 3))…
  437  …vo1x0 = wasm_f32x4_add(vo1x0, wasm_f32x4_mul(vk10c2, wasm_v32x4_shuffle(vi3x0, vi3x0, 3, 3, 3, 3))…
  438  …vo0x1 = wasm_f32x4_add(vo0x1, wasm_f32x4_mul(vk10c2, wasm_v32x4_shuffle(vi1x2, vi1x2, 1, 1, 1, 1))…
  439  …vo1x1 = wasm_f32x4_add(vo1x1, wasm_f32x4_mul(vk10c2, wasm_v32x4_shuffle(vi3x2, vi3x2, 1, 1, 1, 1))…

3x3s2p1c3x4-sse-2x2.c, all in xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__sse_2x2():
  172  const __m128 vk10c2 = _mm_load_ps(w + 32);   (local declaration)
  174  …vo0x0 = _mm_add_ps(vo0x0, _mm_mul_ps(vk10c2, _mm_shuffle_ps(vi1x0, vi1x0, _MM_SHUFFLE(3, 3, 3, 3))…
  175  …vo1x0 = _mm_add_ps(vo1x0, _mm_mul_ps(vk10c2, _mm_shuffle_ps(vi3x0, vi3x0, _MM_SHUFFLE(3, 3, 3, 3))…
  176  …vo0x1 = _mm_add_ps(vo0x1, _mm_mul_ps(vk10c2, _mm_shuffle_ps(vi1x2, vi1x2, _MM_SHUFFLE(1, 1, 1, 1))…
  177  …vo1x1 = _mm_add_ps(vo1x1, _mm_mul_ps(vk10c2, _mm_shuffle_ps(vi3x2, vi3x2, _MM_SHUFFLE(1, 1, 1, 1))…
  434  const __m128 vk10c2 = _mm_load_ps(w + 32);   (local declaration)
  436  …vo0x0 = _mm_add_ps(vo0x0, _mm_mul_ps(vk10c2, _mm_shuffle_ps(vi1x0, vi1x0, _MM_SHUFFLE(3, 3, 3, 3))…
  437  …vo1x0 = _mm_add_ps(vo1x0, _mm_mul_ps(vk10c2, _mm_shuffle_ps(vi3x0, vi3x0, _MM_SHUFFLE(3, 3, 3, 3))…
  438  …vo0x1 = _mm_add_ps(vo0x1, _mm_mul_ps(vk10c2, _mm_shuffle_ps(vi1x2, vi1x2, _MM_SHUFFLE(1, 1, 1, 1))…
  439  …vo1x1 = _mm_add_ps(vo1x1, _mm_mul_ps(vk10c2, _mm_shuffle_ps(vi3x2, vi3x2, _MM_SHUFFLE(1, 1, 1, 1))…

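The wasmsimd and sse kernels have no lane-indexed multiply-accumulate, so both broadcast the chosen lane with a shuffle whose indices are all equal (wasm_v32x4_shuffle(v, v, 3, 3, 3, 3) or _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3))) and then apply a separate multiply and add. Whichever backend is used, each listed statement performs the same update; a plain scalar reference with hypothetical data (names follow the listing, values are made up, and this is not XNNPACK code):

  #include <stdio.h>

  /* Scalar reference for the lane-broadcast multiply-accumulate shown above:
   * each of the four output channels in vo gains vk[c] * vi[lane]. */
  static void acc_lane(float vo[4], const float vk[4], const float vi[4], int lane) {
    for (int c = 0; c < 4; c++) {
      vo[c] += vk[c] * vi[lane];
    }
  }

  int main(void) {
    float vo0x0[4] = {0.0f, 0.0f, 0.0f, 0.0f};         /* output accumulator          */
    const float vk10c2[4] = {0.1f, 0.2f, 0.3f, 0.4f};  /* one kernel tap, 4 channels  */
    const float vi1x0[4] = {1.0f, 2.0f, 3.0f, 4.0f};   /* packed input samples        */
    acc_lane(vo0x0, vk10c2, vi1x0, 3);                 /* lane 3, as in vo0x0's update */
    printf("%g %g %g %g\n", vo0x0[0], vo0x0[1], vo0x0[2], vo0x0[3]);
    return 0;
  }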