/external/XNNPACK/src/f32-velu/gen/ |
D | velu-avx-rr2-lut4-p4-perm-x24.c | in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24():
      66  __m256 ven2 = _mm256_andnot_ps(vindex_mask, vn2);  (local)
      68  const __m128 ven2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven2)), 21));
      75  const __m128 ven2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven2, 1)), 21));
      82  ven2 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven2_lo), ven2_hi, 1);
      89  __m256 vs2 = _mm256_mul_ps(vl2, ven2);
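
These four AVX kernels (x24 here; x32, x40, and x48 below) build ven2 identically. Plain AVX has no 256-bit integer shift (_mm256_slli_epi32 requires AVX2), so lines 68/75/82 split ven2 into two SSE halves, shift each half left by 21, and stitch the result back together. A minimal standalone rendering of that workaround (the helper name is mine, not XNNPACK's):

    #include <immintrin.h>

    /* Shift every 32-bit lane of a 256-bit vector left by 21 using only
     * AVX1: split into two __m128 halves, shift each with SSE2, and
     * reassemble. Mirrors lines 68, 75, and 82 of the kernel above. */
    static inline __m256 avx_slli21_ps(__m256 v) {
      const __m128 lo = _mm_castsi128_ps(
          _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(v)), 21));
      const __m128 hi = _mm_castsi128_ps(
          _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(v, 1)), 21));
      return _mm256_insertf128_ps(_mm256_castps128_ps256(lo), hi, 1);
    }

Note the division of labor: the _mm256_andnot_ps on line 66 clears the two LUT-index bits first, so after the shift every lane of ven2 has an all-zero mantissa field, i.e. it is a power of two. That is why line 89 can apply the exponent adjustment with a float multiply (_mm256_mul_ps): plain AVX also lacks the 256-bit integer add (_mm256_add_epi32) that the AVX2/AVX512 variants further down use instead.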
|
D | velu-avx-rr2-lut4-p4-perm-x32.c | in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32():
      69  __m256 ven2 = _mm256_andnot_ps(vindex_mask, vn2);  (local)
      71  const __m128 ven2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven2)), 21));
      81  const __m128 ven2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven2, 1)), 21));
      90  ven2 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven2_lo), ven2_hi, 1);
      99  __m256 vs2 = _mm256_mul_ps(vl2, ven2);
|
D | velu-avx-rr2-lut4-p4-perm-x40.c | in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40():
      72  __m256 ven2 = _mm256_andnot_ps(vindex_mask, vn2);  (local)
      74  const __m128 ven2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven2)), 21));
      87  const __m128 ven2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven2, 1)), 21));
      98  ven2 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven2_lo), ven2_hi, 1);
     109  __m256 vs2 = _mm256_mul_ps(vl2, ven2);
|
D | velu-scalar-rr2-lut16-p3-x3.c | in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3():
      63  const uint32_t ven2 = fp32_to_bits(vn2) << 19;  (local)
      72  float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
|
D | velu-wasm-rr2-lut16-p3-x3.c | in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3():
      63  const uint32_t ven2 = fp32_to_bits(vn2) << 19;  (local)
      72  float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
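
These two lines are the whole scale-reconstruction step of the scalar and wasm lut16 kernels (the x4, x5, and x6 entries below repeat them at higher unroll factors): vn2 carries n = round(16*z) in its low bits, vidx2 selects one of 16 table entries, and adding ven2 = fp32_to_bits(vn2) << 19 in effect folds the integer part of n into the exponent field of the looked-up value; XNNPACK's magic-bias constants arrange the bits so that a single shift-and-add suffices. A from-scratch, self-contained illustration of the same split z = e + k/16 + r, done with explicit rounding instead of the magic-bias trick (table values, coefficients, and the function name are illustrative, not the kernel's exact constants):

    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    /* Approximate 2**z by splitting z = e + k/16 + r: look up 2**(k/16)
     * in a 16-entry table, fold 2**e in with an integer add to the
     * exponent field, and correct the residual r with a short series. */
    static float exp2_lut16_sketch(float z) {
      const float n = roundf(z * 16.0f);   /* kernels round via magic bias */
      const int32_t ni = (int32_t) n;
      const int32_t k = ni & 15;           /* table index, 0..15           */
      const int32_t e = (ni - k) / 16;     /* integer part: n = 16*e + k   */
      const float r = z - n * 0x1.0p-4f;   /* residual, |r| <= 1/32        */
      static const float table[16] = {     /* 2**(k/16), k = 0..15         */
        0x1.000000p+0f, 0x1.0b5587p+0f, 0x1.172b84p+0f, 0x1.2387a7p+0f,
        0x1.306fe1p+0f, 0x1.3dea65p+0f, 0x1.4bfdadp+0f, 0x1.5ab07ep+0f,
        0x1.6a09e6p+0f, 0x1.7a1147p+0f, 0x1.8ace54p+0f, 0x1.9c4918p+0f,
        0x1.ae89fap+0f, 0x1.c199bep+0f, 0x1.d5818ep+0f, 0x1.ea4afap+0f,
      };
      uint32_t bits;
      memcpy(&bits, &table[k], sizeof bits);
      /* Adding e << 23 bumps the IEEE-754 exponent by e; two's-complement
       * wraparound handles negative e, as long as the result stays in the
       * normal range. This is what "+ ven2" does in the kernels above. */
      bits += (uint32_t) e << 23;
      float s;
      memcpy(&s, &bits, sizeof s);
      /* Truncated series for 2**r (the kernels fit a degree-3 polynomial,
       * the "p3" in their names, over the full ELU expression). */
      const float p = 1.0f + r * (0x1.62e43p-1f + r * 0x1.ebfbep-3f);
      return s * p;
    }

For example, exp2_lut16_sketch(-0.5f) takes n = -8, k = 8, e = -1, r = 0, and returns table[8] * 0.5 = 0x1.6a09e6p-1, i.e. 2**-0.5 as rounded to float.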
|
D | velu-avx-rr2-lut4-p4-perm-x48.c | in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48():
      75  __m256 ven2 = _mm256_andnot_ps(vindex_mask, vn2);  (local)
      77  const __m128 ven2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven2)), 21));
      93  const __m128 ven2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven2, 1)), 21));
     106  ven2 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven2_lo), ven2_hi, 1);
     119  __m256 vs2 = _mm256_mul_ps(vl2, ven2);
|
D | velu-wasm-rr2-lut16-p3-x4.c | in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4():
      66  const uint32_t ven2 = fp32_to_bits(vn2) << 19;  (local)
      78  float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
|
D | velu-scalar-rr2-lut16-p3-x4.c | in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4():
      66  const uint32_t ven2 = fp32_to_bits(vn2) << 19;  (local)
      78  float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
|
D | velu-scalar-rr2-lut16-p3-x5.c | in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5():
      69  const uint32_t ven2 = fp32_to_bits(vn2) << 19;  (local)
      84  float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
|
D | velu-wasm-rr2-lut16-p3-x5.c | in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5():
      69  const uint32_t ven2 = fp32_to_bits(vn2) << 19;  (local)
      84  float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
|
D | velu-wasm-rr2-lut16-p3-x6.c | in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6():
      72  const uint32_t ven2 = fp32_to_bits(vn2) << 19;  (local)
      90  float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
|
D | velu-scalar-rr2-lut16-p3-x6.c | in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6():
      72  const uint32_t ven2 = fp32_to_bits(vn2) << 19;  (local)
      90  float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
|
D | velu-avx512f-rr1-lut16-p3-perm-x48.c | in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48():
      60  const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);  (local)
      67  __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
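
With AVX512F (and with AVX2 in the entries below), the exponent adjustment needs no half-splitting: one full-width integer shift builds ven2 and one full-width integer add applies it to the table bits vl2. The count is 19 rather than 23 because in vn2's bit pattern the integer part of n sits four bits up, above the four index bits of the 16-entry LUT. A minimal AVX2 sketch of just the add-to-exponent step (helper name mine; assumes e is small enough that every lane stays in the normal float range):

    #include <immintrin.h>

    /* Scale each float lane of vl by 2**e, where ve holds e in the low
     * bits of each 32-bit lane: shift e into the IEEE-754 exponent field
     * and add it to the float's bit pattern, as the vs2 lines above do. */
    static inline __m256 scale_by_exp2(const __m256 vl, const __m256i ve) {
      return _mm256_castsi256_ps(
          _mm256_add_epi32(_mm256_castps_si256(vl), _mm256_slli_epi32(ve, 23)));
    }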
|
D | velu-avx2-rr1-lut16-p3-gather-x24.c | in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24():
      67  const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);  (local)
      74  __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
|
D | velu-avx2-rr1-lut8-p4-perm-x24.c | in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24():
      62  const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);  (local)
      70  __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
|
D | velu-avx2-rr1-lut4-p4-perm-x24.c | in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24():
      63  const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);  (local)
      71  __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
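
At this point all three shift counts have appeared, and the pattern across the lut16/lut8/lut4 variants is mechanical: the count is 23 (the width of the float mantissa field) minus the number of LUT index bits, so a bigger table leaves a smaller shift. In sketch form (names mine):

    /* Shift that moves the integer part of n past the LUT index bits and
     * into the IEEE-754 exponent field, which starts at bit 23: */
    enum {
      VEN_SHIFT_LUT16 = 23 - 4,  /* = 19: 16-entry table, 4 index bits */
      VEN_SHIFT_LUT8  = 23 - 3,  /* = 20:  8-entry table, 3 index bits */
      VEN_SHIFT_LUT4  = 23 - 2,  /* = 21:  4-entry table, 2 index bits */
    };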
|
D | velu-avx2-rr1-lut16-p3-gather-x32.c | in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32():
      72  const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);  (local)
      81  __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
|
D | velu-avx512f-rr1-lut16-p3-perm-x64.c | in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64():
      63  const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);  (local)
      72  __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
|
D | velu-avx2-rr1-lut8-p4-perm-x32.c | in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32():
      65  const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);  (local)
      76  __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
|
D | velu-avx2-rr1-lut4-p4-perm-x32.c | in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32():
      66  const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);  (local)
      77  __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
|
D | velu-avx512f-rr1-lut16-p3-perm-x80.c | in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80():
      66  const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);  (local)
      77  __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
|
D | velu-avx2-rr1-lut8-p4-perm-x40.c | in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40():
      68  const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);  (local)
      82  __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
|
D | velu-avx2-rr1-lut4-p4-perm-x40.c | in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40():
      69  const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);  (local)
      83  __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
|
D | velu-avx2-rr1-lut16-p3-gather-x40.c | in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40():
      77  const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);  (local)
      88  __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
|
D | velu-avx2-rr1-lut16-p3-gather-x48.c | in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48():
      82  const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);  (local)
      95  __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
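
To close the loop, a tiny driver for the exp2_lut16_sketch illustration given earlier (assumes that function is in the same translation unit; exp2f from <math.h> serves as the reference):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      for (float z = -4.0f; z <= 0.0f; z += 0.5f) {
        printf("z = %+.2f   sketch = %.6f   exp2f = %.6f\n",
               (double) z, (double) exp2_lut16_sketch(z), (double) exp2f(z));
      }
      return 0;
    }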
|