/external/XNNPACK/src/f32-gemm/gen-inc/

D | 3x8inc-minmax-sse-dup.c
      90  const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));  in xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup() local
      97  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup()
     100  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_gemminc_minmax_ukernel_3x8__sse_dup()

D | 3x8inc-minmax-sse2-dup.c
      90  const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));  in xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup() local
      97  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup()
     100  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_gemminc_minmax_ukernel_3x8__sse2_dup()

D | 4x8inc-minmax-sse2-dup.c
     103  const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));  in xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup() local
     111  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup()
     115  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_gemminc_minmax_ukernel_4x8__sse2_dup()

D | 4x8inc-minmax-sse-dup.c
     103  const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));  in xnn_f32_gemminc_minmax_ukernel_4x8__sse_dup() local
     111  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_gemminc_minmax_ukernel_4x8__sse_dup()
     115  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_gemminc_minmax_ukernel_4x8__sse_dup()

D | 5x8inc-minmax-sse-dup.c
     116  const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));  in xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup() local
     125  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup()
     130  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_gemminc_minmax_ukernel_5x8__sse_dup()

D | 5x8inc-minmax-sse2-dup.c
     116  const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));  in xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup() local
     125  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup()
     130  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_gemminc_minmax_ukernel_5x8__sse2_dup()
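
Aside: every file above comes in an sse_dup / sse2_dup pair, and at the definition line the only difference is how the lane-1 broadcast is formed. The standalone sketch below (illustrative only, not XNNPACK source; the input values are made up) shows that the two idioms yield the same vector; the SSE2 flavor routes the shuffle through the integer domain via bitwise casts.

/* Sketch: the two va2c1111 broadcast idioms, side by side. */
#include <stdio.h>
#include <emmintrin.h>  /* SSE2: _mm_shuffle_epi32 + casts; pulls in SSE too */

int main(void) {
  const __m128 va2 = _mm_setr_ps(10.0f, 11.0f, 12.0f, 13.0f);

  /* sse_dup kernels: float shuffle selecting element 1 four times. */
  const __m128 c1_sse = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));

  /* sse2_dup kernels: bit-identical broadcast via the integer shuffle. */
  const __m128 c1_sse2 = _mm_castsi128_ps(
      _mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));

  float a[4], b[4];
  _mm_storeu_ps(a, c1_sse);
  _mm_storeu_ps(b, c1_sse2);
  printf("sse : %g %g %g %g\n", a[0], a[1], a[2], a[3]);  /* 11 11 11 11 */
  printf("sse2: %g %g %g %g\n", b[0], b[1], b[2], b[3]);  /* 11 11 11 11 */
  return 0;
}
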
/external/XNNPACK/src/f32-gemm/gen/

D | 3x8-minmax-sse2-dup.c
      88  const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));  in xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup() local
      95  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup()
      98  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup()

D | 3x8-minmax-sse-dup.c
      88  const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));  in xnn_f32_gemm_minmax_ukernel_3x8__sse_dup() local
      95  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_gemm_minmax_ukernel_3x8__sse_dup()
      98  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_gemm_minmax_ukernel_3x8__sse_dup()

D | 4x8-minmax-sse2-dup.c
     101  const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));  in xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup() local
     109  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup()
     113  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_gemm_minmax_ukernel_4x8__sse2_dup()

D | 4x8-minmax-sse-dup.c
     101  const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));  in xnn_f32_gemm_minmax_ukernel_4x8__sse_dup() local
     109  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_gemm_minmax_ukernel_4x8__sse_dup()
     113  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_gemm_minmax_ukernel_4x8__sse_dup()

D | 5x8-minmax-sse-dup.c
     114  const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));  in xnn_f32_gemm_minmax_ukernel_5x8__sse_dup() local
     123  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_gemm_minmax_ukernel_5x8__sse_dup()
     128  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_gemm_minmax_ukernel_5x8__sse_dup()

D | 5x8-minmax-sse2-dup.c
     114  const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));  in xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup() local
     123  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup()
     128  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_gemm_minmax_ukernel_5x8__sse2_dup()

/external/XNNPACK/src/f32-igemm/gen/

D | 3x8-minmax-sse-dup.c
     107  const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));  in xnn_f32_igemm_minmax_ukernel_3x8__sse_dup() local
     114  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_igemm_minmax_ukernel_3x8__sse_dup()
     117  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_igemm_minmax_ukernel_3x8__sse_dup()

D | 3x8-minmax-sse2-dup.c
     107  const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));  in xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup() local
     114  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup()
     117  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_igemm_minmax_ukernel_3x8__sse2_dup()

D | 4x8-minmax-sse-dup.c
     123  const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));  in xnn_f32_igemm_minmax_ukernel_4x8__sse_dup() local
     131  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_igemm_minmax_ukernel_4x8__sse_dup()
     135  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_igemm_minmax_ukernel_4x8__sse_dup()

D | 4x8-minmax-sse2-dup.c
     123  const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));  in xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup() local
     131  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup()
     135  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_igemm_minmax_ukernel_4x8__sse2_dup()

D | 5x8-minmax-sse2-dup.c
     139  const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));  in xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup() local
     148  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup()
     153  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup()

D | 5x8-minmax-sse-dup.c
     139  const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));  in xnn_f32_igemm_minmax_ukernel_5x8__sse_dup() local
     148  vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));  in xnn_f32_igemm_minmax_ukernel_5x8__sse_dup()
     153  vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));  in xnn_f32_igemm_minmax_ukernel_5x8__sse_dup()
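
Taken together, each reference triple is the same inner-loop step: broadcast element 1 of va2, then multiply it against the two 4-float halves of the packed B row and accumulate into row 2's output registers. A minimal sketch of that step follows (hypothetical helper; the function and parameter names are chosen here for illustration and are not XNNPACK's). SSE predates fused multiply-add, hence the separate _mm_mul_ps and _mm_add_ps.

#include <emmintrin.h>

/* One k-step of the row-2 update from the kernels above (sketch). */
void gemm_step_row2(const float* b_c1,   /* 8 packed B weights for this step */
                    __m128 va2,          /* 4 A values for output row 2 */
                    __m128* vacc2x0123,  /* accumulators, columns 0..3 */
                    __m128* vacc2x4567)  /* accumulators, columns 4..7 */
{
  /* Broadcast A element 1 into all lanes (the va2c1111 temporary). */
  const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));

  /* Load the two halves of the packed B row. */
  const __m128 vb0123c1 = _mm_loadu_ps(b_c1);
  const __m128 vb4567c1 = _mm_loadu_ps(b_c1 + 4);

  /* Multiply-accumulate, exactly as the vacc2x... reference lines do. */
  *vacc2x0123 = _mm_add_ps(*vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
  *vacc2x4567 = _mm_add_ps(*vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
}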