Cross-reference hits for vnmask2x13 in XNNPACK's generated SSE2/SSSE3 QS8/QU8 GEMM and
IGEMM microkernels. Every hit is the same pair of generated lines: a local definition of
the sign mask,

    const __m128i vnmask2x13 = _mm_shuffle_epi32(vnmask2x0123, _MM_SHUFFLE(3, 3, 1, 1));

followed a few lines later by the use that applies it,

    const __m128i vprod2x13 = _mm_sub_epi64(_mm_xor_si128(vabsprod2x13, vnmask2x13), vnmask2x13);

Only the file, the enclosing function, and the source line numbers differ per kernel.

/external/XNNPACK/src/qs8-gemm/gen/
  3x4c8-xw-minmax-ssse3.c       xnn_qs8_gemm_xw_minmax_ukernel_3x4c8__ssse3       def 155, use 159
  3x4c8-minmax-sse2-ld128.c     xnn_qs8_gemm_minmax_ukernel_3x4c8__sse2_ld128     def 157, use 161
  3x4c8-xw-minmax-sse2.c        xnn_qs8_gemm_xw_minmax_ukernel_3x4c8__sse2        def 155, use 159
  3x4c8-minmax-sse2-ld64.c      xnn_qs8_gemm_minmax_ukernel_3x4c8__sse2_ld64      def 159, use 163
  3x4c8-minmax-ssse3-ld128.c    xnn_qs8_gemm_minmax_ukernel_3x4c8__ssse3_ld128    def 157, use 161
  3x4c8-minmax-ssse3-ld64.c     xnn_qs8_gemm_minmax_ukernel_3x4c8__ssse3_ld64     def 159, use 163
  4x4c2-minmax-ssse3-ld128.c    xnn_qs8_gemm_minmax_ukernel_4x4c2__ssse3_ld128    def 234, use 239
  4x4c2-minmax-sse2-ld64.c      xnn_qs8_gemm_minmax_ukernel_4x4c2__sse2_ld64      def 234, use 239
  4x4c2-minmax-ssse3-ld64.c     xnn_qs8_gemm_minmax_ukernel_4x4c2__ssse3_ld64     def 234, use 239
  4x4c2-xw-minmax-ssse3.c       xnn_qs8_gemm_xw_minmax_ukernel_4x4c2__ssse3       def 227, use 232
  4x4c2-minmax-sse2-ld128.c     xnn_qs8_gemm_minmax_ukernel_4x4c2__sse2_ld128     def 234, use 239
  4x4c2-xw-minmax-sse2.c        xnn_qs8_gemm_xw_minmax_ukernel_4x4c2__sse2        def 227, use 232

/external/XNNPACK/src/qs8-igemm/gen/
  3x4c8-minmax-ssse3-ld64.c     xnn_qs8_igemm_minmax_ukernel_3x4c8__ssse3_ld64    def 176, use 180
  3x4c8-minmax-ssse3-ld128.c    xnn_qs8_igemm_minmax_ukernel_3x4c8__ssse3_ld128   def 174, use 178
  3x4c8-minmax-sse2-ld128.c     xnn_qs8_igemm_minmax_ukernel_3x4c8__sse2_ld128    def 174, use 178
  3x4c8-minmax-sse2-ld64.c      xnn_qs8_igemm_minmax_ukernel_3x4c8__sse2_ld64     def 176, use 180
  4x4c2-minmax-sse2-ld128.c     xnn_qs8_igemm_minmax_ukernel_4x4c2__sse2_ld128    def 253, use 258
  4x4c2-minmax-ssse3-ld128.c    xnn_qs8_igemm_minmax_ukernel_4x4c2__ssse3_ld128   def 253, use 258
  4x4c2-minmax-ssse3-ld64.c     xnn_qs8_igemm_minmax_ukernel_4x4c2__ssse3_ld64    def 253, use 258
  4x4c2-minmax-sse2-ld64.c      xnn_qs8_igemm_minmax_ukernel_4x4c2__sse2_ld64     def 253, use 258

/external/XNNPACK/src/qu8-gemm/
  4x4c2-minmax-sse2.c           xnn_qu8_gemm_minmax_ukernel_4x4c2__sse2           def 239, use 244

/external/XNNPACK/src/qu8-igemm/
  4x4c2-minmax-sse2.c           xnn_qu8_igemm_minmax_ukernel_4x4c2__sse2          def 223, use 228
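
Why every kernel carries this pair: SSE2 and SSSE3 provide only an unsigned 32x32 -> 64-bit
multiply (_mm_mul_epu32; the signed _mm_mul_epi32 requires SSE4.1). As the vabsprod2x13 name
suggests, these kernels multiply absolute values and restore the sign afterwards via the
two's-complement identity (p XOR m) - m, which yields -p where the mask m is all ones and p
where it is zero. The _MM_SHUFFLE(3, 3, 1, 1) shuffle duplicates the 32-bit sign masks of
products 1 and 3 into both halves of their 64-bit lanes so the XOR and subtract act on whole
64-bit products. The following is a minimal, self-contained C sketch of the idiom, not code
taken from XNNPACK; the names (va, vb, vnmask, vabsprod, vprod) are illustrative. It signs
products 0 and 2, the even lanes that _mm_mul_epu32 reads directly, so it uses
_MM_SHUFFLE(2, 2, 0, 0) where the kernels above use (3, 3, 1, 1).

    #include <emmintrin.h>  /* SSE2 intrinsics */
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      /* Signed 32-bit factors in lanes 0 and 2, the lanes _mm_mul_epu32 reads. */
      const __m128i va = _mm_set_epi32(0, -7, 0, 5);
      const __m128i vb = _mm_set_epi32(0, 3, 0, -4);

      /* Sign of each product: all ones where exactly one factor is negative. */
      const __m128i vnmask = _mm_cmpgt_epi32(_mm_setzero_si128(), _mm_xor_si128(va, vb));

      /* SSE2 has no _mm_abs_epi32, so take |x| as (x XOR m) - m with m = (x < 0). */
      const __m128i vamask = _mm_cmpgt_epi32(_mm_setzero_si128(), va);
      const __m128i vbmask = _mm_cmpgt_epi32(_mm_setzero_si128(), vb);
      const __m128i vaabs = _mm_sub_epi32(_mm_xor_si128(va, vamask), vamask);
      const __m128i vbabs = _mm_sub_epi32(_mm_xor_si128(vb, vbmask), vbmask);

      /* Unsigned 32x32 -> 64-bit products of the absolute values (lanes 0 and 2). */
      const __m128i vabsprod = _mm_mul_epu32(vaabs, vbabs);

      /* Duplicate the 32-bit sign masks of lanes 0 and 2 across the 64-bit lanes. */
      const __m128i vnmask64 = _mm_shuffle_epi32(vnmask, _MM_SHUFFLE(2, 2, 0, 0));

      /* Restore the sign: (p XOR m) - m negates p exactly where m is all ones. */
      const __m128i vprod = _mm_sub_epi64(_mm_xor_si128(vabsprod, vnmask64), vnmask64);

      int64_t out[2];
      _mm_storeu_si128((__m128i*) out, vprod);
      printf("%lld %lld\n", (long long) out[0], (long long) out[1]);  /* -20 -21 */
      return 0;
    }

Built with any SSE2-capable compiler (SSE2 is baseline on x86-64), this prints -20 -21.
Judging by the vnmask2x13/vnmask2x0123 naming, the listed lines are the odd-lane half of
this fix-up for row 2 of the accumulator tile; an even-lane counterpart would apply the
same pattern with _MM_SHUFFLE(2, 2, 0, 0).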