Searched refs:vxi5 (Results 1 – 6 of 6) sorted by relevance
/external/XNNPACK/src/qu8-gavgpool/ |
D | 7p7x-minmax-sse2-c8.c |
     57  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_gavgpool_minmax_ukernel_7p7x__sse2_c8() local
     62  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);     in xnn_qu8_gavgpool_minmax_ukernel_7p7x__sse2_c8()
    101  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_gavgpool_minmax_ukernel_7p7x__sse2_c8() local
    106  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);     in xnn_qu8_gavgpool_minmax_ukernel_7p7x__sse2_c8()
    169  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_gavgpool_minmax_ukernel_7p7x__sse2_c8() local
    174  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);     in xnn_qu8_gavgpool_minmax_ukernel_7p7x__sse2_c8()
    240  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_gavgpool_minmax_ukernel_7p7x__sse2_c8() local
    245  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);     in xnn_qu8_gavgpool_minmax_ukernel_7p7x__sse2_c8()
|
D | 7x-minmax-sse2-c8.c |
     75  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_gavgpool_minmax_ukernel_7x__sse2_c8() local
     80  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);     in xnn_qu8_gavgpool_minmax_ukernel_7x__sse2_c8()
    144  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_gavgpool_minmax_ukernel_7x__sse2_c8() local
    149  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);     in xnn_qu8_gavgpool_minmax_ukernel_7x__sse2_c8()
|
/external/XNNPACK/src/qu8-avgpool/ |
D | 9p8x-minmax-sse2-c8.c |
    104  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_avgpool_minmax_ukernel_9p8x__sse2_c8() local
    111  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);     in xnn_qu8_avgpool_minmax_ukernel_9p8x__sse2_c8()
    188  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_avgpool_minmax_ukernel_9p8x__sse2_c8() local
    194  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);     in xnn_qu8_avgpool_minmax_ukernel_9p8x__sse2_c8()
    294  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_avgpool_minmax_ukernel_9p8x__sse2_c8() local
    300  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);     in xnn_qu8_avgpool_minmax_ukernel_9p8x__sse2_c8()
    369  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_avgpool_minmax_ukernel_9p8x__sse2_c8() local
    375  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);     in xnn_qu8_avgpool_minmax_ukernel_9p8x__sse2_c8()
|
D | 9x-minmax-sse2-c8.c |
    128  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_avgpool_minmax_ukernel_9x__sse2_c8() local
    135  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);     in xnn_qu8_avgpool_minmax_ukernel_9x__sse2_c8()
    203  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_avgpool_minmax_ukernel_9x__sse2_c8() local
    210  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);     in xnn_qu8_avgpool_minmax_ukernel_9x__sse2_c8()
|
/external/XNNPACK/src/qu8-dwconv/ |
D | up8x9-minmax-neon.c |
    118  const int16x8_t vxi5 = vreinterpretq_s16_u16(vmovl_u8(vi5));              in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon() local
    119  vaccX1_lo = vmlal_s16(vaccX1_lo, vget_low_s16(vxk5), vget_low_s16(vxi5)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
    120  vaccX1_hi = vmlal_s16(vaccX1_hi, vget_high_s16(vxk5), vget_high_s16(vxi5)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
    209  const int16x8_t vxi5 = vreinterpretq_s16_u16(vmovl_u8(vi5));              in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon() local
    210  vaccX1_lo = vmlal_s16(vaccX1_lo, vget_low_s16(vxk5), vget_low_s16(vxi5)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
    211  vaccX1_hi = vmlal_s16(vaccX1_hi, vget_high_s16(vxk5), vget_high_s16(vxi5)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
|
D | up8x9-minmax-sse2.c |
    121  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2() local
    124  const __m128i vprod5_odd  = _mm_mullo_epi16(vxi5, vxk5);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
    125  const __m128i vprod5_even = _mm_mulhi_epi16(vxi5, vxk5);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
    273  const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);   in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2() local
    276  const __m128i vprod5_odd  = _mm_mullo_epi16(vxi5, vxk5);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
    277  const __m128i vprod5_even = _mm_mulhi_epi16(vxi5, vxk5);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
|