Searched refs:vxi4 (Results 1 – 6 of 6) sorted by relevance
/external/XNNPACK/src/qu8-gavgpool/

7p7x-minmax-sse2-c8.c  (in xnn_qu8_gavgpool_minmax_ukernel_7p7x__sse2_c8(); vxi4 is a local)
     56  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
     62  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
    100  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    106  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
    168  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    174  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
    239  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    245  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);

7x-minmax-sse2-c8.c  (in xnn_qu8_gavgpool_minmax_ukernel_7x__sse2_c8(); vxi4 is a local)
     74  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
     80  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
    143  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    149  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
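All of the qu8-gavgpool hits (and the qu8-avgpool hits below) follow one SSE2 idiom: SSE2 has no u8-to-u16 widening instruction, so each kernel zero-extends eight u8 inputs by interleaving them with a zero vector, then accumulates rows with 16-bit adds. A minimal standalone sketch of that pattern; the row data and variable names are illustrative, not taken from XNNPACK:

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint8_t row0[8] = {1, 2, 3, 4, 5, 250, 251, 252};
      uint8_t row1[8] = {9, 8, 7, 6, 5, 4, 3, 2};

      const __m128i vzero = _mm_setzero_si128();
      /* Load 8 bytes of each row into the low half of a register. */
      const __m128i vi0 = _mm_loadl_epi64((const __m128i*) row0);
      const __m128i vi1 = _mm_loadl_epi64((const __m128i*) row1);

      /* Zero-extend u8 -> u16: interleaving bytes a0..a7 with zeros
         yields the 16-bit lanes a0, a1, ..., a7 (the vxi4 line above). */
      const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
      const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);

      /* Pairwise 16-bit row sum, as in vsum45 = vxi4 + vxi5. */
      const __m128i vsum01 = _mm_add_epi16(vxi0, vxi1);

      uint16_t out[8];
      _mm_storeu_si128((__m128i*) out, vsum01);
      for (int i = 0; i < 8; i++) printf("%u ", out[i]);
      printf("\n");  /* prints: 10 10 10 10 10 254 254 254 */
      return 0;
    }

Summing in 16 bits is safe here because these kernels add at most seven (gavgpool) or nine (avgpool) u8 rows before widening to 32 bits, so partial sums stay well under 65535.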
/external/XNNPACK/src/qu8-avgpool/

9p8x-minmax-sse2-c8.c  (in xnn_qu8_avgpool_minmax_ukernel_9p8x__sse2_c8(); vxi4 is a local)
    103  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    111  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
    187  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    194  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
    293  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    300  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
    368  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    375  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);

9x-minmax-sse2-c8.c  (in xnn_qu8_avgpool_minmax_ukernel_9x__sse2_c8(); vxi4 is a local)
    127  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    135  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
    202  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    210  const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
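All four pooling files are c8 variants that process eight channels per step, which is why only _mm_unpacklo_epi8 (low eight bytes) ever appears in the hits. A kernel touching sixteen channels at a time would pair it with _mm_unpackhi_epi8; a hedged sketch of that wider variant, not taken from XNNPACK:

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint8_t in[16];
      for (int i = 0; i < 16; i++) in[i] = (uint8_t) (i * 17);  /* 0, 17, ..., 255 */

      const __m128i vzero = _mm_setzero_si128();
      const __m128i vi = _mm_loadu_si128((const __m128i*) in);
      /* Widen all 16 bytes: each half becomes eight u16 lanes. */
      const __m128i vxi_lo = _mm_unpacklo_epi8(vi, vzero);  /* bytes 0..7  */
      const __m128i vxi_hi = _mm_unpackhi_epi8(vi, vzero);  /* bytes 8..15 */

      uint16_t out[16];
      _mm_storeu_si128((__m128i*) out, vxi_lo);
      _mm_storeu_si128((__m128i*) (out + 8), vxi_hi);
      for (int i = 0; i < 16; i++) printf("%u ", out[i]);  /* 0 17 ... 255 */
      printf("\n");
      return 0;
    }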
/external/XNNPACK/src/qu8-dwconv/

up8x9-minmax-neon.c  (in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon(); vxi4 is a local)
    111  const int16x8_t vxi4 = vreinterpretq_s16_u16(vmovl_u8(vi4));
    112  vaccX0_lo = vmlal_s16(vaccX0_lo, vget_low_s16(vxk4), vget_low_s16(vxi4));
    113  vaccX0_hi = vmlal_s16(vaccX0_hi, vget_high_s16(vxk4), vget_high_s16(vxi4));
    202  const int16x8_t vxi4 = vreinterpretq_s16_u16(vmovl_u8(vi4));
    203  vaccX0_lo = vmlal_s16(vaccX0_lo, vget_low_s16(vxk4), vget_low_s16(vxi4));
    204  vaccX0_hi = vmlal_s16(vaccX0_hi, vget_high_s16(vxk4), vget_high_s16(vxi4));
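The NEON kernel widens differently: vmovl_u8 zero-extends eight u8 lanes to u16 in a single instruction, the result is reinterpreted as signed (safe, since the values fit in 0..255), and vmlal_s16 performs the widening 16x16 -> 32 multiply-accumulate on each half. A minimal sketch of that step; the helper name is hypothetical, and the real kernel additionally subtracts input/kernel zero points when preparing its operands:

    #include <arm_neon.h>
    #include <stdint.h>

    /* One multiply-accumulate step of the dwconv pattern above:
       acc_lo/acc_hi += (s16)input[i] * kernel[i], widened to 32 bits.
       Hypothetical helper, not the XNNPACK function. */
    static void mac_u8x8(const uint8_t input[8], const int16_t kernel[8],
                         int32x4_t* acc_lo, int32x4_t* acc_hi) {
      const uint8x8_t vi = vld1_u8(input);
      /* Zero-extend u8 -> u16, then reinterpret as s16. */
      const int16x8_t vxi = vreinterpretq_s16_u16(vmovl_u8(vi));
      const int16x8_t vxk = vld1q_s16(kernel);
      /* Widening multiply-accumulate, low and high halves. */
      *acc_lo = vmlal_s16(*acc_lo, vget_low_s16(vxk), vget_low_s16(vxi));
      *acc_hi = vmlal_s16(*acc_hi, vget_high_s16(vxk), vget_high_s16(vxi));
    }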
up8x9-minmax-sse2.c  (in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2(); vxi4 is a local)
    112  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    115  const __m128i vprod4_odd = _mm_mullo_epi16(vxi4, vxk4);
    116  const __m128i vprod4_even = _mm_mulhi_epi16(vxi4, vxk4);
    264  const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    267  const __m128i vprod4_odd = _mm_mullo_epi16(vxi4, vxk4);
    268  const __m128i vprod4_even = _mm_mulhi_epi16(vxi4, vxk4);
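SSE2 also lacks a widening 16x16 -> 32 multiply, so the SSE2 dwconv kernel computes the low and high 16 bits of each product separately with the _mm_mullo_epi16/_mm_mulhi_epi16 pair; the full signed 32-bit products are then rebuilt by interleaving the two halves (the interleave lines fall outside the matched hits, but _mm_unpacklo_epi16/_mm_unpackhi_epi16 is the standard recombination). A self-contained sketch of the reconstruction; the data and names are illustrative:

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      int16_t a[8] = {1, -2, 300, -400, 500, -600, 700, -800};
      int16_t b[8] = {100, 200, 300, 400, 500, 600, 700, 800};

      const __m128i va = _mm_loadu_si128((const __m128i*) a);
      const __m128i vb = _mm_loadu_si128((const __m128i*) b);

      /* Low and high 16 bits of each signed 16x16 product. */
      const __m128i vprod_odd  = _mm_mullo_epi16(va, vb);
      const __m128i vprod_even = _mm_mulhi_epi16(va, vb);

      /* Interleave (low, high) halves: each 32-bit lane becomes
         low16 | (high16 << 16), i.e. the full signed product. */
      const __m128i vprod0123 = _mm_unpacklo_epi16(vprod_odd, vprod_even);
      const __m128i vprod4567 = _mm_unpackhi_epi16(vprod_odd, vprod_even);

      int32_t out[8];
      _mm_storeu_si128((__m128i*) out, vprod0123);
      _mm_storeu_si128((__m128i*) (out + 4), vprod4567);
      for (int i = 0; i < 8; i++) printf("%d ", out[i]);  /* a[i] * b[i] */
      printf("\n");
      return 0;
    }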