Lines Matching refs:vmovl_s16 (all 24 matches are inside xnn_qu8_vadd_minmax_ukernel__neon())

49 int32x4_t vacc0_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa0)), va_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
50 int32x4_t vacc1_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa1)), va_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
51 int32x4_t vacc2_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa2)), va_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
52 int32x4_t vacc3_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa3)), va_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
58 vacc0_lo = vmlaq_s32(vacc0_lo, vmovl_s16(vget_low_s16(vxb0)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
59 vacc1_lo = vmlaq_s32(vacc1_lo, vmovl_s16(vget_low_s16(vxb1)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
60 vacc2_lo = vmlaq_s32(vacc2_lo, vmovl_s16(vget_low_s16(vxb2)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
61 vacc3_lo = vmlaq_s32(vacc3_lo, vmovl_s16(vget_low_s16(vxb3)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
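
Each vmulq_s32/vmlaq_s32 pair above computes, per 32-bit lane, acc = (int32_t)xa * va_multiplier + (int32_t)xb * vb_multiplier, with vmovl_s16 supplying the sign-extension from 16 to 32 bits. A minimal scalar model of one lane (the helper name and signature are illustrative, not taken from the kernel):

    /* Scalar model of one lane of the widen/multiply/accumulate pattern.
       xa and xb are sign-extended 16-bit inputs; the multipliers are the
       per-input scales held in va_multiplier and vb_multiplier. */
    static inline int32_t lane_acc(int16_t xa, int16_t xb,
                                   int32_t va_multiplier, int32_t vb_multiplier) {
      int32_t vacc = (int32_t) xa * va_multiplier;  /* vmovl_s16 + vmulq_s32 */
      vacc += (int32_t) xb * vb_multiplier;         /* vmovl_s16 + vmlaq_s32 */
      return vacc;
    }
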
115 int32x4_t vacc0_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa0)), va_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
116 int32x4_t vacc1_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa1)), va_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
117 int32x4_t vacc0_hi = vmulq_s32(vmovl_s16(vget_high_s16(vxa0)), va_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
118 int32x4_t vacc1_hi = vmulq_s32(vmovl_s16(vget_high_s16(vxa1)), va_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
123 vacc0_lo = vmlaq_s32(vacc0_lo, vmovl_s16(vget_low_s16(vxb0)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
124 vacc1_lo = vmlaq_s32(vacc1_lo, vmovl_s16(vget_low_s16(vxb1)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
125 vacc0_hi = vmlaq_s32(vacc0_hi, vmovl_s16(vget_high_s16(vxb0)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
126 vacc1_hi = vmlaq_s32(vacc1_hi, vmovl_s16(vget_high_s16(vxb1)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
159 int32x4_t vacc_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa)), va_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
163 int32x4_t vacc_hi = vmulq_s32(vmovl_s16(vget_high_s16(vxa)), va_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
166 vacc_lo = vmlaq_s32(vacc_lo, vmovl_s16(vget_low_s16(vxb)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
170 vacc_hi = vmlaq_s32(vacc_hi, vmovl_s16(vget_high_s16(vxb)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
202 int32x4_t vacc_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa)), va_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
206 int32x4_t vacc_hi = vmulq_s32(vmovl_s16(vget_high_s16(vxa)), va_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
209 vacc_lo = vmlaq_s32(vacc_lo, vmovl_s16(vget_low_s16(vxb)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
213 vacc_hi = vmlaq_s32(vacc_hi, vmovl_s16(vget_high_s16(vxb)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
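
All 24 matches follow the same shape: split a sign-extended int16x8_t into its low and high halves with vget_low_s16/vget_high_s16, widen each half to int32x4_t with vmovl_s16, then scale input a with vmulq_s32 and accumulate the scaled input b with vmlaq_s32. The unrolled paths at lines 49-61 and 115-126 apply this to several vectors per iteration; the remainder paths at 159-213 handle a single vector. A self-contained NEON sketch of the pattern, assuming vxa and vxb have already been sign-extended from the quantized inputs (the helper name widen_scale_acc is hypothetical, not XNNPACK's):

    #include <arm_neon.h>

    /* Hypothetical helper, not XNNPACK code: reproduces the pattern in the
     * matches above for one int16x8_t input pair. vmovl_s16 sign-extends
     * each 16-bit lane to 32 bits; vmulq_s32 scales input a, and vmlaq_s32
     * adds the scaled input b into the accumulator. */
    static void widen_scale_acc(int16x8_t vxa, int16x8_t vxb,
                                int32x4_t va_multiplier, int32x4_t vb_multiplier,
                                int32x4_t *vacc_lo, int32x4_t *vacc_hi) {
      *vacc_lo = vmulq_s32(vmovl_s16(vget_low_s16(vxa)), va_multiplier);
      *vacc_hi = vmulq_s32(vmovl_s16(vget_high_s16(vxa)), va_multiplier);
      *vacc_lo = vmlaq_s32(*vacc_lo, vmovl_s16(vget_low_s16(vxb)), vb_multiplier);
      *vacc_hi = vmlaq_s32(*vacc_hi, vmovl_s16(vget_high_s16(vxb)), vb_multiplier);
    }

One plausible reason for widening first rather than using the widening multiply vmull_s16: vmull_s16 takes 16-bit operands on both sides, while the multipliers here are full 32-bit values.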