Lines Matching refs:vacc01234567p0

All matches fall inside xnn_f16_dwconv_minmax_ukernel_up8x25__neonfp16arith().

First occurrence (source lines 167-271):

  167  float16x8_t vacc01234567p0 = vld1q_f16(w); w += 8;   (local declaration)
  172  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);
  176  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);
  180  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);
  184  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567);
  188  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);
  192  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567);
  196  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567);
  200  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567);
  204  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567);
  208  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi9x01234567, vk9x01234567);
  212  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi10x01234567, vk10x01234567);
  216  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi11x01234567, vk11x01234567);
  220  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi12x01234567, vk12x01234567);
  224  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi13x01234567, vk13x01234567);
  228  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi14x01234567, vk14x01234567);
  232  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi15x01234567, vk15x01234567);
  236  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi16x01234567, vk16x01234567);
  240  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi17x01234567, vk17x01234567);
  244  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi18x01234567, vk18x01234567);
  248  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi19x01234567, vk19x01234567);
  252  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi20x01234567, vk20x01234567);
  256  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi21x01234567, vk21x01234567);
  260  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi22x01234567, vk22x01234567);
  264  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi23x01234567, vk23x01234567);
  268  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi24x01234567, vk24x01234567);
  271  float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
Second occurrence (source lines 277-381):

  277  float16x8_t vacc01234567p0 = vld1q_f16(w); w += 8;   (local declaration)
  282  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);
  286  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);
  290  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);
  294  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567);
  298  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);
  302  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567);
  306  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567);
  310  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567);
  314  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567);
  318  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi9x01234567, vk9x01234567);
  322  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi10x01234567, vk10x01234567);
  326  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi11x01234567, vk11x01234567);
  330  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi12x01234567, vk12x01234567);
  334  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi13x01234567, vk13x01234567);
  338  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi14x01234567, vk14x01234567);
  342  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi15x01234567, vk15x01234567);
  346  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi16x01234567, vk16x01234567);
  350  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi17x01234567, vk17x01234567);
  354  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi18x01234567, vk18x01234567);
  358  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi19x01234567, vk19x01234567);
  362  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi20x01234567, vk20x01234567);
  366  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi21x01234567, vk21x01234567);
  370  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi22x01234567, vk22x01234567);
  374  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi23x01234567, vk23x01234567);
  378  vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi24x01234567, vk24x01234567);
  381  float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
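Taken together, the matches trace the full life of vacc01234567p0 in the two copies of the channel loop (presumably the main 8-channel loop and its remainder path): the accumulator is initialized from the packed weight pointer w (presumably the per-channel bias), updated by 25 vfmaq_f16 fused multiply-adds, one per tap of the 25-tap depthwise kernel over 8 channels at a time, and finally clamped via vmaxq_f16 into vacc01234567. The sketch below is not the XNNPACK source: it compresses the unrolled taps into a loop to show the same pattern, and the input[25] pointer array, the function name, the weight layout, and the trailing vminq_f16/vst1q_f16 steps are illustrative assumptions (the vminq_f16 step would not appear above because it no longer references vacc01234567p0).

  // Hedged sketch of the accumulation pattern; requires ARMv8.2-A FP16
  // (compile with e.g. -march=armv8.2-a+fp16). Not the actual kernel.
  #include <arm_neon.h>

  static void dwconv_8ch_25tap_sketch(
      const float16_t* input[25],   // hypothetical: one row pointer per kernel tap
      const float16_t* w,           // hypothetical layout: 8 bias values, then 25 x 8 kernel values
      float16_t* output,
      float16x8_t vmin,
      float16x8_t vmax)
  {
    // Matches line 167/277: initialize the accumulator from the weight stream.
    float16x8_t vacc01234567p0 = vld1q_f16(w); w += 8;

    // Matches lines 172-268 / 282-378: one fused multiply-add per tap,
    // fully unrolled in the real kernel, looped here for brevity.
    for (size_t k = 0; k < 25; k++) {
      const float16x8_t vi = vld1q_f16(input[k]);
      const float16x8_t vk = vld1q_f16(w); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi, vk);
    }

    // Matches line 271/381: lower clamp; the upper clamp and store are assumed.
    float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
    vacc01234567 = vminq_f16(vacc01234567, vmax);
    vst1q_f16(output, vacc01234567);
  }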