Lines matching refs:tmp2 — every definition and use of %tmp2 in the file, each listed with its source line number.

7 %tmp2 = load <8 x i8>, <8 x i8>* %B
8 %tmp3 = call <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
16 %tmp2 = load <16 x i8>, <16 x i8>* %B
17 %tmp3 = call <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
25 %tmp2 = load <4 x i16>, <4 x i16>* %B
26 %tmp3 = call <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
34 %tmp2 = load <8 x i16>, <8 x i16>* %B
35 %tmp3 = call <8 x i16> @llvm.aarch64.neon.smax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
43 %tmp2 = load <2 x i32>, <2 x i32>* %B
44 %tmp3 = call <2 x i32> @llvm.aarch64.neon.smax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
52 %tmp2 = load <4 x i32>, <4 x i32>* %B
53 %tmp3 = call <4 x i32> @llvm.aarch64.neon.smax.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
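Each pair of matched lines above is the definition of %tmp2 (a load from %B) followed one line later by its use as the second operand of an element-wise signed-maximum intrinsic. The umax, smin, and umin groups below repeat the same pattern with the sign or direction flipped, so a single sketch covers them all. A minimal, assumed reconstruction of the kind of function the v8i8 smax lines sit in (the function name @smax_8b, the parameter names, and the surrounding load/ret/declare lines are illustrative, not taken from the listing):

define <8 x i8> @smax_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
  ; first operand loaded from %A, second operand (%tmp2) from %B
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %tmp2 = load <8 x i8>, <8 x i8>* %B
  ; lane-by-lane signed maximum of two 8 x i8 vectors (SMAX.8B)
  %tmp3 = call <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  ret <8 x i8> %tmp3
}

declare <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8>, <8 x i8>) nounwind readnone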
68 %tmp2 = load <8 x i8>, <8 x i8>* %B
69 %tmp3 = call <8 x i8> @llvm.aarch64.neon.umax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
77 %tmp2 = load <16 x i8>, <16 x i8>* %B
78 %tmp3 = call <16 x i8> @llvm.aarch64.neon.umax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
86 %tmp2 = load <4 x i16>, <4 x i16>* %B
87 %tmp3 = call <4 x i16> @llvm.aarch64.neon.umax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
95 %tmp2 = load <8 x i16>, <8 x i16>* %B
96 %tmp3 = call <8 x i16> @llvm.aarch64.neon.umax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
104 %tmp2 = load <2 x i32>, <2 x i32>* %B
105 %tmp3 = call <2 x i32> @llvm.aarch64.neon.umax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
113 %tmp2 = load <4 x i32>, <4 x i32>* %B
114 %tmp3 = call <4 x i32> @llvm.aarch64.neon.umax.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
129 %tmp2 = load <8 x i8>, <8 x i8>* %B
130 %tmp3 = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
138 %tmp2 = load <16 x i8>, <16 x i8>* %B
139 %tmp3 = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
147 %tmp2 = load <4 x i16>, <4 x i16>* %B
148 %tmp3 = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
156 %tmp2 = load <8 x i16>, <8 x i16>* %B
157 %tmp3 = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
165 %tmp2 = load <2 x i32>, <2 x i32>* %B
166 %tmp3 = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
174 %tmp2 = load <4 x i32>, <4 x i32>* %B
175 %tmp3 = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
190 %tmp2 = load <8 x i8>, <8 x i8>* %B
191 %tmp3 = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
199 %tmp2 = load <16 x i8>, <16 x i8>* %B
200 %tmp3 = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
208 %tmp2 = load <4 x i16>, <4 x i16>* %B
209 %tmp3 = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
217 %tmp2 = load <8 x i16>, <8 x i16>* %B
218 %tmp3 = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
226 %tmp2 = load <2 x i32>, <2 x i32>* %B
227 %tmp3 = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
235 %tmp2 = load <4 x i32>, <4 x i32>* %B
236 %tmp3 = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
253 %tmp2 = load <8 x i8>, <8 x i8>* %B
254 %tmp3 = call <8 x i8> @llvm.aarch64.neon.smaxp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
262 %tmp2 = load <16 x i8>, <16 x i8>* %B
263 %tmp3 = call <16 x i8> @llvm.aarch64.neon.smaxp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
271 %tmp2 = load <4 x i16>, <4 x i16>* %B
272 %tmp3 = call <4 x i16> @llvm.aarch64.neon.smaxp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
280 %tmp2 = load <8 x i16>, <8 x i16>* %B
281 %tmp3 = call <8 x i16> @llvm.aarch64.neon.smaxp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
289 %tmp2 = load <2 x i32>, <2 x i32>* %B
290 %tmp3 = call <2 x i32> @llvm.aarch64.neon.smaxp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
298 %tmp2 = load <4 x i32>, <4 x i32>* %B
299 %tmp3 = call <4 x i32> @llvm.aarch64.neon.smaxp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
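The smaxp matches above, and the umaxp, sminp, and uminp groups that follow, use the same two-operand signature but pairwise semantics: the two source vectors are treated as one concatenated sequence, and each result lane is the maximum (or minimum) of one adjacent pair. A hedged sketch of a v4i16 pairwise-maximum function in the same style (all names are assumptions):

define <4 x i16> @smaxp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  %tmp1 = load <4 x i16>, <4 x i16>* %A
  %tmp2 = load <4 x i16>, <4 x i16>* %B
  ; pairwise signed maximum over adjacent element pairs of %tmp1 then %tmp2 (SMAXP.4H)
  %tmp3 = call <4 x i16> @llvm.aarch64.neon.smaxp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  ret <4 x i16> %tmp3
}

declare <4 x i16> @llvm.aarch64.neon.smaxp.v4i16(<4 x i16>, <4 x i16>) nounwind readnone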
314 %tmp2 = load <8 x i8>, <8 x i8>* %B
315 %tmp3 = call <8 x i8> @llvm.aarch64.neon.umaxp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
323 %tmp2 = load <16 x i8>, <16 x i8>* %B
324 %tmp3 = call <16 x i8> @llvm.aarch64.neon.umaxp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
332 %tmp2 = load <4 x i16>, <4 x i16>* %B
333 %tmp3 = call <4 x i16> @llvm.aarch64.neon.umaxp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
341 %tmp2 = load <8 x i16>, <8 x i16>* %B
342 %tmp3 = call <8 x i16> @llvm.aarch64.neon.umaxp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
350 %tmp2 = load <2 x i32>, <2 x i32>* %B
351 %tmp3 = call <2 x i32> @llvm.aarch64.neon.umaxp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
359 %tmp2 = load <4 x i32>, <4 x i32>* %B
360 %tmp3 = call <4 x i32> @llvm.aarch64.neon.umaxp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
377 %tmp2 = load <8 x i8>, <8 x i8>* %B
378 %tmp3 = call <8 x i8> @llvm.aarch64.neon.sminp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
386 %tmp2 = load <16 x i8>, <16 x i8>* %B
387 %tmp3 = call <16 x i8> @llvm.aarch64.neon.sminp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
395 %tmp2 = load <4 x i16>, <4 x i16>* %B
396 %tmp3 = call <4 x i16> @llvm.aarch64.neon.sminp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
404 %tmp2 = load <8 x i16>, <8 x i16>* %B
405 %tmp3 = call <8 x i16> @llvm.aarch64.neon.sminp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
413 %tmp2 = load <2 x i32>, <2 x i32>* %B
414 %tmp3 = call <2 x i32> @llvm.aarch64.neon.sminp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
422 %tmp2 = load <4 x i32>, <4 x i32>* %B
423 %tmp3 = call <4 x i32> @llvm.aarch64.neon.sminp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
438 %tmp2 = load <8 x i8>, <8 x i8>* %B
439 %tmp3 = call <8 x i8> @llvm.aarch64.neon.uminp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
447 %tmp2 = load <16 x i8>, <16 x i8>* %B
448 %tmp3 = call <16 x i8> @llvm.aarch64.neon.uminp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
456 %tmp2 = load <4 x i16>, <4 x i16>* %B
457 %tmp3 = call <4 x i16> @llvm.aarch64.neon.uminp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
465 %tmp2 = load <8 x i16>, <8 x i16>* %B
466 %tmp3 = call <8 x i16> @llvm.aarch64.neon.uminp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
474 %tmp2 = load <2 x i32>, <2 x i32>* %B
475 %tmp3 = call <2 x i32> @llvm.aarch64.neon.uminp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
483 %tmp2 = load <4 x i32>, <4 x i32>* %B
484 %tmp3 = call <4 x i32> @llvm.aarch64.neon.uminp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
499 %tmp2 = load <2 x float>, <2 x float>* %B
500 %tmp3 = call <2 x float> @llvm.aarch64.neon.fmax.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
508 %tmp2 = load <4 x float>, <4 x float>* %B
509 %tmp3 = call <4 x float> @llvm.aarch64.neon.fmax.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
517 %tmp2 = load <2 x double>, <2 x double>* %B
518 %tmp3 = call <2 x double> @llvm.aarch64.neon.fmax.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
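The floating-point matches follow the same load, load, call shape, only with float and double element types and the fmax/fmin families of intrinsics. A sketch for the v2f32 case, with assumed function and parameter names:

define <2 x float> @fmax_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
  %tmp1 = load <2 x float>, <2 x float>* %A
  %tmp2 = load <2 x float>, <2 x float>* %B
  ; element-wise floating-point maximum (FMAX.2S)
  %tmp3 = call <2 x float> @llvm.aarch64.neon.fmax.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
  ret <2 x float> %tmp3
}

declare <2 x float> @llvm.aarch64.neon.fmax.v2f32(<2 x float>, <2 x float>) nounwind readnone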
530 %tmp2 = load <2 x float>, <2 x float>* %B
531 %tmp3 = call <2 x float> @llvm.aarch64.neon.fmaxp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
539 %tmp2 = load <4 x float>, <4 x float>* %B
540 %tmp3 = call <4 x float> @llvm.aarch64.neon.fmaxp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
548 %tmp2 = load <2 x double>, <2 x double>* %B
549 %tmp3 = call <2 x double> @llvm.aarch64.neon.fmaxp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
561 %tmp2 = load <2 x float>, <2 x float>* %B
562 %tmp3 = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
570 %tmp2 = load <4 x float>, <4 x float>* %B
571 %tmp3 = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
579 %tmp2 = load <2 x double>, <2 x double>* %B
580 %tmp3 = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
592 %tmp2 = load <2 x float>, <2 x float>* %B
593 %tmp3 = call <2 x float> @llvm.aarch64.neon.fminp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
601 %tmp2 = load <4 x float>, <4 x float>* %B
602 %tmp3 = call <4 x float> @llvm.aarch64.neon.fminp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
610 %tmp2 = load <2 x double>, <2 x double>* %B
611 %tmp3 = call <2 x double> @llvm.aarch64.neon.fminp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
623 %tmp2 = load <2 x float>, <2 x float>* %B
624 %tmp3 = call <2 x float> @llvm.aarch64.neon.fminnmp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
632 %tmp2 = load <4 x float>, <4 x float>* %B
633 %tmp3 = call <4 x float> @llvm.aarch64.neon.fminnmp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
641 %tmp2 = load <2 x double>, <2 x double>* %B
642 %tmp3 = call <2 x double> @llvm.aarch64.neon.fminnmp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
654 %tmp2 = load <2 x float>, <2 x float>* %B
655 %tmp3 = call <2 x float> @llvm.aarch64.neon.fmaxnmp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
663 %tmp2 = load <4 x float>, <4 x float>* %B
664 %tmp3 = call <4 x float> @llvm.aarch64.neon.fmaxnmp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
672 %tmp2 = load <2 x double>, <2 x double>* %B
673 %tmp3 = call <2 x double> @llvm.aarch64.neon.fmaxnmp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
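The final fminnmp and fmaxnmp groups are the pairwise "number" variants: where fminp/fmaxp propagate NaNs, the NM forms follow IEEE-754 minNum/maxNum behaviour and return the numeric element when exactly one element of a pair is a quiet NaN. A sketch in the same shape, with assumed names:

define <2 x double> @fmaxnmp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
  %tmp1 = load <2 x double>, <2 x double>* %A
  %tmp2 = load <2 x double>, <2 x double>* %B
  ; pairwise maxNum: a quiet NaN paired with a number yields the number (FMAXNMP.2D)
  %tmp3 = call <2 x double> @llvm.aarch64.neon.fmaxnmp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
  ret <2 x double> %tmp3
}

declare <2 x double> @llvm.aarch64.neon.fmaxnmp.v2f64(<2 x double>, <2 x double>) nounwind readnone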