Lines Matching refs:tmp2
7 %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
8 ret <8 x i8> %tmp2
15 %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
16 ret <4 x i16> %tmp2
23 %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
24 ret <2 x i32> %tmp2
31 %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
32 ret <2 x float> %tmp2
39 %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
40 ret <16 x i8> %tmp2
47 %tmp2 = sub <8 x i16> zeroinitializer, %tmp1
48 ret <8 x i16> %tmp2
55 %tmp2 = sub <4 x i32> zeroinitializer, %tmp1
56 ret <4 x i32> %tmp2
63	 %tmp2 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1
64 ret <4 x float> %tmp2
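
A minimal sketch of the surrounding context, assuming these matches come from an ARM NEON negation codegen test: each sub/fsub line presumably sits inside a small function that returns the negated vector. The function names and the by-value parameters below are illustrative only, not taken from the matched lines.

; Integer negate is written as a subtraction from the zero vector.
define <8 x i8> @neg_v8i8(<8 x i8> %tmp1) nounwind {
  %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
  ret <8 x i8> %tmp2
}

; Floating-point negate is written as a subtraction from -0.0 (not +0.0),
; so that negating +0.0 correctly yields -0.0.
define <2 x float> @neg_v2f32(<2 x float> %tmp1) nounwind {
  %tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
  ret <2 x float> %tmp2
}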
71 %tmp2 = call <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8> %tmp1)
72 ret <8 x i8> %tmp2
79 %tmp2 = call <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16> %tmp1)
80 ret <4 x i16> %tmp2
87 %tmp2 = call <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32> %tmp1)
88 ret <2 x i32> %tmp2
95 %tmp2 = call <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8> %tmp1)
96 ret <16 x i8> %tmp2
103 %tmp2 = call <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16> %tmp1)
104 ret <8 x i16> %tmp2
111 %tmp2 = call <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32> %tmp1)
112 ret <4 x i32> %tmp2
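
The @llvm.arm.neon.vqneg.* calls above are target intrinsics for saturating negation, so the file these matches come from would also need matching declarations. A sketch of those declarations follows; the nounwind readnone attributes are an assumption, not visible in the matched lines.

declare <8 x i8>  @llvm.arm.neon.vqneg.v8i8(<8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32>) nounwind readnone
declare <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32>) nounwind readnone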