; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX512F

declare <4 x float> @llvm.x86.avx.vpermil.ps(<4 x float>, i8)
declare <8 x float> @llvm.x86.avx.vpermil.ps.256(<8 x float>, i8)
declare <2 x double> @llvm.x86.avx.vpermil.pd(<2 x double>, i8)
declare <4 x double> @llvm.x86.avx.vpermil.pd.256(<4 x double>, i8)

declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>)
declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>)
declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>)
declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>)

declare <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32>, <8 x i32>, i8)
declare <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float>, <8 x float>, i8)
declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>, i8)
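
; Applying the self-inverse mask <3,2,1,0> twice composes to the identity,
; so both shuffles should fold away and only the ret remain.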
define <4 x float> @combine_vpermilvar_4f32_identity(<4 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_4f32_identity:
; ALL-NEXT: retq
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  ret <4 x float> %2
}
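
; The <0,1,0,1> float shuffle duplicates the low 64 bits and is matched as vmovddup.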
define <4 x float> @combine_vpermilvar_4f32_movddup(<4 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_4f32_movddup:
; ALL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; ALL-NEXT: retq
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 1, i32 0, i32 1>)
  ret <4 x float> %1
}
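
; The same movddup combine should also fold the source load into a memory operand.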
define <4 x float> @combine_vpermilvar_4f32_movddup_load(<4 x float>* %a0) {
; ALL-LABEL: combine_vpermilvar_4f32_movddup_load:
; ALL-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; ALL-NEXT: retq
  %1 = load <4 x float>, <4 x float>* %a0
  %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 0, i32 1, i32 0, i32 1>)
  ret <4 x float> %2
}
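
; An undef mask element does not block the <u,1,3,3> pattern from matching vmovshdup.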
define <4 x float> @combine_vpermilvar_4f32_movshdup(<4 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_4f32_movshdup:
; ALL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; ALL-NEXT: retq
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 undef, i32 1, i32 3, i32 3>)
  ret <4 x float> %1
}
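
; The <0,0,2,2> pattern is the mirror case and matches vmovsldup.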
define <4 x float> @combine_vpermilvar_4f32_movsldup(<4 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_4f32_movsldup:
; ALL-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; ALL-NEXT: retq
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 2, i32 2>)
  ret <4 x float> %1
}
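
; An unpckhps-of-self pattern <2,2,3,3> is kept as a single immediate vpermilps.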
define <4 x float> @combine_vpermilvar_4f32_unpckh(<4 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_4f32_unpckh:
; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; ALL-NEXT: retq
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 2, i32 2, i32 3, i32 3>)
  ret <4 x float> %1
}
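
; Likewise the unpcklps-of-self pattern <0,0,1,1> becomes one vpermilps.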
define <4 x float> @combine_vpermilvar_4f32_unpckl(<4 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_4f32_unpckl:
; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; ALL-NEXT: retq
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 1, i32 1>)
  ret <4 x float> %1
}
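
; 256-bit variant: vpermilvar.ps.256 shuffles within each 128-bit lane, so a
; per-lane self-inverse mask applied twice again cancels to the identity.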
define <8 x float> @combine_vpermilvar_8f32_identity(<8 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_8f32_identity:
; ALL-NEXT: retq
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1>)
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1>)
  ret <8 x float> %2
}
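
; Two variable shuffles fold into one vpermilps, with undef mask elements
; propagated into the final [1,0,3,2,6,u,4,u] pattern.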
define <8 x float> @combine_vpermilvar_8f32_10326u4u(<8 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_8f32_10326u4u:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
; ALL-NEXT: retq
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1>)
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 0, i32 undef, i32 2, i32 undef>)
  ret <8 x float> %2
}
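
; A vpermilvar / lane-swap / vpermilvar chain should reduce to the lone
; 128-bit lane swap (vperm2f128) once the per-lane shuffles cancel.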
define <8 x float> @combine_vpermilvar_vperm2f128_8f32(<8 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_vperm2f128_8f32:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; ALL-NEXT: retq
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  %2 = shufflevector <8 x float> %1, <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
  %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x float> %3
}
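
; When one half comes from zeroinitializer, the chain folds to a zeroing
; vperm2f128 (zero,zero,ymm0[0,1]).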
define <8 x float> @combine_vpermilvar_vperm2f128_zero_8f32(<8 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; ALL-NEXT: retq
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  %2 = shufflevector <8 x float> %1, <8 x float> zeroinitializer, <8 x i32> <i32 8, i32 8, i32 8, i32 8, i32 0, i32 1, i32 2, i32 3>
  %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x float> %3
}
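
; Keeping the low doubles and zeroing the high ones is cheaper as a vblendpd
; against a zeroed register than as a lane permute.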
define <4 x double> @combine_vperm2f128_vpermilvar_as_vpblendpd(<4 x double> %a0) {
; ALL-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; ALL-NEXT: retq
  %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  %2 = shufflevector <4 x double> %1, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  %3 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %2, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  ret <4 x double> %3
}
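
; <0,1,0,1> per 128-bit lane matches the 256-bit vmovddup ([0,0,2,2] doubles).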
define <8 x float> @combine_vpermilvar_8f32_movddup(<8 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_8f32_movddup:
; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; ALL-NEXT: retq
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>)
  ret <8 x float> %1
}
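
; ...and the 256-bit movddup combine should also fold a load of the source.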
define <8 x float> @combine_vpermilvar_8f32_movddup_load(<8 x float>* %a0) {
; ALL-LABEL: combine_vpermilvar_8f32_movddup_load:
; ALL-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
; ALL-NEXT: retq
  %1 = load <8 x float>, <8 x float>* %a0
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>)
  ret <8 x float> %2
}
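
; <1,1,3,3,5,5,7,7> matches the 256-bit vmovshdup.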
define <8 x float> @combine_vpermilvar_8f32_movshdup(<8 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_8f32_movshdup:
; ALL-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; ALL-NEXT: retq
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>)
  ret <8 x float> %1
}
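
; <0,0,2,2,4,4,6,6> matches the 256-bit vmovsldup.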
define <8 x float> @combine_vpermilvar_8f32_movsldup(<8 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_8f32_movsldup:
; ALL-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; ALL-NEXT: retq
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>)
  ret <8 x float> %1
}
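
; vpermilvar.pd only reads bit 1 of each mask element: <2,0> swaps the pair,
; and swapping twice is the identity.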
define <2 x double> @combine_vpermilvar_2f64_identity(<2 x double> %a0) {
; ALL-LABEL: combine_vpermilvar_2f64_identity:
; ALL-NEXT: retq
  %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 2, i64 0>)
  %2 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %1, <2 x i64> <i64 2, i64 0>)
  ret <2 x double> %2
}
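
; A <0,0> double mask is a 64-bit broadcast, i.e. vmovddup.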
define <2 x double> @combine_vpermilvar_2f64_movddup(<2 x double> %a0) {
; ALL-LABEL: combine_vpermilvar_2f64_movddup:
; ALL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; ALL-NEXT: retq
  %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 0, i64 0>)
  ret <2 x double> %1
}
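
; Per-lane <2,0,2,0> applied twice is again the identity at 256 bits.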
define <4 x double> @combine_vpermilvar_4f64_identity(<4 x double> %a0) {
; ALL-LABEL: combine_vpermilvar_4f64_identity:
; ALL-NEXT: retq
  %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  %2 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
  ret <4 x double> %2
}
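
; An all-zero mask keeps element 0 of each lane, giving the [0,0,2,2]
; pattern of the 256-bit vmovddup.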
define <4 x double> @combine_vpermilvar_4f64_movddup(<4 x double> %a0) {
; ALL-LABEL: combine_vpermilvar_4f64_movddup:
; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; ALL-NEXT: retq
  %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 0, i64 0, i64 0, i64 0>)
  ret <4 x double> %1
}
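
; Four chained variable shuffles should still collapse to a single vpermilps;
; composing the masks gives the final [2,0,3,1] pattern.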
define <4 x float> @combine_vpermilvar_4f32_4stage(<4 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_4f32_4stage:
; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
; ALL-NEXT: retq
  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 2, i32 3, i32 0, i32 1>)
  %3 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>)
  %4 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %3, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  ret <4 x float> %4
}
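
; The same four-stage chain, applied per 128-bit lane at 256 bits.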
define <8 x float> @combine_vpermilvar_8f32_4stage(<8 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_8f32_4stage:
; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
; ALL-NEXT: retq
  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>)
  %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 0, i32 2, i32 1, i32 3, i32 0, i32 2, i32 1, i32 3>)
  %4 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %3, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
  ret <8 x float> %4
}
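
; A shuffle followed by zero insertion is better served by a single vinsertps.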
define <4 x float> @combine_vpermilvar_4f32_as_insertps(<4 x float> %a0) {
; ALL-LABEL: combine_vpermilvar_4f32_as_insertps:
; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
; ALL-NEXT: retq
  %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  %2 = shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 1, i32 4>
  ret <4 x float> %2
}