1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -basic-aa -slp-vectorizer -slp-threshold=-100 -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
3
4target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
5target triple = "i386-apple-macosx10.9.0"
6
7;int foo(double *A, int k) {
8;  double A0;
9;  double A1;
10;  if (k) {
11;    A0 = 3;
12;    A1 = 5;
13;  } else {
14;    A0 = A[10];
15;    A1 = A[11];
16;  }
17;  A[0] = A0;
18;  A[1] = A1;
19;}
20
21
define i32 @foo(double* nocapture %A, i32 %k) {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[K:%.*]], 0
; CHECK-NEXT:    br i1 [[TOBOOL]], label [[IF_ELSE:%.*]], label [[IF_END:%.*]]
; CHECK:       if.else:
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 10
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT:    br label [[IF_END]]
; CHECK:       if.end:
; CHECK-NEXT:    [[TMP2:%.*]] = phi <2 x double> [ [[TMP1]], [[IF_ELSE]] ], [ <double 3.000000e+00, double 5.000000e+00>, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[A]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP2]], <2 x double>* [[TMP3]], align 8
; CHECK-NEXT:    ret i32 undef
;
; Two consecutive loads (A[10]/A[11]) feed two phis that feed two consecutive
; stores (A[0]/A[1]): a vectorizable bundle rooted at the phis. Per the CHECK
; lines above, SLP is expected to collapse this into one <2 x double> load,
; one <2 x double> phi (with a constant vector on the %entry edge), and one
; <2 x double> store.
entry:
  %tobool = icmp eq i32 %k, 0
  br i1 %tobool, label %if.else, label %if.end

if.else:                                          ; preds = %entry
  ; Scalar loads of the two adjacent elements A[10] and A[11].
  %arrayidx = getelementptr inbounds double, double* %A, i64 10
  %0 = load double, double* %arrayidx, align 8
  %arrayidx1 = getelementptr inbounds double, double* %A, i64 11
  %1 = load double, double* %arrayidx1, align 8
  br label %if.end

if.end:                                           ; preds = %entry, %if.else
  ; Scalar phis carrying either the loaded pair or the constants {3.0, 5.0}.
  %A0.0 = phi double [ %0, %if.else ], [ 3.000000e+00, %entry ]
  %A1.0 = phi double [ %1, %if.else ], [ 5.000000e+00, %entry ]
  ; Adjacent stores to A[0] and A[1] — the other half of the bundle.
  store double %A0.0, double* %A, align 8
  %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
  store double %A1.0, double* %arrayidx3, align 8
  ret i32 undef
}
57
58
;int foo2(double * restrict B,  double * restrict A, int n, int m) {
60;  double R=A[1];
61;  double G=A[0];
62;  for (int i=0; i < 100; i++) {
63;    R += 10;
64;    G += 10;
65;    R *= 4;
66;    G *= 4;
67;    R += 4;
68;    G += 4;
69;  }
70;  B[0] = G;
71;  B[1] = R;
72;  return 0;
73;}
74
define i32 @foo2(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) #0 {
; CHECK-LABEL: @foo2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_019:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = phi <2 x double> [ [[TMP1]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = fadd <2 x double> [[TMP2]], <double 1.000000e+01, double 1.000000e+01>
; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP3]], <double 4.000000e+00, double 4.000000e+00>
; CHECK-NEXT:    [[TMP5]] = fadd <2 x double> [[TMP4]], <double 4.000000e+00, double 4.000000e+00>
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_019]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 100
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
; CHECK-NEXT:    ret i32 0
;
; Loop-carried case: two independent scalar recurrences (%G.018 and %R.017)
; each run the same fadd/fmul/fadd chain per iteration. Per the CHECK lines
; above, SLP is expected to fuse the two recurrences into a single
; <2 x double> phi with a vector fadd/fmul/fadd chain, fed by one vector load
; of A[0..1] and drained by one vector store to B[0..1].
entry:
  ; Scalar loads of A[1] and A[0] — the initial values of the recurrences.
  %arrayidx = getelementptr inbounds double, double* %A, i64 1
  %0 = load double, double* %arrayidx, align 8
  %1 = load double, double* %A, align 8
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.019 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  ; Two parallel recurrences: G starts at A[0], R starts at A[1]; both apply
  ; (x + 10) * 4 + 4 each iteration.
  %G.018 = phi double [ %1, %entry ], [ %add5, %for.body ]
  %R.017 = phi double [ %0, %entry ], [ %add4, %for.body ]
  %add = fadd double %R.017, 1.000000e+01
  %add2 = fadd double %G.018, 1.000000e+01
  %mul = fmul double %add, 4.000000e+00
  %mul3 = fmul double %add2, 4.000000e+00
  %add4 = fadd double %mul, 4.000000e+00
  %add5 = fadd double %mul3, 4.000000e+00
  %inc = add nsw i32 %i.019, 1
  ; Fixed trip count of 100.
  %exitcond = icmp eq i32 %inc, 100
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ; Adjacent stores to B[0] and B[1] root the vectorizable tree.
  store double %add5, double* %B, align 8
  %arrayidx7 = getelementptr inbounds double, double* %B, i64 1
  store double %add4, double* %arrayidx7, align 8
  ret i32 0
}
121
122; float foo3(float *A) {
123;
124;   float R = A[0];
125;   float G = A[1];
126;   float B = A[2];
127;   float Y = A[3];
128;   float P = A[4];
129;   for (int i=0; i < 121; i+=3) {
130;     R+=A[i+0]*7;
131;     G+=A[i+1]*8;
132;     B+=A[i+2]*9;
133;     Y+=A[i+3]*10;
134;     P+=A[i+4]*11;
135;   }
136;
137;   return R+G+B+Y+P;
138; }
139
define float @foo3(float* nocapture readonly %A) #0 {
; CHECK-LABEL: @foo3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[A:%.*]], align 4
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds float, float* [[A]], i64 1
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[ARRAYIDX1]] to <4 x float>*
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
; CHECK-NEXT:    [[REORDER_SHUFFLE:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x float> [[REORDER_SHUFFLE]], i32 3
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[R_052:%.*]] = phi float [ [[TMP0]], [[ENTRY]] ], [ [[ADD6:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = phi float [ [[TMP3]], [[ENTRY]] ], [ [[TMP11:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP5:%.*]] = phi float [ [[TMP0]], [[ENTRY]] ], [ [[TMP13:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP6:%.*]] = phi <4 x float> [ [[REORDER_SHUFFLE]], [[ENTRY]] ], [ [[TMP18:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[TMP5]], 7.000000e+00
; CHECK-NEXT:    [[ADD6]] = fadd float [[R_052]], [[MUL]]
; CHECK-NEXT:    [[TMP7:%.*]] = add nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP7]]
; CHECK-NEXT:    [[TMP8:%.*]] = load float, float* [[ARRAYIDX14]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 3
; CHECK-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast float* [[ARRAYIDX19]] to <2 x float>*
; CHECK-NEXT:    [[TMP10:%.*]] = load <2 x float>, <2 x float>* [[TMP9]], align 4
; CHECK-NEXT:    [[REORDER_SHUFFLE1:%.*]] = shufflevector <2 x float> [[TMP10]], <2 x float> undef, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT:    [[TMP11]] = extractelement <2 x float> [[REORDER_SHUFFLE1]], i32 0
; CHECK-NEXT:    [[TMP12:%.*]] = insertelement <4 x float> undef, float [[TMP11]], i32 0
; CHECK-NEXT:    [[TMP13]] = extractelement <2 x float> [[REORDER_SHUFFLE1]], i32 1
; CHECK-NEXT:    [[TMP14:%.*]] = insertelement <4 x float> [[TMP12]], float [[TMP13]], i32 1
; CHECK-NEXT:    [[TMP15:%.*]] = insertelement <4 x float> [[TMP14]], float [[TMP8]], i32 2
; CHECK-NEXT:    [[TMP16:%.*]] = insertelement <4 x float> [[TMP15]], float [[TMP4]], i32 3
; CHECK-NEXT:    [[TMP17:%.*]] = fmul <4 x float> [[TMP16]], <float 1.100000e+01, float 1.000000e+01, float 9.000000e+00, float 8.000000e+00>
; CHECK-NEXT:    [[TMP18]] = fadd <4 x float> [[TMP6]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP19]], 121
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK:       for.end:
; CHECK-NEXT:    [[TMP20:%.*]] = extractelement <4 x float> [[TMP18]], i32 3
; CHECK-NEXT:    [[ADD28:%.*]] = fadd float [[ADD6]], [[TMP20]]
; CHECK-NEXT:    [[TMP21:%.*]] = extractelement <4 x float> [[TMP18]], i32 2
; CHECK-NEXT:    [[ADD29:%.*]] = fadd float [[ADD28]], [[TMP21]]
; CHECK-NEXT:    [[TMP22:%.*]] = extractelement <4 x float> [[TMP18]], i32 1
; CHECK-NEXT:    [[ADD30:%.*]] = fadd float [[ADD29]], [[TMP22]]
; CHECK-NEXT:    [[TMP23:%.*]] = extractelement <4 x float> [[TMP18]], i32 0
; CHECK-NEXT:    [[ADD31:%.*]] = fadd float [[ADD30]], [[TMP23]]
; CHECK-NEXT:    ret float [[ADD31]]
;
; Five accumulators (R, G, B, Y, P). Per the CHECK lines above, SLP is
; expected to vectorize four of them (G, B, Y, P) into a <4 x float> phi —
; using reorder shufflevectors since they occur in reversed order — while R
; (%R.052) remains scalar, and the final reduction in for.end is done with
; extractelement + scalar fadds.
entry:
  ; Scalar seeds: R = A[0]; A[1..4] seed G, B, Y, P.
  %0 = load float, float* %A, align 4
  %arrayidx1 = getelementptr inbounds float, float* %A, i64 1
  %1 = load float, float* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds float, float* %A, i64 2
  %2 = load float, float* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds float, float* %A, i64 3
  %3 = load float, float* %arrayidx3, align 4
  %arrayidx4 = getelementptr inbounds float, float* %A, i64 4
  %4 = load float, float* %arrayidx4, align 4
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  ; The five accumulator phis, deliberately listed P..R (reversed relative to
  ; the memory order of their seed loads).
  %P.056 = phi float [ %4, %entry ], [ %add26, %for.body ]
  %Y.055 = phi float [ %3, %entry ], [ %add21, %for.body ]
  %B.054 = phi float [ %2, %entry ], [ %add16, %for.body ]
  %G.053 = phi float [ %1, %entry ], [ %add11, %for.body ]
  %R.052 = phi float [ %0, %entry ], [ %add6, %for.body ]
  ; Rotating phis forwarding last iteration's A[i+4] and A[i+3] loads as this
  ; iteration's A[i+1] and A[i+0] values (i advances by 3).
  %5 = phi float [ %1, %entry ], [ %11, %for.body ]
  %6 = phi float [ %0, %entry ], [ %9, %for.body ]
  %mul = fmul float %6, 7.000000e+00
  %add6 = fadd float %R.052, %mul
  %mul10 = fmul float %5, 8.000000e+00
  %add11 = fadd float %G.053, %mul10
  %7 = add nsw i64 %indvars.iv, 2
  %arrayidx14 = getelementptr inbounds float, float* %A, i64 %7
  %8 = load float, float* %arrayidx14, align 4
  %mul15 = fmul float %8, 9.000000e+00
  %add16 = fadd float %B.054, %mul15
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 3
  %arrayidx19 = getelementptr inbounds float, float* %A, i64 %indvars.iv.next
  %9 = load float, float* %arrayidx19, align 4
  %mul20 = fmul float %9, 1.000000e+01
  %add21 = fadd float %Y.055, %mul20
  %10 = add nsw i64 %indvars.iv, 4
  %arrayidx24 = getelementptr inbounds float, float* %A, i64 %10
  %11 = load float, float* %arrayidx24, align 4
  %mul25 = fmul float %11, 1.100000e+01
  %add26 = fadd float %P.056, %mul25
  %12 = trunc i64 %indvars.iv.next to i32
  %cmp = icmp slt i32 %12, 121
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  ; Final scalar reduction R+G+B+Y+P.
  %add28 = fadd float %add6, %add11
  %add29 = fadd float %add28, %add16
  %add30 = fadd float %add29, %add21
  %add31 = fadd float %add30, %add26
  ret float %add31
}
239
240; Make sure the order of phi nodes of different types does not prevent
241; vectorization of same typed phi nodes.
define float @sort_phi_type(float* nocapture readonly %A) {
; CHECK-LABEL: @sort_phi_type(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = phi <4 x float> [ <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>, [[ENTRY]] ], [ [[TMP9:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x float> [[TMP0]], i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x float> undef, float [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x float> [[TMP2]], float [[TMP3]], i32 1
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x float> [[TMP4]], float [[TMP5]], i32 2
; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <4 x float> [[TMP6]], float [[TMP7]], i32 3
; CHECK-NEXT:    [[TMP9]] = fmul <4 x float> [[TMP8]], <float 8.000000e+00, float 9.000000e+00, float 1.000000e+02, float 1.110000e+02>
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], 4
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[INDVARS_IV_NEXT]], 128
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK:       for.end:
; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <4 x float> [[TMP9]], i32 0
; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <4 x float> [[TMP9]], i32 1
; CHECK-NEXT:    [[ADD29:%.*]] = fadd float [[TMP10]], [[TMP11]]
; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <4 x float> [[TMP9]], i32 2
; CHECK-NEXT:    [[ADD30:%.*]] = fadd float [[ADD29]], [[TMP12]]
; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <4 x float> [[TMP9]], i32 3
; CHECK-NEXT:    [[ADD31:%.*]] = fadd float [[ADD30]], [[TMP13]]
; CHECK-NEXT:    ret float [[ADD31]]
;
; The i64 induction-variable phi is placed between the four float phis; per
; the CHECK lines above, SLP should still collect the float phis into one
; <4 x float> phi despite the differently-typed phi interleaved among them.
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  ; Four float accumulators, each seeded with 10.0, with the i64 phi wedged
  ; between %Y and %B to exercise phi-type sorting.
  %Y = phi float [ 1.000000e+01, %entry ], [ %mul10, %for.body ]
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %B = phi float [ 1.000000e+01, %entry ], [ %mul15, %for.body ]
  %G = phi float [ 1.000000e+01, %entry ], [ %mul20, %for.body ]
  %R = phi float [ 1.000000e+01, %entry ], [ %mul25, %for.body ]
  ; Note %G/%R feed each other's recurrences crosswise (%mul20 uses %R but
  ; updates %G; %mul25 uses %G but updates %R), matching the swapped lanes
  ; 3 and 2 in the CHECK insertelement sequence.
  %mul10 = fmul float %Y, 8.000000e+00
  %mul15 = fmul float %B, 9.000000e+00
  %mul20 = fmul float %R, 10.000000e+01
  %mul25 = fmul float %G, 11.100000e+01
  %indvars.iv.next = add nsw i64 %indvars.iv, 4
  %cmp = icmp slt i64 %indvars.iv.next, 128
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  ; NOTE(review): %add28 is dead (nothing uses it; %add29 reads %mul10
  ; directly), which is why it does not appear in the CHECK lines after -dce.
  %add28 = fadd float 1.000000e+01, %mul10
  %add29 = fadd float %mul10, %mul15
  %add30 = fadd float %add29, %mul20
  %add31 = fadd float %add30, %mul25
  ret float %add31
}
295
define void @test(x86_fp80* %i1, x86_fp80* %i2, x86_fp80* %o) {
; CHECK-LABEL: @test(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[I1_0:%.*]] = load x86_fp80, x86_fp80* [[I1:%.*]], align 16
; CHECK-NEXT:    [[I1_GEP1:%.*]] = getelementptr x86_fp80, x86_fp80* [[I1]], i64 1
; CHECK-NEXT:    [[I1_1:%.*]] = load x86_fp80, x86_fp80* [[I1_GEP1]], align 16
; CHECK-NEXT:    br i1 undef, label [[THEN:%.*]], label [[END:%.*]]
; CHECK:       then:
; CHECK-NEXT:    [[I2_GEP0:%.*]] = getelementptr inbounds x86_fp80, x86_fp80* [[I2:%.*]], i64 0
; CHECK-NEXT:    [[I2_0:%.*]] = load x86_fp80, x86_fp80* [[I2_GEP0]], align 16
; CHECK-NEXT:    [[I2_GEP1:%.*]] = getelementptr inbounds x86_fp80, x86_fp80* [[I2]], i64 1
; CHECK-NEXT:    [[I2_1:%.*]] = load x86_fp80, x86_fp80* [[I2_GEP1]], align 16
; CHECK-NEXT:    br label [[END]]
; CHECK:       end:
; CHECK-NEXT:    [[PHI0:%.*]] = phi x86_fp80 [ [[I1_0]], [[ENTRY:%.*]] ], [ [[I2_0]], [[THEN]] ]
; CHECK-NEXT:    [[PHI1:%.*]] = phi x86_fp80 [ [[I1_1]], [[ENTRY]] ], [ [[I2_1]], [[THEN]] ]
; CHECK-NEXT:    store x86_fp80 [[PHI0]], x86_fp80* [[O:%.*]], align 16
; CHECK-NEXT:    [[O_GEP1:%.*]] = getelementptr inbounds x86_fp80, x86_fp80* [[O]], i64 1
; CHECK-NEXT:    store x86_fp80 [[PHI1]], x86_fp80* [[O_GEP1]], align 16
; CHECK-NEXT:    ret void
;
; Test that we correctly recognize the discontiguous memory in arrays where the
; size is less than the alignment, and through various different GEP formations.
; We disable the vectorization of x86_fp80 for now.
; (Negative test: the CHECK lines above require the IR to stay fully scalar —
; x86_fp80 is 10 bytes stored in 16-byte slots, so adjacent GEPs are not
; contiguous memory.)

entry:
  ; Loads of i1[0] and i1[1] via a plain (non-inbounds) GEP.
  %i1.0 = load x86_fp80, x86_fp80* %i1, align 16
  %i1.gep1 = getelementptr x86_fp80, x86_fp80* %i1, i64 1
  %i1.1 = load x86_fp80, x86_fp80* %i1.gep1, align 16
  br i1 undef, label %then, label %end

then:
  ; Same pair of loads from i2, this time through inbounds GEPs (including an
  ; explicit index-0 GEP) — the "various different GEP formations".
  %i2.gep0 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 0
  %i2.0 = load x86_fp80, x86_fp80* %i2.gep0, align 16
  %i2.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 1
  %i2.1 = load x86_fp80, x86_fp80* %i2.gep1, align 16
  br label %end

end:
  ; Phis and stores must remain scalar x86_fp80 operations.
  %phi0 = phi x86_fp80 [ %i1.0, %entry ], [ %i2.0, %then ]
  %phi1 = phi x86_fp80 [ %i1.1, %entry ], [ %i2.1, %then ]
  store x86_fp80 %phi0, x86_fp80* %o, align 16
  %o.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %o, i64 1
  store x86_fp80 %phi1, x86_fp80* %o.gep1, align 16
  ret void
}
342