; RUN: opt -loop-vectorize -mtriple=arm64-apple-ios -S -mcpu=cyclone < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"

@kernel = global [512 x float] zeroinitializer, align 16
@kernel2 = global [512 x float] zeroinitializer, align 16
@kernel3 = global [512 x float] zeroinitializer, align 16
@kernel4 = global [512 x float] zeroinitializer, align 16
@src_data = global [1536 x float] zeroinitializer, align 16
@r_ = global i8 0, align 1
@g_ = global i8 0, align 1
@b_ = global i8 0, align 1

; We don't want to vectorize most loops containing gathers because they are
; expensive: each strided load from @src_data would have to be assembled from
; scalar loads.
; Make sure this loop is not vectorized; any "<N x float>" vector type in the
; output would indicate that it was.
; CHECK-NOT: x float>

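; For reference, a rough C equivalent of the loop below (an approximate
; reconstruction under -ffast-math; names match the globals above, but this
; is a sketch, not the exact original source):
;
;   float r = 0.0f, g = 0.0f, b = 0.0f;
;   for (size_t v = 0; v < size; ++v) {
;     float k = kernel[v] * kernel2[v] * kernel3[v] * kernel4[v];
;     r += src_data[3 * (v + offset) + 0] * k;
;     g += src_data[3 * (v + offset) + 1] * k;
;     b += src_data[3 * (v + offset) + 2] * k;
;   }
;   r_ = (unsigned char)r;
;   g_ = (unsigned char)g;
;   b_ = (unsigned char)b;
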
define void @_Z4testmm(i64 %size, i64 %offset) {
entry:
  %cmp53 = icmp eq i64 %size, 0
  br i1 %cmp53, label %for.end, label %for.body.lr.ph

for.body.lr.ph:
  br label %for.body

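; Three parallel fast-math FP reductions. Each iteration loads the four
; kernel values once and three consecutive elements of a stride-3 access
; into @src_data (the expensive "gather" pattern this test guards against).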
for.body:
  %r.057 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add10, %for.body ]
  %g.056 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add20, %for.body ]
  %v.055 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %b.054 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add30, %for.body ]
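; r += src_data[3*(v+offset)] * kernel[v] * kernel2[v] * kernel3[v] * kernel4[v]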
  %add = add i64 %v.055, %offset
  %mul = mul i64 %add, 3
  %arrayidx = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %mul
  %0 = load float, float* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds [512 x float], [512 x float]* @kernel, i64 0, i64 %v.055
  %1 = load float, float* %arrayidx2, align 4
  %mul3 = fmul fast float %0, %1
  %arrayidx4 = getelementptr inbounds [512 x float], [512 x float]* @kernel2, i64 0, i64 %v.055
  %2 = load float, float* %arrayidx4, align 4
  %mul5 = fmul fast float %mul3, %2
  %arrayidx6 = getelementptr inbounds [512 x float], [512 x float]* @kernel3, i64 0, i64 %v.055
  %3 = load float, float* %arrayidx6, align 4
  %mul7 = fmul fast float %mul5, %3
  %arrayidx8 = getelementptr inbounds [512 x float], [512 x float]* @kernel4, i64 0, i64 %v.055
  %4 = load float, float* %arrayidx8, align 4
  %mul9 = fmul fast float %mul7, %4
  %add10 = fadd fast float %r.057, %mul9
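; g += src_data[3*(v+offset)+1] * the same four kernel values, reused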
  %arrayidx.sum = add i64 %mul, 1
  %arrayidx11 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum
  %5 = load float, float* %arrayidx11, align 4
  %mul13 = fmul fast float %1, %5
  %mul15 = fmul fast float %2, %mul13
  %mul17 = fmul fast float %3, %mul15
  %mul19 = fmul fast float %4, %mul17
  %add20 = fadd fast float %g.056, %mul19
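; b += src_data[3*(v+offset)+2] * the same four kernel values, reused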
  %arrayidx.sum52 = add i64 %mul, 2
  %arrayidx21 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum52
  %6 = load float, float* %arrayidx21, align 4
  %mul23 = fmul fast float %1, %6
  %mul25 = fmul fast float %2, %mul23
  %mul27 = fmul fast float %3, %mul25
  %mul29 = fmul fast float %4, %mul27
  %add30 = fadd fast float %b.054, %mul29
  %inc = add i64 %v.055, 1
  %exitcond = icmp ne i64 %inc, %size
  br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge

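; LCSSA exit block: the final reduction values are converted to i8 with
; fptoui before being stored to the globals in %for.end.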
for.cond.for.end_crit_edge:
  %add30.lcssa = phi float [ %add30, %for.body ]
  %add20.lcssa = phi float [ %add20, %for.body ]
  %add10.lcssa = phi float [ %add10, %for.body ]
  %phitmp = fptoui float %add10.lcssa to i8
  %phitmp60 = fptoui float %add20.lcssa to i8
  %phitmp61 = fptoui float %add30.lcssa to i8
  br label %for.end

for.end:
  %r.0.lcssa = phi i8 [ %phitmp, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  %g.0.lcssa = phi i8 [ %phitmp60, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  %b.0.lcssa = phi i8 [ %phitmp61, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  store i8 %r.0.lcssa, i8* @r_, align 1
  store i8 %g.0.lcssa, i8* @g_, align 1
  store i8 %b.0.lcssa, i8* @b_, align 1
  ret void
}