; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst -enable-mem-access-versioning=false -tail-predication=force-enabled %s -o - | FileCheck %s

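; A tail-predicated reduction loop (dls/le with vctp) combining a contiguous
; masked load with a gather that uses the vector-base-plus-immediate
; addressing mode with writeback: vldrwt.u32 q3, [q0, #80]!.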
define dso_local void @mve_gather_qi_wb(i32* noalias nocapture readonly %A, i32* noalias nocapture readonly %B, i32* noalias nocapture %C, i32 %n, i32 %m, i32 %l) {
; CHECK-LABEL: mve_gather_qi_wb:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    add.w r12, r0, r3, lsl #2
; CHECK-NEXT:    adr r0, .LCPI0_0
; CHECK-NEXT:    vldrw.u32 q0, [r0]
; CHECK-NEXT:    movw lr, #1250
; CHECK-NEXT:    vmov.i32 q1, #0x0
; CHECK-NEXT:    dls lr, lr
; CHECK-NEXT:    vadd.i32 q0, q0, r1
; CHECK-NEXT:    adds r1, r3, #4
; CHECK-NEXT:  .LBB0_1: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.32 r3
; CHECK-NEXT:    vmov q2, q1
; CHECK-NEXT:    vpstt
; CHECK-NEXT:    vldrwt.u32 q1, [r12], #16
; CHECK-NEXT:    vldrwt.u32 q3, [q0, #80]!
; CHECK-NEXT:    subs r3, #4
; CHECK-NEXT:    vmul.i32 q1, q3, q1
; CHECK-NEXT:    vadd.i32 q1, q2, q1
; CHECK-NEXT:    le lr, .LBB0_1
; CHECK-NEXT:  @ %bb.2: @ %middle.block
; CHECK-NEXT:    vpsel q0, q1, q2
; CHECK-NEXT:    vaddv.u32 r0, q0
; CHECK-NEXT:    str.w r0, [r2, r1, lsl #2]
; CHECK-NEXT:    pop {r7, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.3:
; CHECK-NEXT:  .LCPI0_0:
; CHECK-NEXT:    .long 4294967228 @ 0xffffffbc
; CHECK-NEXT:    .long 4294967248 @ 0xffffffd0
; CHECK-NEXT:    .long 4294967268 @ 0xffffffe4
; CHECK-NEXT:    .long 4294967288 @ 0xfffffff8
entry:
  %add.us.us = add i32 4, %n
  %arrayidx.us.us = getelementptr inbounds i32, i32* %C, i32 %add.us.us
  br label %vector.body
vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %7, %vector.body ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %entry ], [ %vec.ind.next, %vector.body ]
  %0 = add i32 %index, %n
  %1 = getelementptr inbounds i32, i32* %A, i32 %0
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
  %2 = bitcast i32* %1 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %3 = mul <4 x i32> %vec.ind, <i32 5, i32 5, i32 5, i32 5>
  %4 = add <4 x i32> %3, <i32 3, i32 3, i32 3, i32 3>
  %5 = getelementptr inbounds i32, i32* %B, <4 x i32> %4
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %5, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %6 = mul nsw <4 x i32> %wide.masked.gather, %wide.masked.load
  %7 = add <4 x i32> %vec.phi, %6
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  %8 = icmp eq i32 %index.next, 5000
  br i1 %8, label %middle.block, label %vector.body
middle.block:                                     ; preds = %vector.body
  %9 = select <4 x i1> %active.lane.mask, <4 x i32> %7, <4 x i32> %vec.phi
  %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %9)
  store i32 %10, i32* %arrayidx.us.us, align 4
  %inc21.us.us = add nuw i32 4, 1
  %exitcond81.not = icmp eq i32 %inc21.us.us, %n
  br label %end
end:                                 ; preds = %middle.block
  ret void
}

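; As above, but here both the gather and the scatter use the
; register-base-plus-scaled-vector-offset form [r1, q1, uxtw #2], with the
; offset vector advanced by a vector add each iteration.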
define dso_local void @mve_gatherscatter_offset(i32* noalias nocapture readonly %A, i32* noalias nocapture readonly %B, i32* noalias nocapture %C, i32 %n, i32 %m, i32 %l) {
; CHECK-LABEL: mve_gatherscatter_offset:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    add.w r4, r0, r3, lsl #2
; CHECK-NEXT:    adr r0, .LCPI1_0
; CHECK-NEXT:    movw lr, #1250
; CHECK-NEXT:    vldrw.u32 q1, [r0]
; CHECK-NEXT:    add.w r12, r3, #4
; CHECK-NEXT:    vmov.i32 q2, #0x0
; CHECK-NEXT:    vmov.i32 q0, #0x14
; CHECK-NEXT:    dls lr, lr
; CHECK-NEXT:  .LBB1_1: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.32 r3
; CHECK-NEXT:    vmov q3, q2
; CHECK-NEXT:    vpstt
; CHECK-NEXT:    vldrwt.u32 q2, [r1, q1, uxtw #2]
; CHECK-NEXT:    vldrwt.u32 q4, [r4], #16
; CHECK-NEXT:    subs r3, #4
; CHECK-NEXT:    vmul.i32 q2, q2, q4
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vstrwt.32 q2, [r1, q1, uxtw #2]
; CHECK-NEXT:    vadd.i32 q1, q1, q0
; CHECK-NEXT:    vadd.i32 q2, q3, q2
; CHECK-NEXT:    le lr, .LBB1_1
; CHECK-NEXT:  @ %bb.2: @ %middle.block
; CHECK-NEXT:    vpsel q0, q2, q3
; CHECK-NEXT:    vaddv.u32 r0, q0
; CHECK-NEXT:    str.w r0, [r2, r12, lsl #2]
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.3:
; CHECK-NEXT:  .LCPI1_0:
; CHECK-NEXT:    .long 3 @ 0x3
; CHECK-NEXT:    .long 8 @ 0x8
; CHECK-NEXT:    .long 13 @ 0xd
; CHECK-NEXT:    .long 18 @ 0x12
entry:
  %add.us.us = add i32 4, %n
  %arrayidx.us.us = getelementptr inbounds i32, i32* %C, i32 %add.us.us
  br label %vector.body
vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %7, %vector.body ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %entry ], [ %vec.ind.next, %vector.body ]
  %0 = add i32 %index, %n
  %1 = getelementptr inbounds i32, i32* %A, i32 %0
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
  %2 = bitcast i32* %1 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %3 = mul <4 x i32> %vec.ind, <i32 5, i32 5, i32 5, i32 5>
  %4 = add <4 x i32> %3, <i32 3, i32 3, i32 3, i32 3>
  %5 = getelementptr inbounds i32, i32* %B, <4 x i32> %4
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %5, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %6 = mul nsw <4 x i32> %wide.masked.gather, %wide.masked.load
  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %6, <4 x i32*> %5, i32 4, <4 x i1> %active.lane.mask)
  %7 = add <4 x i32> %vec.phi, %6
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  %8 = icmp eq i32 %index.next, 5000
  br i1 %8, label %middle.block, label %vector.body
middle.block:                                     ; preds = %vector.body
  %9 = select <4 x i1> %active.lane.mask, <4 x i32> %7, <4 x i32> %vec.phi
  %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %9)
  store i32 %10, i32* %arrayidx.us.us, align 4
  %inc21.us.us = add nuw i32 4, 1
  %exitcond81.not = icmp eq i32 %inc21.us.us, %n
  br label %end
end:                                 ; preds = %middle.block
  ret void
}
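
; The same tail-predicated reduction loop, this time storing through a
; scatter that uses the vector-base-plus-immediate addressing mode with
; writeback: vstrwt.32 q1, [q0, #80]!.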
define dso_local void @mve_scatter_qi(i32* noalias nocapture readonly %A, i32* noalias nocapture readonly %B, i32* noalias nocapture %C, i32 %n, i32 %m, i32 %l) {
; CHECK-LABEL: mve_scatter_qi:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    add.w r12, r0, r3, lsl #2
; CHECK-NEXT:    adr r0, .LCPI2_0
; CHECK-NEXT:    vldrw.u32 q0, [r0]
; CHECK-NEXT:    movw lr, #1250
; CHECK-NEXT:    vmov.i32 q1, #0x0
; CHECK-NEXT:    dls lr, lr
; CHECK-NEXT:    vadd.i32 q0, q0, r1
; CHECK-NEXT:    adds r1, r3, #4
; CHECK-NEXT:    vmov.i32 q2, #0x3
; CHECK-NEXT:  .LBB2_1: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.32 r3
; CHECK-NEXT:    vmov q3, q1
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrwt.u32 q1, [r12], #16
; CHECK-NEXT:    subs r3, #4
; CHECK-NEXT:    vmul.i32 q1, q1, q2
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vstrwt.32 q1, [q0, #80]!
; CHECK-NEXT:    vadd.i32 q1, q3, q1
; CHECK-NEXT:    le lr, .LBB2_1
; CHECK-NEXT:  @ %bb.2: @ %middle.block
; CHECK-NEXT:    vpsel q0, q1, q3
; CHECK-NEXT:    vaddv.u32 r0, q0
; CHECK-NEXT:    str.w r0, [r2, r1, lsl #2]
; CHECK-NEXT:    pop {r7, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.3:
; CHECK-NEXT:  .LCPI2_0:
; CHECK-NEXT:    .long 4294967228 @ 0xffffffbc
; CHECK-NEXT:    .long 4294967248 @ 0xffffffd0
; CHECK-NEXT:    .long 4294967268 @ 0xffffffe4
; CHECK-NEXT:    .long 4294967288 @ 0xfffffff8
entry:
  %add.us.us = add i32 4, %n
  %arrayidx.us.us = getelementptr inbounds i32, i32* %C, i32 %add.us.us
  br label %vector.body
vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %7, %vector.body ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %entry ], [ %vec.ind.next, %vector.body ]
  %0 = add i32 %index, %n
  %1 = getelementptr inbounds i32, i32* %A, i32 %0
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
  %2 = bitcast i32* %1 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %3 = mul <4 x i32> %vec.ind, <i32 5, i32 5, i32 5, i32 5>
  %4 = add <4 x i32> %3, <i32 3, i32 3, i32 3, i32 3>
  %5 = getelementptr inbounds i32, i32* %B, <4 x i32> %4
  %6 = mul nsw <4 x i32> <i32 3, i32 3, i32 3, i32 3>, %wide.masked.load
  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %6, <4 x i32*> %5, i32 4, <4 x i1> %active.lane.mask)
  %7 = add <4 x i32> %vec.phi, %6
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  %8 = icmp eq i32 %index.next, 5000
  br i1 %8, label %middle.block, label %vector.body
middle.block:                                     ; preds = %vector.body
  %9 = select <4 x i1> %active.lane.mask, <4 x i32> %7, <4 x i32> %vec.phi
  %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %9)
  store i32 %10, i32* %arrayidx.us.us, align 4
  %inc21.us.us = add nuw i32 4, 1
  %exitcond81.not = icmp eq i32 %inc21.us.us, %n
  br label %end
end:                                 ; preds = %middle.block
  ret void
}

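; Byte gathers and scatters over three interleaved lanes (stride 3, offset
; vectors <0,3,6,9>, <1,4,7,10> and <2,5,8,11>) in a tail-predicated loop;
; the constant offset and coefficient vectors are spilled to the stack and
; reloaded inside the loop.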
define void @justoffsets(i8* noalias nocapture readonly %r, i8* noalias nocapture %w, i32 %N) {
; CHECK-LABEL: justoffsets:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    .pad #216
; CHECK-NEXT:    sub sp, #216
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    beq.w .LBB3_3
; CHECK-NEXT:  @ %bb.1: @ %vector.ph
; CHECK-NEXT:    adds r3, r2, #3
; CHECK-NEXT:    adr r7, .LCPI3_5
; CHECK-NEXT:    bic r3, r3, #3
; CHECK-NEXT:    vmov.i32 q0, #0x8000
; CHECK-NEXT:    sub.w r12, r3, #4
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    adr r6, .LCPI3_4
; CHECK-NEXT:    adr r5, .LCPI3_3
; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
; CHECK-NEXT:    adr r4, .LCPI3_2
; CHECK-NEXT:    dls lr, lr
; CHECK-NEXT:    vstrw.32 q0, [sp, #160] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r7]
; CHECK-NEXT:    adr.w r8, .LCPI3_1
; CHECK-NEXT:    adr.w r12, .LCPI3_0
; CHECK-NEXT:    adr r3, .LCPI3_6
; CHECK-NEXT:    vstrw.32 q0, [sp, #176] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r6]
; CHECK-NEXT:    vldrw.u32 q1, [r3]
; CHECK-NEXT:    adr r3, .LCPI3_7
; CHECK-NEXT:    vstrw.32 q0, [sp, #144] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r5]
; CHECK-NEXT:    adr r6, .LCPI3_10
; CHECK-NEXT:    adr r7, .LCPI3_9
; CHECK-NEXT:    vstrw.32 q0, [sp, #128] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r4]
; CHECK-NEXT:    vstrw.32 q1, [sp, #192] @ 16-byte Spill
; CHECK-NEXT:    vstrw.32 q0, [sp, #112] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r8]
; CHECK-NEXT:    vstrw.32 q0, [sp, #96] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r12]
; CHECK-NEXT:    vstrw.32 q0, [sp, #80] @ 16-byte Spill
; CHECK-NEXT:    vmov.i32 q0, #0x7fff
; CHECK-NEXT:    vstrw.32 q0, [sp, #64] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r3]
; CHECK-NEXT:    adr r3, .LCPI3_8
; CHECK-NEXT:    vstrw.32 q0, [sp, #48] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r6]
; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r7]
; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r3]
; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
; CHECK-NEXT:  .LBB3_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrw.u32 q0, [sp, #192] @ 16-byte Reload
; CHECK-NEXT:    vctp.32 r2
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrbt.u32 q4, [r0, q0]
; CHECK-NEXT:    vldrw.u32 q0, [sp, #176] @ 16-byte Reload
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrbt.u32 q7, [r0, q0]
; CHECK-NEXT:    vldrw.u32 q0, [sp, #144] @ 16-byte Reload
; CHECK-NEXT:    vldrw.u32 q5, [sp, #112] @ 16-byte Reload
; CHECK-NEXT:    subs r2, #4
; CHECK-NEXT:    vmul.i32 q6, q7, q0
; CHECK-NEXT:    vldrw.u32 q0, [sp, #128] @ 16-byte Reload
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrbt.u32 q1, [r0, q5]
; CHECK-NEXT:    vldrw.u32 q2, [sp, #80] @ 16-byte Reload
; CHECK-NEXT:    vmul.i32 q3, q4, q0
; CHECK-NEXT:    vldrw.u32 q0, [sp, #96] @ 16-byte Reload
; CHECK-NEXT:    vadd.i32 q3, q3, q6
; CHECK-NEXT:    adds r0, #12
; CHECK-NEXT:    vmul.i32 q6, q1, q0
; CHECK-NEXT:    vldrw.u32 q0, [sp, #160] @ 16-byte Reload
; CHECK-NEXT:    vadd.i32 q3, q3, q6
; CHECK-NEXT:    vadd.i32 q3, q3, q0
; CHECK-NEXT:    vshr.u32 q6, q3, #16
; CHECK-NEXT:    vmul.i32 q3, q7, q2
; CHECK-NEXT:    vldrw.u32 q2, [sp, #64] @ 16-byte Reload
; CHECK-NEXT:    vmul.i32 q2, q4, q2
; CHECK-NEXT:    vadd.i32 q2, q2, q3
; CHECK-NEXT:    vldrw.u32 q3, [sp, #48] @ 16-byte Reload
; CHECK-NEXT:    vmul.i32 q3, q1, q3
; CHECK-NEXT:    vadd.i32 q2, q2, q3
; CHECK-NEXT:    vldrw.u32 q3, [sp, #32] @ 16-byte Reload
; CHECK-NEXT:    vadd.i32 q2, q2, q0
; CHECK-NEXT:    vmul.i32 q3, q7, q3
; CHECK-NEXT:    vldrw.u32 q7, [sp, #16] @ 16-byte Reload
; CHECK-NEXT:    vshr.u32 q2, q2, #16
; CHECK-NEXT:    vmul.i32 q4, q4, q7
; CHECK-NEXT:    vadd.i32 q3, q4, q3
; CHECK-NEXT:    vldrw.u32 q4, [sp] @ 16-byte Reload
; CHECK-NEXT:    vmul.i32 q1, q1, q4
; CHECK-NEXT:    vadd.i32 q1, q3, q1
; CHECK-NEXT:    vadd.i32 q1, q1, q0
; CHECK-NEXT:    vldrw.u32 q0, [sp, #192] @ 16-byte Reload
; CHECK-NEXT:    vshr.u32 q1, q1, #16
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vstrbt.32 q1, [r1, q0]
; CHECK-NEXT:    vldrw.u32 q0, [sp, #176] @ 16-byte Reload
; CHECK-NEXT:    vpstt
; CHECK-NEXT:    vstrbt.32 q2, [r1, q0]
; CHECK-NEXT:    vstrbt.32 q6, [r1, q5]
; CHECK-NEXT:    adds r1, #12
; CHECK-NEXT:    le lr, .LBB3_2
; CHECK-NEXT:  .LBB3_3: @ %for.cond.cleanup
; CHECK-NEXT:    add sp, #216
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.4:
; CHECK-NEXT:  .LCPI3_0:
; CHECK-NEXT:    .long 4294952177 @ 0xffffc4f1
; CHECK-NEXT:    .long 4294952177 @ 0xffffc4f1
; CHECK-NEXT:    .long 4294952177 @ 0xffffc4f1
; CHECK-NEXT:    .long 4294952177 @ 0xffffc4f1
; CHECK-NEXT:  .LCPI3_1:
; CHECK-NEXT:    .long 19485 @ 0x4c1d
; CHECK-NEXT:    .long 19485 @ 0x4c1d
; CHECK-NEXT:    .long 19485 @ 0x4c1d
; CHECK-NEXT:    .long 19485 @ 0x4c1d
; CHECK-NEXT:  .LCPI3_2:
; CHECK-NEXT:    .long 2 @ 0x2
; CHECK-NEXT:    .long 5 @ 0x5
; CHECK-NEXT:    .long 8 @ 0x8
; CHECK-NEXT:    .long 11 @ 0xb
; CHECK-NEXT:  .LCPI3_3:
; CHECK-NEXT:    .long 13282 @ 0x33e2
; CHECK-NEXT:    .long 13282 @ 0x33e2
; CHECK-NEXT:    .long 13282 @ 0x33e2
; CHECK-NEXT:    .long 13282 @ 0x33e2
; CHECK-NEXT:  .LCPI3_4:
; CHECK-NEXT:    .long 4294934529 @ 0xffff8001
; CHECK-NEXT:    .long 4294934529 @ 0xffff8001
; CHECK-NEXT:    .long 4294934529 @ 0xffff8001
; CHECK-NEXT:    .long 4294934529 @ 0xffff8001
; CHECK-NEXT:  .LCPI3_5:
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 4 @ 0x4
; CHECK-NEXT:    .long 7 @ 0x7
; CHECK-NEXT:    .long 10 @ 0xa
; CHECK-NEXT:  .LCPI3_6:
; CHECK-NEXT:    .long 0 @ 0x0
; CHECK-NEXT:    .long 3 @ 0x3
; CHECK-NEXT:    .long 6 @ 0x6
; CHECK-NEXT:    .long 9 @ 0x9
; CHECK-NEXT:  .LCPI3_7:
; CHECK-NEXT:    .long 4294949648 @ 0xffffbb10
; CHECK-NEXT:    .long 4294949648 @ 0xffffbb10
; CHECK-NEXT:    .long 4294949648 @ 0xffffbb10
; CHECK-NEXT:    .long 4294949648 @ 0xffffbb10
; CHECK-NEXT:  .LCPI3_8:
; CHECK-NEXT:    .long 7471 @ 0x1d2f
; CHECK-NEXT:    .long 7471 @ 0x1d2f
; CHECK-NEXT:    .long 7471 @ 0x1d2f
; CHECK-NEXT:    .long 7471 @ 0x1d2f
; CHECK-NEXT:  .LCPI3_9:
; CHECK-NEXT:    .long 19595 @ 0x4c8b
; CHECK-NEXT:    .long 19595 @ 0x4c8b
; CHECK-NEXT:    .long 19595 @ 0x4c8b
; CHECK-NEXT:    .long 19595 @ 0x4c8b
; CHECK-NEXT:  .LCPI3_10:
; CHECK-NEXT:    .long 38470 @ 0x9646
; CHECK-NEXT:    .long 38470 @ 0x9646
; CHECK-NEXT:    .long 38470 @ 0x9646
; CHECK-NEXT:    .long 38470 @ 0x9646
entry:
  %cmp47.not = icmp eq i32 %N, 0
  br i1 %cmp47.not, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %pointer.phi = phi i8* [ %r, %vector.ph ], [ %ptr.ind, %vector.body ]
  %pointer.phi55 = phi i8* [ %w, %vector.ph ], [ %ptr.ind56, %vector.body ]
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %l1 = getelementptr i8, i8* %pointer.phi, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
  %l2 = getelementptr i8, i8* %pointer.phi55, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
  %l3 = getelementptr inbounds i8, <4 x i8*> %l1, i32 1
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %wide.masked.gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %l1, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
  %l4 = getelementptr inbounds i8, <4 x i8*> %l1, i32 2
  %wide.masked.gather57 = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %l3, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
  %wide.masked.gather58 = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %l4, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
  %l5 = zext <4 x i8> %wide.masked.gather to <4 x i32>
  %l6 = mul nuw nsw <4 x i32> %l5, <i32 19595, i32 19595, i32 19595, i32 19595>
  %l7 = zext <4 x i8> %wide.masked.gather57 to <4 x i32>
  %l8 = mul nuw nsw <4 x i32> %l7, <i32 38470, i32 38470, i32 38470, i32 38470>
  %l9 = zext <4 x i8> %wide.masked.gather58 to <4 x i32>
  %l10 = mul nuw nsw <4 x i32> %l9, <i32 7471, i32 7471, i32 7471, i32 7471>
  %l11 = add nuw nsw <4 x i32> %l6, <i32 32768, i32 32768, i32 32768, i32 32768>
  %l12 = add nuw nsw <4 x i32> %l11, %l8
  %l13 = add nuw nsw <4 x i32> %l12, %l10
  %l14 = lshr <4 x i32> %l13, <i32 16, i32 16, i32 16, i32 16>
  %l15 = trunc <4 x i32> %l14 to <4 x i8>
  %l16 = mul nuw nsw <4 x i32> %l5, <i32 32767, i32 32767, i32 32767, i32 32767>
  %l17 = mul nsw <4 x i32> %l7, <i32 -15119, i32 -15119, i32 -15119, i32 -15119>
  %l18 = mul nsw <4 x i32> %l9, <i32 -17648, i32 -17648, i32 -17648, i32 -17648>
  %l19 = add nuw nsw <4 x i32> %l16, <i32 32768, i32 32768, i32 32768, i32 32768>
  %l20 = add nsw <4 x i32> %l19, %l17
  %l21 = add nsw <4 x i32> %l20, %l18
  %l22 = lshr <4 x i32> %l21, <i32 16, i32 16, i32 16, i32 16>
  %l23 = trunc <4 x i32> %l22 to <4 x i8>
  %l24 = mul nuw nsw <4 x i32> %l5, <i32 13282, i32 13282, i32 13282, i32 13282>
  %l25 = mul nsw <4 x i32> %l7, <i32 -32767, i32 -32767, i32 -32767, i32 -32767>
  %l26 = mul nuw nsw <4 x i32> %l9, <i32 19485, i32 19485, i32 19485, i32 19485>
  %l27 = add nuw nsw <4 x i32> %l24, <i32 32768, i32 32768, i32 32768, i32 32768>
  %l28 = add nsw <4 x i32> %l27, %l25
  %l29 = add nsw <4 x i32> %l28, %l26
  %l30 = lshr <4 x i32> %l29, <i32 16, i32 16, i32 16, i32 16>
  %l31 = trunc <4 x i32> %l30 to <4 x i8>
  %l32 = getelementptr inbounds i8, <4 x i8*> %l2, i32 1
  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %l15, <4 x i8*> %l2, i32 1, <4 x i1> %active.lane.mask)
  %l33 = getelementptr inbounds i8, <4 x i8*> %l2, i32 2
  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %l23, <4 x i8*> %l32, i32 1, <4 x i1> %active.lane.mask)
  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %l31, <4 x i8*> %l33, i32 1, <4 x i1> %active.lane.mask)
  %index.next = add i32 %index, 4
  %l34 = icmp eq i32 %index.next, %n.vec
  %ptr.ind = getelementptr i8, i8* %pointer.phi, i32 12
  %ptr.ind56 = getelementptr i8, i8* %pointer.phi55, i32 12
  br i1 %l34, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}

declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
declare <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
declare void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8>, <4 x i8*>, i32, <4 x i1>)