; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown < %s | FileCheck %s --check-prefix=X64 --check-prefix=X64-ALL
; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown --x86-lvi-load-no-cbranch < %s | FileCheck %s --check-prefix=X64
; RUN: llc -O0 -verify-machineinstrs -mtriple=x86_64-unknown < %s | FileCheck %s --check-prefix=X64-NOOPT
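; Verify that LVI load hardening (enabled via the +lvi-load-hardening target
; feature) inserts LFENCEs so that a transiently loaded value cannot be
; forwarded to dependent instructions (Load Value Injection, CVE-2020-0551).
; The first RUN line checks the default mitigation; the X64-ALL prefix
; additionally matches the fences guarding loads that only feed conditional
; branches, which the second RUN line suppresses with
; --x86-lvi-load-no-cbranch. The third RUN line checks the more aggressive
; fence placement used at -O0.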

; Function Attrs: noinline nounwind optnone uwtable
define dso_local i32 @test(i32** %secret, i32 %secret_size) #0 {
; X64-LABEL: test:
entry:
  %secret.addr = alloca i32**, align 8
  %secret_size.addr = alloca i32, align 4
  %ret_val = alloca i32, align 4
  %i = alloca i32, align 4
  store i32** %secret, i32*** %secret.addr, align 8
  store i32 %secret_size, i32* %secret_size.addr, align 4
  store i32 0, i32* %ret_val, align 4
  call void @llvm.x86.sse2.lfence()
  store i32 0, i32* %i, align 4
  br label %for.cond

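; The entry block only stores to the stack, so with optimization enabled the
; sole lfence is the one requested explicitly via @llvm.x86.sse2.lfence. At
; -O0 the mitigation makes no attempt to minimize the number of fences and
; emits an lfence in front of essentially every instruction that touches
; memory, as checked below.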
; X64: # %bb.0: # %entry
; X64-NEXT:      movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NEXT:      movl %esi, -{{[0-9]+}}(%rsp)
; X64-NEXT:      movl $0, -{{[0-9]+}}(%rsp)
; X64-NEXT:      lfence
; X64-NEXT:      movl $0, -{{[0-9]+}}(%rsp)
; X64-NEXT:      jmp .LBB0_1

; X64-NOOPT: # %bb.0: # %entry
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movl %esi, -{{[0-9]+}}(%rsp)
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movl $0, -{{[0-9]+}}(%rsp)
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movl $0, -{{[0-9]+}}(%rsp)

for.cond:                                         ; preds = %for.inc, %entry
  %0 = load i32, i32* %i, align 4
  %1 = load i32, i32* %secret_size.addr, align 4
  %cmp = icmp slt i32 %0, %1
  br i1 %cmp, label %for.body, label %for.end

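; The loads of %i and %secret_size feed only the loop's conditional branch,
; so the default mitigation fences them (X64-ALL) while the
; --x86-lvi-load-no-cbranch run leaves them unfenced; the plain X64 prefix
; therefore does not match any lfence here.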
; X64: .LBB0_1: # %for.cond
; X64-NEXT:      # =>This Inner Loop Header: Depth=1
; X64-NEXT:      movl -{{[0-9]+}}(%rsp), %eax
; X64-ALL-NEXT:  lfence
; X64-NEXT:      cmpl -{{[0-9]+}}(%rsp), %eax
; X64-ALL-NEXT:  lfence
; X64-NEXT:      jge .LBB0_5

; X64-NOOPT: .LBB0_1: # %for.cond
; X64-NOOPT-NEXT:      # =>This Inner Loop Header: Depth=1
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movl -{{[0-9]+}}(%rsp), %eax
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      cmpl -{{[0-9]+}}(%rsp), %eax
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      jge .LBB0_6

for.body:                                         ; preds = %for.cond
  %2 = load i32, i32* %i, align 4
  %rem = srem i32 %2, 2
  %cmp1 = icmp eq i32 %rem, 0
  br i1 %cmp1, label %if.then, label %if.end

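; The load of %i again reaches only a conditional branch, so its fence is
; likewise X64-ALL-only. With optimization enabled the srem by 2 is
; strength-reduced to shift/add/and arithmetic; at -O0 it stays an idivl.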
; X64: # %bb.2: # %for.body
; X64-NEXT: # in Loop: Header=BB0_1 Depth=1
; X64-NEXT:      movl -{{[0-9]+}}(%rsp), %eax
; X64-ALL-NEXT:  lfence
; X64-NEXT:      movl %eax, %ecx
; X64-NEXT:      shrl $31, %ecx
; X64-NEXT:      addl %eax, %ecx
; X64-NEXT:      andl $-2, %ecx
; X64-NEXT:      cmpl %ecx, %eax
; X64-NEXT:      jne .LBB0_4

; X64-NOOPT: # %bb.2: # %for.body
; X64-NOOPT-NEXT: # in Loop: Header=BB0_1 Depth=1
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movl -{{[0-9]+}}(%rsp), %eax
; X64-NOOPT-NEXT:      cltd
; X64-NOOPT-NEXT:      movl $2, %ecx
; X64-NOOPT-NEXT:      idivl %ecx
; X64-NOOPT-NEXT:      cmpl $0, %edx
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      jne .LBB0_4

if.then:                                          ; preds = %for.body
  %3 = load i32**, i32*** %secret.addr, align 8
  %4 = load i32, i32* %ret_val, align 4
  %idxprom = sext i32 %4 to i64
  %arrayidx = getelementptr inbounds i32*, i32** %3, i64 %idxprom
  %5 = load i32*, i32** %arrayidx, align 8
  %6 = load i32, i32* %5, align 4
  store i32 %6, i32* %ret_val, align 4
  br label %if.end

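; This block is the classic LVI gadget: the pointer and index are loaded
; from the stack, and the loaded pointer is then dereferenced. Each load
; whose result can form the address of a later access is followed by an
; lfence in every configuration; the final load of the i32 value feeds only
; a store's data operand, so it is not fenced when optimization is enabled.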
; X64: # %bb.3: # %if.then
; X64-NEXT: # in Loop: Header=BB0_1 Depth=1
; X64-NEXT:      movq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT:      lfence
; X64-NEXT:      movslq -{{[0-9]+}}(%rsp), %rcx
; X64-NEXT:      lfence
; X64-NEXT:      movq (%rax,%rcx,8), %rax
; X64-NEXT:      lfence
; X64-NEXT:      movl (%rax), %eax
; X64-NEXT:      movl %eax, -{{[0-9]+}}(%rsp)
; X64-NEXT:      jmp .LBB0_4

; X64-NOOPT: # %bb.3: # %if.then
; X64-NOOPT-NEXT: # in Loop: Header=BB0_1 Depth=1
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movq -{{[0-9]+}}(%rsp), %rax
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movslq -{{[0-9]+}}(%rsp), %rcx
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movq (%rax,%rcx,8), %rax
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movl (%rax), %eax
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movl %eax, -{{[0-9]+}}(%rsp)

if.end:                                           ; preds = %if.then, %for.body
  br label %for.inc

for.inc:                                          ; preds = %if.end
  %7 = load i32, i32* %i, align 4
  %inc = add nsw i32 %7, 1
  store i32 %inc, i32* %i, align 4
  br label %for.cond

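; With optimization enabled, if.end/for.inc fold into .LBB0_4, where the
; reloaded %i feeds only the increment and a store and thus needs no fence,
; so only the -O0 output is checked here.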
; X64-NOOPT: .LBB0_5: # %for.inc
; X64-NOOPT-NEXT: # in Loop: Header=BB0_1 Depth=1
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movl -{{[0-9]+}}(%rsp), %eax
; X64-NOOPT-NEXT:      addl $1, %eax
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      movl %eax, -{{[0-9]+}}(%rsp)
; X64-NOOPT-NEXT:      lfence
; X64-NOOPT-NEXT:      jmp .LBB0_1

for.end:                                          ; preds = %for.cond
  %8 = load i32, i32* %ret_val, align 4
  ret i32 %8
}

; Function Attrs: nounwind
declare void @llvm.x86.sse2.lfence() #1

attributes #0 = { "target-features"="+lvi-load-hardening" }
attributes #1 = { nounwind }