1; RUN: opt < %s -bounds-checking -S | FileCheck %s
2; RUN: opt < %s -passes=bounds-checking -S | FileCheck %s
3target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
4
5@.str = private constant [8 x i8] c"abcdefg\00"   ; <[8 x i8]*>
6
7@.str_as1 = private addrspace(1) constant [8 x i8] c"abcdefg\00"   ; <[8 x i8] addrspace(1)*>
8
9
10declare noalias i8* @malloc(i64) nounwind
11declare noalias i8* @calloc(i64, i64) nounwind
12declare noalias i8* @realloc(i8* nocapture, i64) nounwind
13
; CHECK: @f1
; Constant offset fully inside a constant-size malloc'd buffer:
; i32 index 2 = byte offset 8; the 4-byte store ends at byte 12 <= 32,
; so the pass must statically prove safety and emit no trap.
define void @f1() nounwind {
  %1 = tail call i8* @malloc(i64 32)
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 2
; CHECK-NOT: trap
  store i32 3, i32* %idx, align 4
  ret void
}
23
; CHECK: @f2
; Constant offset past the end of a constant-size buffer:
; i32 index 8 = byte offset 32; the 4-byte store would end at byte 36 > 32,
; so the pass must emit a trap.
define void @f2() nounwind {
  %1 = tail call i8* @malloc(i64 32)
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 8
; CHECK: trap
  store i32 3, i32* %idx, align 4
  ret void
}
33
; CHECK: @f3
; Runtime-sized allocation: calloc(4, %x) has size 4 * %x bytes, unknown at
; compile time. The pass must materialize the size (mul), compute the bytes
; remaining after offset 32 (sub), and emit both the size and offset
; comparisons combined with 'or' before branching to the trap.
define void @f3(i64 %x) nounwind {
  %1 = tail call i8* @calloc(i64 4, i64 %x)
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 8
; CHECK: mul i64 4, %
; CHECK: sub i64 {{.*}}, 32
; CHECK-NEXT: icmp ult i64 {{.*}}, 32
; CHECK-NEXT: icmp ult i64 {{.*}}, 4
; CHECK-NEXT: or i1
; CHECK: trap
  store i32 3, i32* %idx, align 4
  ret void
}
48
; CHECK: @store_volatile
; Same shape as @f3, but the store is volatile: the pass must leave
; volatile accesses uninstrumented, so no trap may appear.
define void @store_volatile(i64 %x) nounwind {
  %1 = tail call i8* @calloc(i64 4, i64 %x)
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 8
; CHECK-NOT: trap
  store volatile i32 3, i32* %idx, align 4
  ret void
}
58
; CHECK: @f4
; realloc(null, %x) yields a runtime-sized allocation; the load at byte
; offset 32 cannot be proven in bounds, so a guarded trap must be emitted.
define void @f4(i64 %x) nounwind {
  %1 = tail call i8* @realloc(i8* null, i64 %x) nounwind
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 8
; CHECK: trap
  %3 = load i32, i32* %idx, align 4
  ret void
}
68
; CHECK: @f5
; Variable index %x into an 8-byte global: the size is known but the offset
; is not, so a runtime bounds check with a trap must be emitted.
define void @f5(i64 %x) nounwind {
  %idx = getelementptr inbounds [8 x i8], [8 x i8]* @.str, i64 0, i64 %x
; CHECK: trap
  %1 = load i8, i8* %idx, align 4
  ret void
}
76
; Same as @f5 but through addrspace(1), whose pointers are 16-bit per the
; datalayout (p1:16:16:16): the check arithmetic must be done in i16,
; not in the default 64-bit pointer width.
define void @f5_as1(i64 %x) nounwind {
; CHECK: @f5_as1
  %idx = getelementptr inbounds [8 x i8], [8 x i8] addrspace(1)* @.str_as1, i64 0, i64 %x
  ; CHECK: sub i16
  ; CHECK: icmp ult i16
; CHECK: trap
  %1 = load i8, i8 addrspace(1)* %idx, align 4
  ret void
}
86
; CHECK: @f6
; Loading an i128 from an alloca of exactly one i128 is trivially in
; bounds: no instrumentation expected.
define void @f6(i64 %x) nounwind {
  %1 = alloca i128
; CHECK-NOT: trap
  %2 = load i128, i128* %1, align 4
  ret void
}
94
; CHECK: @f7
; Runtime-count alloca: size is 16 * %x bytes, so the pass must emit the
; size computation (mul i64 16, ...) and a guarded trap for the load.
define void @f7(i64 %x) nounwind {
  %1 = alloca i128, i64 %x
; CHECK: mul i64 16,
; CHECK: trap
  %2 = load i128, i128* %1, align 4
  ret void
}
103
; CHECK: @f8
; Select between two fixed-size allocas: either incoming object is large
; enough for the i128 load, so no trap may be emitted.
define void @f8() nounwind {
  %1 = alloca i128
  %2 = alloca i128
  %3 = select i1 undef, i128* %1, i128* %2
; CHECK-NOT: trap
  %4 = load i128, i128* %3, align 4
  ret void
}
113
; CHECK: @f9
; Select where one operand (%arg) has unknown size: the test expects the
; pass to emit no trap for this access (presumably because the unknown
; operand prevents instrumentation — NOTE(review): confirm against pass).
define void @f9(i128* %arg) nounwind {
  %1 = alloca i128
  %2 = select i1 undef, i128* %arg, i128* %1
; CHECK-NOT: trap
  %3 = load i128, i128* %2, align 4
  ret void
}
122
; CHECK: @f10
; Select between two runtime-sized allocas: the pass must merge the two
; candidate sizes/offsets with selects mirroring the pointer select, then
; emit a guarded trap.
define void @f10(i64 %x, i64 %y) nounwind {
  %1 = alloca i128, i64 %x
  %2 = alloca i128, i64 %y
  %3 = select i1 undef, i128* %1, i128* %2
; CHECK: select
; CHECK: select
; CHECK: trap
  %4 = load i128, i128* %3, align 4
  ret void
}
134
; CHECK: @f11
; byval(i128) argument is a known 16-byte object; the load at byte offset
; 16 is one past its end, so instrumentation (the check's branch) must be
; inserted before the load.
define void @f11(i128* byval(i128) %x) nounwind {
  %1 = bitcast i128* %x to i8*
  %2 = getelementptr inbounds i8, i8* %1, i64 16
; CHECK: br label
  %3 = load i8, i8* %2, align 4
  ret void
}
143
; CHECK: @f11_as1
; Same as @f11 but in addrspace(1) with its 16-bit pointers; the
; instrumentation branch must still be emitted for the past-the-end load.
define void @f11_as1(i128 addrspace(1)* byval(i128) %x) nounwind {
  %1 = bitcast i128 addrspace(1)* %x to i8 addrspace(1)*
  %2 = getelementptr inbounds i8, i8 addrspace(1)* %1, i16 16
; CHECK: br label
  %3 = load i8, i8 addrspace(1)* %2, align 4
  ret void
}
152
; CHECK: @f12
; Both size (calloc of %x bytes) and offset (i64 index %y, i.e. 8 * %y
; bytes) are runtime values: the pass must compute the byte offset
; (mul i64 %y, 8) and emit a guarded trap before the load.
define i64 @f12(i64 %x, i64 %y) nounwind {
  %1 = tail call i8* @calloc(i64 1, i64 %x)
; CHECK: mul i64 %y, 8
; CHECK: trap
  %2 = bitcast i8* %1 to i64*
  %3 = getelementptr inbounds i64, i64* %2, i64 %y
  %4 = load i64, i64* %3, align 8
  ret i64 %4
}
163
; CHECK: @load_volatile
; Same shape as @f12, but the load is volatile: the pass must leave
; volatile accesses uninstrumented, so no trap may appear.
define i64 @load_volatile(i64 %x, i64 %y) nounwind {
  %1 = tail call i8* @calloc(i64 1, i64 %x)
; CHECK-NOT: trap
  %2 = bitcast i8* %1 to i64*
  %3 = getelementptr inbounds i64, i64* %2, i64 %y
  %4 = load volatile i64, i64* %3, align 8
  ret i64 %4
}
173
; PR17402
; Regression test: a GEP that refers to itself is only possible in
; unreachable code, and the pass must neither crash nor rewrite it.
; CHECK-LABEL: @f13
define void @f13() nounwind {
entry:
  br label %alive

dead:
  ; Self-referential GEPs can occur in dead code.
  %incdec.ptr = getelementptr inbounds i32, i32* %incdec.ptr, i64 1
  ; CHECK: %incdec.ptr = getelementptr inbounds i32, i32* %incdec.ptr
  %l = load i32, i32* %incdec.ptr
  br label %alive

alive:
  ret void
}
190