; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
; RUN: %s
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S          \
; RUN: -passes=msan 2>&1 | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s
; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S          \
; RUN: -passes=msan 2>&1 | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck %s
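
; This file tests MemorySanitizer instrumentation of atomic operations
; (atomicrmw, cmpxchg, atomic load, atomic store) with both pass managers and
; at each origin-tracking level; all RUN configurations are verified against
; the same CHECK lines below.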

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; atomicrmw xchg: store clean shadow, return clean shadow

define i32 @AtomicRmwXchg(i32* %p, i32 %x) sanitize_memory {
entry:
  %0 = atomicrmw xchg i32* %p, i32 %x seq_cst
  ret i32 %0
}

; CHECK-LABEL: @AtomicRmwXchg
; CHECK: store i32 0,
; CHECK: atomicrmw xchg {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
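
; Rationale (informal): MSan does not model the value produced by an atomic
; read-modify-write, so it conservatively writes a clean (all-zero) shadow for
; the target memory before the operation and treats the returned value as
; fully initialized.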


; atomicrmw max: exactly the same as above

define i32 @AtomicRmwMax(i32* %p, i32 %x) sanitize_memory {
entry:
  %0 = atomicrmw max i32* %p, i32 %x seq_cst
  ret i32 %0
}

; CHECK-LABEL: @AtomicRmwMax
; CHECK: store i32 0,
; CHECK: atomicrmw max {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32


; cmpxchg: the same as above, but also check %a shadow

define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
  %pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst
  %0 = extractvalue { i32, i1 } %pair, 0
  ret i32 %0
}

; CHECK-LABEL: @Cmpxchg
; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning_with_origin
; CHECK: cmpxchg {{.*}} seq_cst seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
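
; Rationale (informal): only the compare operand %a is checked (the icmp/br
; into the warning call above), since an uninitialized compare value makes the
; outcome of the cmpxchg unpredictable; the shadow written for the target
; memory and for the result is clean, as with atomicrmw.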


; relaxed cmpxchg: bump up to "release monotonic"

define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
  %pair = cmpxchg i32* %p, i32 %a, i32 %b monotonic monotonic
  %0 = extractvalue { i32, i1 } %pair, 0
  ret i32 %0
}

; CHECK-LABEL: @CmpxchgMonotonic
; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning_with_origin
; CHECK: cmpxchg {{.*}} release monotonic
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
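
; Rationale (informal): because the instrumentation writes shadow around the
; atomic operation, a fully relaxed cmpxchg is not strong enough; the success
; ordering is bumped from monotonic to release while the failure ordering
; stays monotonic, giving the "release monotonic" pair checked above.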


; atomic load: preserve alignment, load shadow value after app value

define i32 @AtomicLoad(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32, i32* %p seq_cst, align 16
  ret i32 %0
}

; CHECK-LABEL: @AtomicLoad
; CHECK: load atomic i32, i32* {{.*}} seq_cst, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
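
; Rationale (informal): for an atomic load the shadow is read from shadow
; memory only after the application load, so (together with at least acquire
; ordering) the shadow seen corresponds to the value that was actually loaded.
; A rough sketch of the instrumented form, where <shadow address of %p> stands
; in for the target-specific application-to-shadow address computation:
;
;   %v = load atomic i32, i32* %p seq_cst, align 16
;   %s = load i32, i32* <shadow address of %p>, align 16   ; shadow read second
;   ; %s is then stored into the __msan_retval_tls slot before the ret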


; atomic load acquire: preserve alignment, load shadow value after app value

define i32 @AtomicLoadAcquire(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32, i32* %p acquire, align 16
  ret i32 %0
}

; CHECK-LABEL: @AtomicLoadAcquire
; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32


; atomic load monotonic: bump up to load acquire

define i32 @AtomicLoadMonotonic(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32, i32* %p monotonic, align 16
  ret i32 %0
}

; CHECK-LABEL: @AtomicLoadMonotonic
; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32


; atomic load unordered: bump up to load acquire

define i32 @AtomicLoadUnordered(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32, i32* %p unordered, align 16
  ret i32 %0
}

; CHECK-LABEL: @AtomicLoadUnordered
; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
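
; Rationale (informal): monotonic and unordered atomic loads are strengthened
; to acquire in the instrumented code so that the shadow load that follows
; them is ordered correctly; acquire and seq_cst loads keep their original
; ordering (see AtomicLoad and AtomicLoadAcquire above).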


; atomic store: preserve alignment, store clean shadow value before app value

define void @AtomicStore(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p seq_cst, align 16
  ret void
}

; CHECK-LABEL: @AtomicStore
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16
; CHECK: ret void
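
; Rationale (informal): the shadow of %x is deliberately not propagated (note
; the CHECK-NOT on the param TLS above); instead a clean shadow is written to
; shadow memory before the application store, so data published through an
; atomic store is treated as initialized. A rough sketch, where <shadow
; address of %p> stands in for the target-specific mapping:
;
;   store i32 0, i32* <shadow address of %p>, align 16   ; clean shadow first
;   store atomic i32 %x, i32* %p seq_cst, align 16       ; then the app store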


; atomic store release: preserve alignment, store clean shadow value before app value

define void @AtomicStoreRelease(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p release, align 16
  ret void
}

; CHECK-LABEL: @AtomicStoreRelease
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void


; atomic store monotonic: bumped up to store release

define void @AtomicStoreMonotonic(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p monotonic, align 16
  ret void
}

; CHECK-LABEL: @AtomicStoreMonotonic
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void


; atomic store unordered: bumped up to store release

define void @AtomicStoreUnordered(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p unordered, align 16
  ret void
}

; CHECK-LABEL: @AtomicStoreUnordered
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
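
; Rationale (informal): monotonic and unordered atomic stores are strengthened
; to release so that the clean-shadow store emitted before them cannot be
; reordered past the application store; release and seq_cst stores keep their
; original ordering (see AtomicStore and AtomicStoreRelease above).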