; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -msan-eager-checks -S -passes='module(msan-module),function(msan)' 2>&1 | \
; RUN:   FileCheck -allow-deprecated-dag-overlap -check-prefixes=CHECK,CHECK-ORIGINS %s
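; Test that with -msan-eager-checks, noundef arguments and return values skip
; the shadow/origin TLS traffic and are checked eagerly instead.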

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

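; A noundef return needs no retval shadow or origin; only llvm.donothing is emitted.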
define noundef i32 @NormalRet() nounwind uwtable sanitize_memory {
; CHECK-LABEL: @NormalRet(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    ret i32 123
;
  ret i32 123
}

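; Without noundef, even a constant return stores zero shadow and origin to the retval TLS slots.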
define i32 @PartialRet() nounwind uwtable sanitize_memory {
; CHECK-LABEL: @PartialRet(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
; CHECK-NEXT:    store i32 0, i32* @__msan_retval_origin_tls, align 4
; CHECK-NEXT:    ret i32 123
;
  ret i32 123
}

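; Returning a loaded value as noundef: the loaded shadow is checked eagerly and
; __msan_warning_with_origin_noreturn is called if the value is poisoned.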
define noundef i32 @LoadedRet() nounwind uwtable sanitize_memory {
; CHECK-LABEL: @LoadedRet(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to i32*
; CHECK-NEXT:    [[O:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[P]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to i32*
; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, i32* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0
; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof !0
; CHECK:       7:
; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) [[ATTR2:#.*]]
; CHECK-NEXT:    unreachable
; CHECK:       8:
; CHECK-NEXT:    ret i32 [[O]]
;
  %p = inttoptr i64 0 to i32 *
  %o = load i32, i32 *%p
  ret i32 %o
}


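; A noundef argument carries no shadow from the param TLS; storing it writes a zero shadow.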
define void @NormalArg(i32 noundef %a) nounwind uwtable sanitize_memory {
; CHECK-LABEL: @NormalArg(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to i32*
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[P]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to i32*
; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
; CHECK-NEXT:    store i32 0, i32* [[TMP3]], align 4
; CHECK-NEXT:    store i32 [[A:%.*]], i32* [[P]], align 4
; CHECK-NEXT:    ret void
;
  %p = inttoptr i64 0 to i32 *
  store i32 %a, i32 *%p
  ret void
}

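; A plain (non-noundef) argument: its shadow and origin are loaded from the param TLS
; slots and propagated to the store.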
define void @PartialArg(i32 %a) nounwind uwtable sanitize_memory {
; CHECK-LABEL: @PartialArg(
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to i32*
; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i32* [[P]] to i64
; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i32*
; CHECK-NEXT:    store i32 [[TMP1]], i32* [[TMP5]], align 4
; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof !0
; CHECK:       8:
; CHECK-NEXT:    store i32 [[TMP2]], i32* [[TMP7]], align 4
; CHECK-NEXT:    br label [[TMP9]]
; CHECK:       9:
; CHECK-NEXT:    store i32 [[A:%.*]], i32* [[P]], align 4
; CHECK-NEXT:    ret void
;
  %p = inttoptr i64 0 to i32 *
  store i32 %a, i32 *%p
  ret void
}

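; Calling a noundef-returning callee and passing its result as a noundef argument
; needs no TLS shadow traffic at all.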
define void @CallNormal() nounwind uwtable sanitize_memory {
; CHECK-LABEL: @CallNormal(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[R:%.*]] = call i32 @NormalRet() [[ATTR0:#.*]]
; CHECK-NEXT:    call void @NormalArg(i32 [[R]]) [[ATTR0]]
; CHECK-NEXT:    ret void
;
  %r = call i32 @NormalRet() nounwind uwtable sanitize_memory
  call void @NormalArg(i32 %r) nounwind uwtable sanitize_memory
  ret void
}

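; Passing a loaded value as a noundef argument: its shadow is checked before the call
; instead of being spilled to the param TLS.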
define void @CallWithLoaded() nounwind uwtable sanitize_memory {
; CHECK-LABEL: @CallWithLoaded(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to i32*
; CHECK-NEXT:    [[O:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[P]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to i32*
; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, i32* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0
; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof !0
; CHECK:       7:
; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) [[ATTR2]]
; CHECK-NEXT:    unreachable
; CHECK:       8:
; CHECK-NEXT:    call void @NormalArg(i32 [[O]]) [[ATTR0]]
; CHECK-NEXT:    ret void
;
  %p = inttoptr i64 0 to i32 *
  %o = load i32, i32 *%p
  call void @NormalArg(i32 %o) nounwind uwtable sanitize_memory
  ret void
}

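; Calling functions without noundef: retval shadow and origin are read back from TLS
; and forwarded to the param TLS slots for the next call.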
define void @CallPartial() nounwind uwtable sanitize_memory {
; CHECK-LABEL: @CallPartial(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
; CHECK-NEXT:    [[R:%.*]] = call i32 @PartialRet() [[ATTR0]]
; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @__msan_retval_origin_tls, align 4
; CHECK-NEXT:    store i32 [[_MSRET]], i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
; CHECK-NEXT:    store i32 [[TMP1]], i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
; CHECK-NEXT:    call void @PartialArg(i32 [[R]]) [[ATTR0]]
; CHECK-NEXT:    ret void
;
  %r = call i32 @PartialRet() nounwind uwtable sanitize_memory
  call void @PartialArg(i32 %r) nounwind uwtable sanitize_memory
  ret void
}