; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s
; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S -passes=msan 2>&1 | FileCheck %s -check-prefixes=CHECK,CHECK-ORIGIN
; NOTE(review): the RUN line below uses the legacy pass manager ("opt -msan"
; with no -passes=); that syntax was dropped from opt in newer LLVM releases —
; confirm it still works against this tree's LLVM version, or remove it.
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; libatomic runtime helpers used for atomic accesses of non-power-of-two
; sizes. Arguments: (byte size, source ptr, destination ptr, memory order).
declare void @__atomic_load(i64, i8*, i8*, i32)
declare void @__atomic_store(i64, i8*, i8*, i32)
define i24 @odd_sized_load(i24* %ptr) sanitize_memory {
; A 3-byte (odd-sized) atomic load is lowered through the libatomic call
; @__atomic_load into a stack temporary. The CHECK lines pin down that MSan:
;   - memcpy's the shadow of *%ptr into the shadow of the alloca temp,
;   - chains and stores the origin when origin tracking is enabled,
;   - upgrades the memory order argument from relaxed (0) to acquire (2).
; CHECK: @odd_sized_load(i24* {{.*}}[[PTR:%.+]])
; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
; CHECK-ORIGIN: @__msan_set_alloca_origin
; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*
; CHECK: call void @__atomic_load(i64 3, i8* [[PTR_I8]], i8* [[VAL_PTR_I8]], i32 2)

; Shadow (and, with origins, origin) address computation for the source ptr.
; CHECK: ptrtoint i8* [[PTR_I8]]
; CHECK: xor
; CHECK: [[SPTR_I8:%.*]] = inttoptr
; CHECK-ORIGIN: add
; CHECK-ORIGIN: and
; CHECK-ORIGIN: [[OPTR:%.*]] = inttoptr

; Same computation for the stack temporary the value is loaded into.
; CHECK: ptrtoint i8* [[VAL_PTR_I8]]
; CHECK: xor
; CHECK: [[VAL_SPTR_I8:%.*]] = inttoptr
; CHECK-ORIGIN: add
; CHECK-ORIGIN: and
; CHECK-ORIGIN: [[VAL_OPTR:%.*]] = inttoptr

; CHECK: call void @llvm.memcpy{{.*}}(i8* align 1 [[VAL_SPTR_I8]], i8* align 1 [[SPTR_I8]], i64 3

; CHECK-ORIGIN: [[ARG_ORIGIN:%.*]] = load i32, i32* [[OPTR]]
; CHECK-ORIGIN: [[VAL_ORIGIN:%.*]] = call i32 @__msan_chain_origin(i32 [[ARG_ORIGIN]])
; CHECK-ORIGIN: call void @__msan_set_origin(i8* [[VAL_PTR_I8]], i64 3, i32 [[VAL_ORIGIN]])

; CHECK: [[VAL:%.*]] = load i24, i24* [[VAL_PTR]]
; CHECK: ret i24 [[VAL]]
  %val_ptr = alloca i24, align 1
  %val_ptr_i8 = bitcast i24* %val_ptr to i8*
  %ptr_i8 = bitcast i24* %ptr to i8*
  call void @__atomic_load(i64 3, i8* %ptr_i8, i8* %val_ptr_i8, i32 0)
  %val = load i24, i24* %val_ptr
  ret i24 %val
}
47
define void @odd_sized_store(i24* %ptr, i24 %val) sanitize_memory {
; A 3-byte (odd-sized) atomic store goes through @__atomic_store from a stack
; temporary. The CHECK lines pin down that MSan:
;   - memsets the shadow of the destination *%ptr to 0 (marks it initialized),
;   - upgrades the memory order argument from relaxed (0) to release (3).
; CHECK: @odd_sized_store(i24* {{.*}}[[PTR:%.+]], i24 {{.*}}[[VAL:%.+]])
; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
; CHECK: store i24 [[VAL]], i24* [[VAL_PTR]]
; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*

; Shadow address computation for the destination, then shadow cleared.
; CHECK: ptrtoint i8* [[PTR_I8]]
; CHECK: xor
; CHECK: [[SPTR_I8:%.*]] = inttoptr
; CHECK: call void @llvm.memset{{.*}}(i8* align 1 [[SPTR_I8]], i8 0, i64 3

; CHECK: call void @__atomic_store(i64 3, i8* [[VAL_PTR_I8]], i8* [[PTR_I8]], i32 3)
; CHECK: ret void
  %val_ptr = alloca i24, align 1
  store i24 %val, i24* %val_ptr
  %val_ptr_i8 = bitcast i24* %val_ptr to i8*
  %ptr_i8 = bitcast i24* %ptr to i8*
  call void @__atomic_store(i64 3, i8* %val_ptr_i8, i8* %ptr_i8, i32 0)
  ret void
}
69
70