; NOTE: Assertions have been autogenerated by utils/update_test_checks.py

; XFAIL: *
; RUN: opt < %s -basic-aa -dse -S | FileCheck %s
; RUN: opt < %s -aa-pipeline=basic-aa -passes=dse -S | FileCheck %s
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture, i8, i64, i32) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
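
; These tests check that DSE removes a memset/memcpy that is fully overwritten
; by a later memset/memcpy, covering both the plain and the element-wise
; unordered-atomic variants of the intrinsics.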

; PR8701

;; Fully dead overwrite of memcpy.
define void @test15(i8* %P, i8* %Q) nounwind ssp {
; CHECK-LABEL: @test15(
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
  ret void
}

;; Fully dead overwrite of memcpy.
define void @test15_atomic(i8* %P, i8* %Q) nounwind ssp {
; CHECK-LABEL: @test15_atomic(
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
  ret void
}

;; Fully dead overwrite of memcpy.
define void @test15_atomic_weaker(i8* %P, i8* %Q) nounwind ssp {
; CHECK-LABEL: @test15_atomic_weaker(
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
  ret void
}

;; Fully dead overwrite of memcpy.
define void @test15_atomic_weaker_2(i8* %P, i8* %Q) nounwind ssp {
; CHECK-LABEL: @test15_atomic_weaker_2(
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
  ret void
}

;; Full overwrite of smaller memcpy.
define void @test16(i8* %P, i8* %Q) nounwind ssp {
; CHECK-LABEL: @test16(
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i1 false)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
  ret void
}

;; Full overwrite of smaller memcpy.
define void @test16_atomic(i8* %P, i8* %Q) nounwind ssp {
; CHECK-LABEL: @test16_atomic(
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 8, i32 1)
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
  ret void
}

;; Full overwrite of smaller memory where the overwrite has stronger atomicity.
define void @test16_atomic_weaker(i8* %P, i8* %Q) nounwind ssp {
; CHECK-LABEL: @test16_atomic_weaker(
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 8, i1 false)
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
  ret void
}

;; Full overwrite of smaller memory where the overwrite has weaker atomicity.
define void @test16_atomic_weaker_2(i8* %P, i8* %Q) nounwind ssp {
; CHECK-LABEL: @test16_atomic_weaker_2(
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 8, i32 1)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
  ret void
}

;; Overwrite of memset by memcpy.
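;; The noalias on %Q guarantees the memcpy's source cannot read the bytes the
;; memset wrote, which is what lets DSE treat the memset as dead.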
define void @test17(i8* %P, i8* noalias %Q) nounwind ssp {
; CHECK-LABEL: @test17(
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 false)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
  ret void
}

;; Overwrite of memset by memcpy.
define void @test17_atomic(i8* %P, i8* noalias %Q) nounwind ssp {
; CHECK-LABEL: @test17_atomic(
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %P, i8 42, i64 8, i32 1)
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
  ret void
}

;; Overwrite of memset by memcpy. The overwrite has stronger atomicity, so we
;; can remove the memset.
define void @test17_atomic_weaker(i8* %P, i8* noalias %Q) nounwind ssp {
; CHECK-LABEL: @test17_atomic_weaker(
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memset.p0i8.i64(i8* align 1 %P, i8 42, i64 8, i1 false)
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
  ret void
}

;; Overwrite of memset by memcpy. The overwrite has weaker atomicity, so we
;; can remove the memset.
define void @test17_atomic_weaker_2(i8* %P, i8* noalias %Q) nounwind ssp {
; CHECK-LABEL: @test17_atomic_weaker_2(
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %P, i8 42, i64 8, i32 1)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i1 false)
  ret void
}

; Should not delete the volatile memset.
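; A volatile access has side effects that must be preserved, so the memset
; stays even though the memcpy fully overwrites the stored bytes.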
define void @test17v(i8* %P, i8* %Q) nounwind ssp {
; CHECK-LABEL: @test17v(
; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* [[P:%.*]], i8 42, i64 8, i1 true)
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P]], i8* [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 true)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
  ret void
}

; PR8728
; Do not delete an instruction when the situation could be:
; A = B
; A = A
;
; NB! See PR11763 - currently LLVM allows memcpy's source and destination to be
; equal (but not unequal and overlapping).
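; If %R were equal to %P, the second memcpy would be a self-copy that reads
; the bytes stored by the first, so the first memcpy is not dead.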
define void @test18(i8* %P, i8* %Q, i8* %R) nounwind ssp {
; CHECK-LABEL: @test18(
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P:%.*]], i8* [[Q:%.*]], i64 12, i1 false)
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[P]], i8* [[R:%.*]], i64 12, i1 false)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i1 false)
  ret void
}

define void @test18_atomic(i8* %P, i8* %Q, i8* %R) nounwind ssp {
; CHECK-LABEL: @test18_atomic(
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P:%.*]], i8* align 1 [[Q:%.*]], i64 12, i32 1)
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[P]], i8* align 1 [[R:%.*]], i64 12, i32 1)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %R, i64 12, i32 1)
  ret void
}