1 // RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-darwin10 | FileCheck %s
2 
3 extern int int_source();
4 extern void int_sink(int x);
5 
6 namespace test0 {
  // Plain aggregate base: two ints (8 bytes on this x86_64 triple), so the
  // B base subobject of C below begins at offset 8.
  struct A {
    int aField;
    int bField;
  };
11 
  struct B {
    // Bit-field under test; despite the name it is 2 bits wide (the CHECKed
    // masks use `and ..., 3`).
    int onebit : 2;
    int twobit : 6;
    int intField;
  };
17 
  struct __attribute__((packed, aligned(2))) C : A, B {
    // Empty, packed, align(2): the B base (and its bit-fields) sits at
    // offset 8, but only 2-byte alignment may be assumed through a C.
  };
20 
21   // These accesses should have alignment 4 because they're at offset 0
22   // in a reference with an assumed alignment of 4.
23   // CHECK-LABEL: @_ZN5test01aERNS_1BE
  void a(B &b) {
    // Store to, then load from, the 2-bit field through a B& whose assumed
    // alignment is B's natural alignment (4).
    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[B_P:%.*]] = load [[B:%.*]]*, [[B]]**
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 4
    b.onebit = int_source();

    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(b.onebit);
  }
45 
46   // These accesses should have alignment 2 because they're at offset 8
47   // in a reference/pointer with an assumed alignment of 2.
48   // CHECK-LABEL: @_ZN5test01bERNS_1CE
  void b(C &c) {
    // Same bit-field accesses, but through a C&: the B base is at offset 8
    // and only C's alignment (2) can be assumed.
    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[C_P:%.*]] = load [[C:%.*]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 2
    c.onebit = int_source();

    // CHECK: [[C_P:%.*]] = load [[C]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(c.onebit);
  }
76 
77   // CHECK-LABEL: @_ZN5test01cEPNS_1CE
  void c(C *c) {
    // Identical to the C& case above, but through a pointer; the assumed
    // alignment (2) is the same.
    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[C_P:%.*]] = load [[C]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 2
    c->onebit = int_source();

    // CHECK: [[C_P:%.*]] = load [[C:%.*]]*, [[C]]**
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(c->onebit);
  }
105 
106   // These accesses should have alignment 2 because they're at offset 8
107   // in an alignment-2 variable.
108   // CHECK-LABEL: @_ZN5test01dEv
  void d() {
    // Local C: the alloca itself gets C's natural alignment (2), so the
    // bit-field at offset 8 still only has alignment 2.
    // CHECK: [[C_P:%.*]] = alloca [[C:%.*]], align 2
    C c;

    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 2
    c.onebit = int_source();

    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(c.onebit);
  }
137 
138   // These accesses should have alignment 8 because they're at offset 8
139   // in an alignment-16 variable.
140   // CHECK-LABEL: @_ZN5test01eEv
  void e() {
    // Over-aligned local (align 16): the field at offset 8 inherits
    // alignment gcd(16, 8) = 8.
    // CHECK: [[C_P:%.*]] = alloca [[C:%.*]], align 16
    __attribute__((aligned(16))) C c;

    // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
    // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 8
    // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
    // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
    // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
    // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 8
    c.onebit = int_source();

    // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 8
    // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
    // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
    // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
    // CHECK: call void @_Z8int_sinki(i32 [[T2]])
    int_sink(c.onebit);
  }
169 }
170 
171 namespace test1 {
  struct Array {
    int elts[4]; // 16 bytes of payload; copied by memcpy in the tests below
  };
175 
  struct A {
    // The member attribute forces 16-byte alignment on aArray (and on A).
    __attribute__((aligned(16))) Array aArray;
  };
179 
  struct B : virtual A {
    void *bPointer; // puts bArray at offset 16
    Array bArray;
  };
184 
  struct C : virtual A { // must be viable as primary base
    // Non-empty, nv-size not a multiple of 16.
    void *cPointer1;
    void *cPointer2;
  };
190 
  // Proof of concept that the non-virtual components of B do not have
  // to be 16-byte-aligned.
  // (C is the primary base, so B's subobject lands at a non-16-aligned
  // offset — 24; see g() below.)
  struct D : C, B {};
194 
195   // For the following tests, we want to assign into a variable whose
196   // alignment is high enough that it will absolutely not be the
197   // constraint on the memcpy alignment.
198   typedef __attribute__((aligned(64))) Array AlignedArray;
199 
200   // CHECK-LABEL: @_ZN5test11aERNS_1AE
  void a(A &a) {
    // aArray is the 16-aligned first member of A, so the copy may assume
    // alignment 16 on the source side.
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY:%.*]], align 64
    // CHECK: [[A_P:%.*]] = load [[A:%.*]]*, [[A]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 16, i1 false)
    AlignedArray result = a.aArray;
  }
210 
211   // CHECK-LABEL: @_ZN5test11bERNS_1BE
  void b(B &b) {
    // Reaching aArray requires a derived-to-virtual-base conversion: load
    // the vbase offset from the vtable, then adjust the pointer.
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B:%.*]]*, [[B]]**
    // CHECK: [[VPTR_P:%.*]] = bitcast [[B]]* [[B_P]] to i8**
    // CHECK: [[VPTR:%.*]] = load i8*, i8** [[VPTR_P]], align 8
    // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 -24
    // CHECK: [[OFFSET_P:%.*]] = bitcast i8* [[T0]] to i64*
    // CHECK: [[OFFSET:%.*]] = load i64, i64* [[OFFSET_P]], align 8
    // CHECK: [[T0:%.*]] = bitcast [[B]]* [[B_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
    // CHECK: [[A_P:%.*]] = bitcast i8* [[T1]] to [[A]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 16, i1 false)
    AlignedArray result = b.aArray;
  }
229 
230   // CHECK-LABEL: @_ZN5test11cERNS_1BE
  void c(B &b) {
    // Through a B&, only B's non-virtual alignment is known for bArray,
    // so the copy assumes align 8, not 16.
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 8, i1 false)
    AlignedArray result = b.bArray;
  }
240 
241   // CHECK-LABEL: @_ZN5test11dEPNS_1BE
  void d(B *b) {
    // Same as c(), but through a pointer: still only align 8 for bArray.
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 8, i1 false)
    AlignedArray result = b->bArray;
  }
251 
252   // CHECK-LABEL: @_ZN5test11eEv
  void e() {
    // Complete local B object: its full alignment (16) is known, so the
    // bArray copy may assume align 16.
    // CHECK: [[B_P:%.*]] = alloca [[B]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 16, i1 false)
    B b;
    AlignedArray result = b.bArray;
  }
263 
264   // CHECK-LABEL: @_ZN5test11fEv
  void f() {
    // TODO: we should devirtualize this derived-to-base conversion.
    // (The D object is complete here, so the vbase offset is statically
    // known, yet the lookup still goes through the vtable.)
    // CHECK: [[D_P:%.*]] = alloca [[D:%.*]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[VPTR_P:%.*]] = bitcast [[D]]* [[D_P]] to i8**
    // CHECK: [[VPTR:%.*]] = load i8*, i8** [[VPTR_P]], align 16
    // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 -24
    // CHECK: [[OFFSET_P:%.*]] = bitcast i8* [[T0]] to i64*
    // CHECK: [[OFFSET:%.*]] = load i64, i64* [[OFFSET_P]], align 8
    // CHECK: [[T0:%.*]] = bitcast [[D]]* [[D_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
    // CHECK: [[A_P:%.*]] = bitcast i8* [[T1]] to [[A]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 16, i1 false)
    D d;
    AlignedArray result = d.aArray;
  }
284 
285   // CHECK-LABEL: @_ZN5test11gEv
  void g() {
    // B is a non-primary base of D at offset 24, so d.bArray only gets
    // align 8 even though the complete D is 16-aligned.
    // CHECK: [[D_P:%.*]] = alloca [[D]], align 16
    // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
    // CHECK: [[T0:%.*]] = bitcast [[D]]* [[D_P]] to i8*
    // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 24
    // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
    // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
    // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
    // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 8, i1 false)
    D d;
    AlignedArray result = d.bArray;
  }
299 }
300