1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This provides C++ code generation targeting the Itanium C++ ABI. The class
11 // in this file generates structures that follow the Itanium C++ ABI, which is
12 // documented at:
13 // http://www.codesourcery.com/public/cxx-abi/abi.html
14 // http://www.codesourcery.com/public/cxx-abi/abi-eh.html
15 //
16 // It also supports the closely-related ARM ABI, documented at:
17 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
18 //
19 //===----------------------------------------------------------------------===//
20
21 #include "CGCXXABI.h"
22 #include "CGCleanup.h"
23 #include "CGRecordLayout.h"
24 #include "CGVTables.h"
25 #include "CodeGenFunction.h"
26 #include "CodeGenModule.h"
27 #include "TargetInfo.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/Type.h"
30 #include "clang/AST/StmtCXX.h"
31 #include "llvm/IR/CallSite.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/IR/Value.h"
36
37 using namespace clang;
38 using namespace CodeGen;
39
40 namespace {
41 class ItaniumCXXABI : public CodeGen::CGCXXABI {
42 /// VTables - All the vtables which have been defined.
43 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
44
45 protected:
46 bool UseARMMethodPtrABI;
47 bool UseARMGuardVarABI;
48
  ItaniumMangleContext &getMangleContext() {
50 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
51 }
52
53 public:
  ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
55 bool UseARMMethodPtrABI = false,
56 bool UseARMGuardVarABI = false) :
57 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
58 UseARMGuardVarABI(UseARMGuardVarABI) { }
59
60 bool classifyReturnType(CGFunctionInfo &FI) const override;
61
  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
63 // Structures with either a non-trivial destructor or a non-trivial
64 // copy constructor are always indirect.
65 // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
66 // special members.
67 if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor())
68 return RAA_Indirect;
69 return RAA_Default;
70 }
71
  bool isThisCompleteObject(GlobalDecl GD) const override {
73 // The Itanium ABI has separate complete-object vs. base-object
74 // variants of both constructors and destructors.
75 if (isa<CXXDestructorDecl>(GD.getDecl())) {
76 switch (GD.getDtorType()) {
77 case Dtor_Complete:
78 case Dtor_Deleting:
79 return true;
80
81 case Dtor_Base:
82 return false;
83
84 case Dtor_Comdat:
85 llvm_unreachable("emitting dtor comdat as function?");
86 }
87 llvm_unreachable("bad dtor kind");
88 }
89 if (isa<CXXConstructorDecl>(GD.getDecl())) {
90 switch (GD.getCtorType()) {
91 case Ctor_Complete:
92 return true;
93
94 case Ctor_Base:
95 return false;
96
97 case Ctor_CopyingClosure:
98 case Ctor_DefaultClosure:
99 llvm_unreachable("closure ctors in Itanium ABI?");
100
101 case Ctor_Comdat:
102 llvm_unreachable("emitting ctor comdat as function?");
103 }
      llvm_unreachable("bad ctor kind");
105 }
106
107 // No other kinds.
108 return false;
109 }
110
111 bool isZeroInitializable(const MemberPointerType *MPT) override;
112
113 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
114
115 llvm::Value *
116 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
117 const Expr *E,
118 Address This,
119 llvm::Value *&ThisPtrForCall,
120 llvm::Value *MemFnPtr,
121 const MemberPointerType *MPT) override;
122
123 llvm::Value *
124 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
125 Address Base,
126 llvm::Value *MemPtr,
127 const MemberPointerType *MPT) override;
128
129 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
130 const CastExpr *E,
131 llvm::Value *Src) override;
132 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
133 llvm::Constant *Src) override;
134
135 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
136
137 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
138 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
139 CharUnits offset) override;
140 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
141 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
142 CharUnits ThisAdjustment);
143
144 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
145 llvm::Value *L, llvm::Value *R,
146 const MemberPointerType *MPT,
147 bool Inequality) override;
148
149 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
150 llvm::Value *Addr,
151 const MemberPointerType *MPT) override;
152
153 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
154 Address Ptr, QualType ElementType,
155 const CXXDestructorDecl *Dtor) override;
156
157 /// Itanium says that an _Unwind_Exception has to be "double-word"
158 /// aligned (and thus the end of it is also so-aligned), meaning 16
159 /// bytes. Of course, that was written for the actual Itanium,
160 /// which is a 64-bit platform. Classically, the ABI doesn't really
161 /// specify the alignment on other platforms, but in practice
162 /// libUnwind declares the struct with __attribute__((aligned)), so
163 /// we assume that alignment here. (It's generally 16 bytes, but
164 /// some targets overwrite it.)
  CharUnits getAlignmentOfExnObject() {
166 auto align = CGM.getContext().getTargetDefaultAlignForAttributeAligned();
167 return CGM.getContext().toCharUnitsFromBits(align);
168 }
169
170 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
171 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
172
173 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
174
175 llvm::CallInst *
176 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
177 llvm::Value *Exn) override;
178
179 void EmitFundamentalRTTIDescriptor(QualType Type);
180 void EmitFundamentalRTTIDescriptors();
181 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
182 CatchTypeInfo
  getAddrOfCXXCatchHandlerType(QualType Ty,
184 QualType CatchHandlerType) override {
185 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
186 }
187
188 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
189 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
190 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
191 Address ThisPtr,
192 llvm::Type *StdTypeInfoPtrTy) override;
193
194 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
195 QualType SrcRecordTy) override;
196
197 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
198 QualType SrcRecordTy, QualType DestTy,
199 QualType DestRecordTy,
200 llvm::BasicBlock *CastEnd) override;
201
202 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
203 QualType SrcRecordTy,
204 QualType DestTy) override;
205
206 bool EmitBadCastCall(CodeGenFunction &CGF) override;
207
208 llvm::Value *
209 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
210 const CXXRecordDecl *ClassDecl,
211 const CXXRecordDecl *BaseClassDecl) override;
212
213 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
214
215 void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
216 SmallVectorImpl<CanQualType> &ArgTys) override;
217
  bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
219 CXXDtorType DT) const override {
220 // Itanium does not emit any destructor variant as an inline thunk.
221 // Delegating may occur as an optimization, but all variants are either
222 // emitted with external linkage or as linkonce if they are inline and used.
223 return false;
224 }
225
226 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
227
228 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
229 FunctionArgList &Params) override;
230
231 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
232
233 unsigned addImplicitConstructorArgs(CodeGenFunction &CGF,
234 const CXXConstructorDecl *D,
235 CXXCtorType Type, bool ForVirtualBase,
236 bool Delegating,
237 CallArgList &Args) override;
238
239 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
240 CXXDtorType Type, bool ForVirtualBase,
241 bool Delegating, Address This) override;
242
243 void emitVTableDefinitions(CodeGenVTables &CGVT,
244 const CXXRecordDecl *RD) override;
245
246 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
247 CodeGenFunction::VPtr Vptr) override;
248
  bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
250 return true;
251 }
252
253 llvm::Constant *
254 getVTableAddressPoint(BaseSubobject Base,
255 const CXXRecordDecl *VTableClass) override;
256
257 llvm::Value *getVTableAddressPointInStructor(
258 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
259 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
260
261 llvm::Value *getVTableAddressPointInStructorWithVTT(
262 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
263 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
264
265 llvm::Constant *
266 getVTableAddressPointForConstExpr(BaseSubobject Base,
267 const CXXRecordDecl *VTableClass) override;
268
269 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
270 CharUnits VPtrOffset) override;
271
272 llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
273 Address This, llvm::Type *Ty,
274 SourceLocation Loc) override;
275
276 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
277 const CXXDestructorDecl *Dtor,
278 CXXDtorType DtorType,
279 Address This,
280 const CXXMemberCallExpr *CE) override;
281
282 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
283
284 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
285
  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
287 bool ReturnAdjustment) override {
288 // Allow inlining of thunks by emitting them with available_externally
289 // linkage together with vtables when needed.
290 if (ForVTable && !Thunk->hasLocalLinkage())
291 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
292 }
293
294 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
295 const ThisAdjustment &TA) override;
296
297 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
298 const ReturnAdjustment &RA) override;
299
  size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
301 FunctionArgList &Args) const override {
302 assert(!Args.empty() && "expected the arglist to not be empty!");
303 return Args.size() - 1;
304 }
305
  StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
  StringRef GetDeletedVirtualCallName() override
308 { return "__cxa_deleted_virtual"; }
309
310 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
311 Address InitializeArrayCookie(CodeGenFunction &CGF,
312 Address NewPtr,
313 llvm::Value *NumElements,
314 const CXXNewExpr *expr,
315 QualType ElementType) override;
316 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
317 Address allocPtr,
318 CharUnits cookieSize) override;
319
320 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
321 llvm::GlobalVariable *DeclPtr,
322 bool PerformInit) override;
323 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
324 llvm::Constant *dtor, llvm::Constant *addr) override;
325
326 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
327 llvm::Value *Val);
328 void EmitThreadLocalInitFuncs(
329 CodeGenModule &CGM,
330 ArrayRef<const VarDecl *> CXXThreadLocals,
331 ArrayRef<llvm::Function *> CXXThreadLocalInits,
332 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
333
  bool usesThreadWrapperFunction() const override { return true; }
335 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
336 QualType LValType) override;
337
338 bool NeedsVTTParameter(GlobalDecl GD) override;
339
340 /**************************** RTTI Uniqueness ******************************/
341
342 protected:
343 /// Returns true if the ABI requires RTTI type_info objects to be unique
344 /// across a program.
  virtual bool shouldRTTIBeUnique() const { return true; }
346
347 public:
348 /// What sort of unique-RTTI behavior should we use?
349 enum RTTIUniquenessKind {
350 /// We are guaranteeing, or need to guarantee, that the RTTI string
351 /// is unique.
352 RUK_Unique,
353
354 /// We are not guaranteeing uniqueness for the RTTI string, so we
355 /// can demote to hidden visibility but must use string comparisons.
356 RUK_NonUniqueHidden,
357
358 /// We are not guaranteeing uniqueness for the RTTI string, so we
359 /// have to use string comparisons, but we also have to emit it with
360 /// non-hidden visibility.
361 RUK_NonUniqueVisible
362 };
363
364 /// Return the required visibility status for the given type and linkage in
365 /// the current ABI.
366 RTTIUniquenessKind
367 classifyRTTIUniqueness(QualType CanTy,
368 llvm::GlobalValue::LinkageTypes Linkage) const;
369 friend class ItaniumRTTIBuilder;
370
371 void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
372
373 private:
  bool hasAnyUsedVirtualInlineFunction(const CXXRecordDecl *RD) const {
375 const auto &VtableLayout =
376 CGM.getItaniumVTableContext().getVTableLayout(RD);
377
378 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
379 if (!VtableComponent.isUsedFunctionPointerKind())
380 continue;
381
382 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
383 if (Method->getCanonicalDecl()->isInlined())
384 return true;
385 }
386 return false;
387 }
388
  bool isVTableHidden(const CXXRecordDecl *RD) const {
390 const auto &VtableLayout =
391 CGM.getItaniumVTableContext().getVTableLayout(RD);
392
393 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
394 if (VtableComponent.isRTTIKind()) {
395 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
396 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
397 return true;
398 } else if (VtableComponent.isUsedFunctionPointerKind()) {
399 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
400 if (Method->getVisibility() == Visibility::HiddenVisibility &&
401 !Method->isDefined())
402 return true;
403 }
404 }
405 return false;
406 }
407 };
408
409 class ARMCXXABI : public ItaniumCXXABI {
410 public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
412 ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
413 /* UseARMGuardVarABI = */ true) {}
414
  bool HasThisReturn(GlobalDecl GD) const override {
416 return (isa<CXXConstructorDecl>(GD.getDecl()) || (
417 isa<CXXDestructorDecl>(GD.getDecl()) &&
418 GD.getDtorType() != Dtor_Deleting));
419 }
420
421 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
422 QualType ResTy) override;
423
424 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
425 Address InitializeArrayCookie(CodeGenFunction &CGF,
426 Address NewPtr,
427 llvm::Value *NumElements,
428 const CXXNewExpr *expr,
429 QualType ElementType) override;
430 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
431 CharUnits cookieSize) override;
432 };
433
434 class iOS64CXXABI : public ARMCXXABI {
435 public:
  iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {}
437
438 // ARM64 libraries are prepared for non-unique RTTI.
  bool shouldRTTIBeUnique() const override { return false; }
440 };
441
442 class WebAssemblyCXXABI final : public ItaniumCXXABI {
443 public:
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
445 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
446 /*UseARMGuardVarABI=*/true) {}
447
448 private:
  bool HasThisReturn(GlobalDecl GD) const override {
450 return isa<CXXConstructorDecl>(GD.getDecl()) ||
451 (isa<CXXDestructorDecl>(GD.getDecl()) &&
452 GD.getDtorType() != Dtor_Deleting);
453 }
454 };
455 }
456
CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
458 switch (CGM.getTarget().getCXXABI().getKind()) {
459 // For IR-generation purposes, there's no significant difference
460 // between the ARM and iOS ABIs.
461 case TargetCXXABI::GenericARM:
462 case TargetCXXABI::iOS:
463 case TargetCXXABI::WatchOS:
464 return new ARMCXXABI(CGM);
465
466 case TargetCXXABI::iOS64:
467 return new iOS64CXXABI(CGM);
468
469 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
470 // include the other 32-bit ARM oddities: constructor/destructor return values
471 // and array cookies.
472 case TargetCXXABI::GenericAArch64:
473 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
474 /* UseARMGuardVarABI = */ true);
475
476 case TargetCXXABI::GenericMIPS:
477 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true);
478
479 case TargetCXXABI::WebAssembly:
480 return new WebAssemblyCXXABI(CGM);
481
482 case TargetCXXABI::GenericItanium:
483 if (CGM.getContext().getTargetInfo().getTriple().getArch()
484 == llvm::Triple::le32) {
485 // For PNaCl, use ARM-style method pointers so that PNaCl code
486 // does not assume anything about the alignment of function
487 // pointers.
488 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
489 /* UseARMGuardVarABI = */ false);
490 }
491 return new ItaniumCXXABI(CGM);
492
493 case TargetCXXABI::Microsoft:
494 llvm_unreachable("Microsoft ABI is not Itanium-based");
495 }
496 llvm_unreachable("bad ABI kind");
497 }
498
499 llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
501 if (MPT->isMemberDataPointer())
502 return CGM.PtrDiffTy;
503 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, nullptr);
504 }
505
506 /// In the Itanium and ARM ABIs, method pointers have the form:
507 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
508 ///
509 /// In the Itanium ABI:
510 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero
511 /// - the this-adjustment is (memptr.adj)
512 /// - the virtual offset is (memptr.ptr - 1)
513 ///
514 /// In the ARM ABI:
515 /// - method pointers are virtual if (memptr.adj & 1) is nonzero
516 /// - the this-adjustment is (memptr.adj >> 1)
517 /// - the virtual offset is (memptr.ptr)
518 /// ARM uses 'adj' for the virtual flag because Thumb functions
519 /// may be only single-byte aligned.
520 ///
521 /// If the member is virtual, the adjusted 'this' pointer points
522 /// to a vtable pointer from which the virtual offset is applied.
523 ///
524 /// If the member is non-virtual, memptr.ptr is the address of
525 /// the function to call.
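///
/// As an illustrative example (assuming 8-byte pointers and no
/// this-adjustment), a pointer to a non-virtual member function f is
///   Itanium: { ptr = (ptrdiff_t)&f, adj = 0 }
///   ARM:     { ptr = (ptrdiff_t)&f, adj = 0 }
/// while a pointer to a virtual member function in vtable slot 2
/// (byte offset 16) is
///   Itanium: { ptr = 17, adj = 0 }   // 16 + 1 for the virtual flag
///   ARM:     { ptr = 16, adj = 1 }   // the flag lives in adj's low bit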
llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
527 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
528 llvm::Value *&ThisPtrForCall,
529 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
530 CGBuilderTy &Builder = CGF.Builder;
531
532 const FunctionProtoType *FPT =
533 MPT->getPointeeType()->getAs<FunctionProtoType>();
534 const CXXRecordDecl *RD =
535 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
536
537 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
538 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
539
540 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
541
542 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
543 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
544 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
545
546 // Extract memptr.adj, which is in the second field.
547 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
548
549 // Compute the true adjustment.
550 llvm::Value *Adj = RawAdj;
551 if (UseARMMethodPtrABI)
552 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
553
554 // Apply the adjustment and cast back to the original struct type
555 // for consistency.
556 llvm::Value *This = ThisAddr.getPointer();
557 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
558 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
559 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
560 ThisPtrForCall = This;
561
562 // Load the function pointer.
563 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
564
565 // If the LSB in the function pointer is 1, the function pointer points to
566 // a virtual function.
567 llvm::Value *IsVirtual;
568 if (UseARMMethodPtrABI)
569 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
570 else
571 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
572 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
573 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
574
575 // In the virtual path, the adjustment left 'This' pointing to the
576 // vtable of the correct base subobject. The "function pointer" is an
577 // offset within the vtable (+1 for the virtual flag on non-ARM).
578 CGF.EmitBlock(FnVirtual);
579
580 // Cast the adjusted this to a pointer to vtable pointer and load.
581 llvm::Type *VTableTy = Builder.getInt8PtrTy();
582 CharUnits VTablePtrAlign =
583 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
584 CGF.getPointerAlign());
585 llvm::Value *VTable =
586 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
587
588 // Apply the offset.
589 llvm::Value *VTableOffset = FnAsInt;
590 if (!UseARMMethodPtrABI)
591 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
592 VTable = Builder.CreateGEP(VTable, VTableOffset);
593
594 // Load the virtual function to call.
595 VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
596 llvm::Value *VirtualFn =
597 Builder.CreateAlignedLoad(VTable, CGF.getPointerAlign(),
598 "memptr.virtualfn");
599 CGF.EmitBranch(FnEnd);
600
  // In the non-virtual path, memptr.ptr already holds the function's
  // address; just cast it to a function pointer.
603 CGF.EmitBlock(FnNonVirtual);
604 llvm::Value *NonVirtualFn =
605 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
606
607 // We're done.
608 CGF.EmitBlock(FnEnd);
609 llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2);
610 Callee->addIncoming(VirtualFn, FnVirtual);
611 Callee->addIncoming(NonVirtualFn, FnNonVirtual);
612 return Callee;
613 }
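
// Illustratively, the code above lowers a call through a member function
// pointer, (p->*mp)(...), to roughly the following control flow (Itanium
// flavor; ARM differs only in where the virtual bit and the adj shift live):
//
//   this.adjusted = (char*)p + mp.adj
//   if (mp.ptr & 1) {                          // memptr.virtual
//     vtable = *(char**)this.adjusted
//     callee = *(fnptr*)(vtable + mp.ptr - 1)
//   } else {                                   // memptr.nonvirtual
//     callee = (fnptr)mp.ptr
//   }
//   // both paths feed the phi at memptr.end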
614
615 /// Compute an l-value by applying the given pointer-to-member to a
616 /// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
618 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
619 const MemberPointerType *MPT) {
620 assert(MemPtr->getType() == CGM.PtrDiffTy);
621
622 CGBuilderTy &Builder = CGF.Builder;
623
624 // Cast to char*.
625 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
626
627 // Apply the offset, which we assume is non-null.
628 llvm::Value *Addr =
629 Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
630
631 // Cast the address to the appropriate pointer type, adopting the
632 // address space of the base pointer.
633 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
634 ->getPointerTo(Base.getAddressSpace());
635 return Builder.CreateBitCast(Addr, PType);
636 }
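
// For example (illustrative only): for a data member pointer mp holding a
// field's byte offset, the code above turns base.*mp into
//   (FieldTy *)((char *)&base + mp)
// i.e. a single inbounds GEP on the i8 base pointer plus a bitcast.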
637
638 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
639 /// conversion.
640 ///
641 /// Bitcast conversions are always a no-op under Itanium.
642 ///
643 /// Obligatory offset/adjustment diagram:
644 /// <-- offset --> <-- adjustment -->
645 /// |--------------------------|----------------------|--------------------|
646 /// ^Derived address point ^Base address point ^Member address point
647 ///
648 /// So when converting a base member pointer to a derived member pointer,
649 /// we add the offset to the adjustment because the address point has
650 /// decreased; and conversely, when converting a derived MP to a base MP
651 /// we subtract the offset from the adjustment because the address point
652 /// has increased.
653 ///
654 /// The standard forbids (at compile time) conversion to and from
655 /// virtual bases, which is why we don't have to consider them here.
656 ///
657 /// The standard forbids (at run time) casting a derived MP to a base
658 /// MP when the derived MP does not point to a member of the base.
659 /// This is why -1 is a reasonable choice for null data member
660 /// pointers.
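///
/// A concrete (hypothetical) example: given
///   struct A { int x; };  struct B { virtual ~B(); };
///   struct C : B, A {};   // assume A lands at offset 8 within C
/// the null value -1 is left unchanged, while &A::x (value 0 as an
/// 'int A::*') becomes 8 when converted to 'int C::*', and converting it
/// back to 'int A::*' subtracts 8 again.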
661 llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
663 const CastExpr *E,
664 llvm::Value *src) {
665 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
666 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
667 E->getCastKind() == CK_ReinterpretMemberPointer);
668
669 // Under Itanium, reinterprets don't require any additional processing.
670 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
671
672 // Use constant emission if we can.
673 if (isa<llvm::Constant>(src))
674 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
675
676 llvm::Constant *adj = getMemberPointerAdjustment(E);
677 if (!adj) return src;
678
679 CGBuilderTy &Builder = CGF.Builder;
680 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
681
682 const MemberPointerType *destTy =
683 E->getType()->castAs<MemberPointerType>();
684
685 // For member data pointers, this is just a matter of adding the
686 // offset if the source is non-null.
687 if (destTy->isMemberDataPointer()) {
688 llvm::Value *dst;
689 if (isDerivedToBase)
690 dst = Builder.CreateNSWSub(src, adj, "adj");
691 else
692 dst = Builder.CreateNSWAdd(src, adj, "adj");
693
694 // Null check.
695 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
696 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
697 return Builder.CreateSelect(isNull, src, dst);
698 }
699
700 // The this-adjustment is left-shifted by 1 on ARM.
701 if (UseARMMethodPtrABI) {
702 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
703 offset <<= 1;
704 adj = llvm::ConstantInt::get(adj->getType(), offset);
705 }
706
707 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
708 llvm::Value *dstAdj;
709 if (isDerivedToBase)
710 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
711 else
712 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
713
714 return Builder.CreateInsertValue(src, dstAdj, 1);
715 }
716
717 llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
719 llvm::Constant *src) {
720 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
721 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
722 E->getCastKind() == CK_ReinterpretMemberPointer);
723
724 // Under Itanium, reinterprets don't require any additional processing.
725 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
726
727 // If the adjustment is trivial, we don't need to do anything.
728 llvm::Constant *adj = getMemberPointerAdjustment(E);
729 if (!adj) return src;
730
731 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
732
733 const MemberPointerType *destTy =
734 E->getType()->castAs<MemberPointerType>();
735
736 // For member data pointers, this is just a matter of adding the
737 // offset if the source is non-null.
738 if (destTy->isMemberDataPointer()) {
739 // null maps to null.
740 if (src->isAllOnesValue()) return src;
741
742 if (isDerivedToBase)
743 return llvm::ConstantExpr::getNSWSub(src, adj);
744 else
745 return llvm::ConstantExpr::getNSWAdd(src, adj);
746 }
747
748 // The this-adjustment is left-shifted by 1 on ARM.
749 if (UseARMMethodPtrABI) {
750 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
751 offset <<= 1;
752 adj = llvm::ConstantInt::get(adj->getType(), offset);
753 }
754
755 llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
756 llvm::Constant *dstAdj;
757 if (isDerivedToBase)
758 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
759 else
760 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
761
762 return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
763 }
764
765 llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
767 // Itanium C++ ABI 2.3:
768 // A NULL pointer is represented as -1.
769 if (MPT->isMemberDataPointer())
770 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
771
772 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
773 llvm::Constant *Values[2] = { Zero, Zero };
774 return llvm::ConstantStruct::getAnon(Values);
775 }
776
777 llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
779 CharUnits offset) {
780 // Itanium C++ ABI 2.3:
781 // A pointer to data member is an offset from the base address of
782 // the class object containing it, represented as a ptrdiff_t
783 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
784 }
785
786 llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
788 return BuildMemberPointer(MD, CharUnits::Zero());
789 }
790
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
792 CharUnits ThisAdjustment) {
793 assert(MD->isInstance() && "Member function must not be static!");
794 MD = MD->getCanonicalDecl();
795
796 CodeGenTypes &Types = CGM.getTypes();
797
798 // Get the function pointer (or index if this is a virtual function).
799 llvm::Constant *MemPtr[2];
800 if (MD->isVirtual()) {
801 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
802
803 const ASTContext &Context = getContext();
804 CharUnits PointerWidth =
805 Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
806 uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
807
808 if (UseARMMethodPtrABI) {
809 // ARM C++ ABI 3.2.1:
810 // This ABI specifies that adj contains twice the this
811 // adjustment, plus 1 if the member function is virtual. The
812 // least significant bit of adj then makes exactly the same
813 // discrimination as the least significant bit of ptr does for
814 // Itanium.
815 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
816 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
817 2 * ThisAdjustment.getQuantity() + 1);
818 } else {
819 // Itanium C++ ABI 2.3:
820 // For a virtual function, [the pointer field] is 1 plus the
821 // virtual table offset (in bytes) of the function,
822 // represented as a ptrdiff_t.
823 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
824 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
825 ThisAdjustment.getQuantity());
826 }
827 } else {
828 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
829 llvm::Type *Ty;
830 // Check whether the function has a computable LLVM signature.
831 if (Types.isFuncTypeConvertible(FPT)) {
832 // The function has a computable LLVM signature; use the correct type.
833 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
834 } else {
835 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
836 // function type is incomplete.
837 Ty = CGM.PtrDiffTy;
838 }
839 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
840
841 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
842 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
843 (UseARMMethodPtrABI ? 2 : 1) *
844 ThisAdjustment.getQuantity());
845 }
846
847 return llvm::ConstantStruct::getAnon(MemPtr);
848 }
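
// For instance (illustrative values, 8-byte pointers): a pointer to a
// non-virtual member function f reached through a base at offset 4 is
//   Itanium: { ptrtoint f, 4 }
//   ARM:     { ptrtoint f, 8 }   // adj doubled; low bit 0 = non-virtual
// matching the (UseARMMethodPtrABI ? 2 : 1) scaling above.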
849
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
851 QualType MPType) {
852 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
853 const ValueDecl *MPD = MP.getMemberPointerDecl();
854 if (!MPD)
855 return EmitNullMemberPointer(MPT);
856
857 CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
858
859 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
860 return BuildMemberPointer(MD, ThisAdjustment);
861
862 CharUnits FieldOffset =
863 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
864 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
865 }
866
867 /// The comparison algorithm is pretty easy: the member pointers are
868 /// the same if they're either bitwise identical *or* both null.
869 ///
870 /// ARM is different here only because null-ness is more complicated.
871 llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
873 llvm::Value *L,
874 llvm::Value *R,
875 const MemberPointerType *MPT,
876 bool Inequality) {
877 CGBuilderTy &Builder = CGF.Builder;
878
879 llvm::ICmpInst::Predicate Eq;
880 llvm::Instruction::BinaryOps And, Or;
881 if (Inequality) {
882 Eq = llvm::ICmpInst::ICMP_NE;
883 And = llvm::Instruction::Or;
884 Or = llvm::Instruction::And;
885 } else {
886 Eq = llvm::ICmpInst::ICMP_EQ;
887 And = llvm::Instruction::And;
888 Or = llvm::Instruction::Or;
889 }
890
891 // Member data pointers are easy because there's a unique null
892 // value, so it just comes down to bitwise equality.
893 if (MPT->isMemberDataPointer())
894 return Builder.CreateICmp(Eq, L, R);
895
896 // For member function pointers, the tautologies are more complex.
897 // The Itanium tautology is:
898 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
899 // The ARM tautology is:
900 // (L == R) <==> (L.ptr == R.ptr &&
901 // (L.adj == R.adj ||
902 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
903 // The inequality tautologies have exactly the same structure, except
904 // applying De Morgan's laws.
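  // For example, a null 'void (A::*)()' that was converted across a base
  // class at a non-zero offset may carry adj == 8 while a plain null
  // carries adj == 0; the (L.ptr == 0) clause is what makes those two
  // compare equal.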
905
906 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
907 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
908
909 // This condition tests whether L.ptr == R.ptr. This must always be
910 // true for equality to hold.
911 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
912
913 // This condition, together with the assumption that L.ptr == R.ptr,
914 // tests whether the pointers are both null. ARM imposes an extra
915 // condition.
916 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
917 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
918
919 // This condition tests whether L.adj == R.adj. If this isn't
920 // true, the pointers are unequal unless they're both null.
921 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
922 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
923 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
924
925 // Null member function pointers on ARM clear the low bit of Adj,
926 // so the zero condition has to check that neither low bit is set.
927 if (UseARMMethodPtrABI) {
928 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
929
930 // Compute (l.adj | r.adj) & 1 and test it against zero.
931 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
932 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
933 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
934 "cmp.or.adj");
935 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
936 }
937
938 // Tie together all our conditions.
939 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
940 Result = Builder.CreateBinOp(And, PtrEq, Result,
941 Inequality ? "memptr.ne" : "memptr.eq");
942 return Result;
943 }
944
945 llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
947 llvm::Value *MemPtr,
948 const MemberPointerType *MPT) {
949 CGBuilderTy &Builder = CGF.Builder;
950
951 /// For member data pointers, this is just a check against -1.
952 if (MPT->isMemberDataPointer()) {
953 assert(MemPtr->getType() == CGM.PtrDiffTy);
954 llvm::Value *NegativeOne =
955 llvm::Constant::getAllOnesValue(MemPtr->getType());
956 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
957 }
958
959 // In Itanium, a member function pointer is not null if 'ptr' is not null.
960 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
961
962 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
963 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
964
965 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
966 // (the virtual bit) is set.
967 if (UseARMMethodPtrABI) {
968 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
969 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
970 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
971 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
972 "memptr.isvirtual");
973 Result = Builder.CreateOr(Result, IsVirtual);
974 }
975
976 return Result;
977 }
978
bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
980 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
981 if (!RD)
982 return false;
983
984 // Return indirectly if we have a non-trivial copy ctor or non-trivial dtor.
985 // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
986 // special members.
987 if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) {
988 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
989 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
990 return true;
991 }
992 return false;
993 }
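
// For example, a hypothetical 'struct S { ~S(); };' returned by value is
// returned indirectly through a hidden sret pointer rather than in
// registers, because its destructor is non-trivial.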
994
995 /// The Itanium ABI requires non-zero initialization only for data
996 /// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
998 return MPT->isMemberFunctionPointer();
999 }
1000
1001 /// The Itanium ABI always places an offset to the complete object
1002 /// at entry -2 in the vtable.
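/// (The address point sits just past the RTTI slot, so relative to it the
/// vtable reads [..., offset-to-top at -2, RTTI pointer at -1, virtual
/// function pointers from 0 on]; EmitTypeid below relies on the -1 slot in
/// the same way.)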
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1004 const CXXDeleteExpr *DE,
1005 Address Ptr,
1006 QualType ElementType,
1007 const CXXDestructorDecl *Dtor) {
1008 bool UseGlobalDelete = DE->isGlobalDelete();
1009 if (UseGlobalDelete) {
1010 // Derive the complete-object pointer, which is what we need
1011 // to pass to the deallocation function.
1012
1013 // Grab the vtable pointer as an intptr_t*.
1014 auto *ClassDecl =
1015 cast<CXXRecordDecl>(ElementType->getAs<RecordType>()->getDecl());
1016 llvm::Value *VTable =
1017 CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1018
1019 // Track back to entry -2 and pull out the offset there.
1020 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1021 VTable, -2, "complete-offset.ptr");
1022 llvm::Value *Offset =
1023 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1024
1025 // Apply the offset.
1026 llvm::Value *CompletePtr =
1027 CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1028 CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
1029
1030 // If we're supposed to call the global delete, make sure we do so
1031 // even if the destructor throws.
1032 CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1033 ElementType);
1034 }
1035
1036 // FIXME: Provide a source location here even though there's no
1037 // CXXMemberCallExpr for dtor call.
1038 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1039 EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
1040
1041 if (UseGlobalDelete)
1042 CGF.PopCleanupBlock();
1043 }
1044
void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1046 // void __cxa_rethrow();
1047
1048 llvm::FunctionType *FTy =
1049 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
1050
1051 llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1052
1053 if (isNoReturn)
1054 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1055 else
1056 CGF.EmitRuntimeCallOrInvoke(Fn);
1057 }
1058
static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
1060 // void *__cxa_allocate_exception(size_t thrown_size);
1061
1062 llvm::FunctionType *FTy =
1063 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);
1064
1065 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1066 }
1067
static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
1069 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1070 // void (*dest) (void *));
1071
1072 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1073 llvm::FunctionType *FTy =
1074 llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);
1075
1076 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1077 }
1078
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1080 QualType ThrowType = E->getSubExpr()->getType();
1081 // Now allocate the exception object.
1082 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1083 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1084
1085 llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
1086 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1087 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1088
1089 CharUnits ExnAlign = getAlignmentOfExnObject();
1090 CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
1091
1092 // Now throw the exception.
1093 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1094 /*ForEH=*/true);
1095
1096 // The address of the destructor. If the exception type has a
1097 // trivial destructor (or isn't a record), we just pass null.
1098 llvm::Constant *Dtor = nullptr;
1099 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1100 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1101 if (!Record->hasTrivialDestructor()) {
1102 CXXDestructorDecl *DtorD = Record->getDestructor();
1103 Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
1104 Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1105 }
1106 }
1107 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1108
1109 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1110 CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1111 }
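
// Roughly, for a hypothetical 'throw E();' where E has a non-trivial
// destructor, this emits:
//   %exn = call i8* @__cxa_allocate_exception(i64 sizeof(E))
//   ... construct the E object into %exn ...
//   call void @__cxa_throw(i8* %exn, i8* <typeinfo for E>,
//                          i8* <complete dtor for E>)   ; noreturn
// with a null destructor pointer passed instead when E is trivially
// destructible or not a class type.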
1112
static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1114 // void *__dynamic_cast(const void *sub,
1115 // const abi::__class_type_info *src,
1116 // const abi::__class_type_info *dst,
1117 // std::ptrdiff_t src2dst_offset);
1118
1119 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1120 llvm::Type *PtrDiffTy =
1121 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1122
1123 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1124
1125 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1126
1127 // Mark the function as nounwind readonly.
1128 llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1129 llvm::Attribute::ReadOnly };
1130 llvm::AttributeSet Attrs = llvm::AttributeSet::get(
1131 CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);
1132
1133 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1134 }
1135
static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1137 // void __cxa_bad_cast();
1138 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1139 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1140 }
1141
1142 /// \brief Compute the src2dst_offset hint as described in the
1143 /// Itanium C++ ABI [2.9.7]
static CharUnits computeOffsetHint(ASTContext &Context,
1145 const CXXRecordDecl *Src,
1146 const CXXRecordDecl *Dst) {
1147 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1148 /*DetectVirtual=*/false);
1149
1150 // If Dst is not derived from Src we can skip the whole computation below and
1151 // return that Src is not a public base of Dst. Record all inheritance paths.
1152 if (!Dst->isDerivedFrom(Src, Paths))
1153 return CharUnits::fromQuantity(-2ULL);
1154
1155 unsigned NumPublicPaths = 0;
1156 CharUnits Offset;
1157
1158 // Now walk all possible inheritance paths.
1159 for (const CXXBasePath &Path : Paths) {
1160 if (Path.Access != AS_public) // Ignore non-public inheritance.
1161 continue;
1162
1163 ++NumPublicPaths;
1164
1165 for (const CXXBasePathElement &PathElement : Path) {
1166 // If the path contains a virtual base class we can't give any hint.
1167 // -1: no hint.
1168 if (PathElement.Base->isVirtual())
1169 return CharUnits::fromQuantity(-1ULL);
1170
1171 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1172 continue;
1173
1174 // Accumulate the base class offsets.
1175 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1176 Offset += L.getBaseClassOffset(
1177 PathElement.Base->getType()->getAsCXXRecordDecl());
1178 }
1179 }
1180
1181 // -2: Src is not a public base of Dst.
1182 if (NumPublicPaths == 0)
1183 return CharUnits::fromQuantity(-2ULL);
1184
1185 // -3: Src is a multiple public base type but never a virtual base type.
1186 if (NumPublicPaths > 1)
1187 return CharUnits::fromQuantity(-3ULL);
1188
1189 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1190 // Return the offset of Src from the origin of Dst.
1191 return Offset;
1192 }
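
// For instance, with hypothetical 'struct A { virtual ~A(); };' and
// 'struct B : A {};', the hint for a cast from A* to B* is 0 (A is B's
// primary base). A private base would yield -2, a path through a virtual
// base -1, and multiple public A subobjects -3, matching the constants
// documented above.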
1193
static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1195 // void __cxa_bad_typeid();
1196 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1197
1198 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1199 }
1200
bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1202 QualType SrcRecordTy) {
1203 return IsDeref;
1204 }
1205
void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1207 llvm::Value *Fn = getBadTypeidFn(CGF);
1208 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1209 CGF.Builder.CreateUnreachable();
1210 }
1211
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1213 QualType SrcRecordTy,
1214 Address ThisPtr,
1215 llvm::Type *StdTypeInfoPtrTy) {
1216 auto *ClassDecl =
1217 cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1218 llvm::Value *Value =
1219 CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1220
1221 // Load the type info.
1222 Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1223 return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
1224 }
1225
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1227 QualType SrcRecordTy) {
1228 return SrcIsPtr;
1229 }
1230
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1232 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1233 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1234 llvm::Type *PtrDiffLTy =
1235 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1236 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1237
1238 llvm::Value *SrcRTTI =
1239 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1240 llvm::Value *DestRTTI =
1241 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1242
1243 // Compute the offset hint.
1244 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1245 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1246 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1247 PtrDiffLTy,
1248 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1249
1250 // Emit the call to __dynamic_cast.
1251 llvm::Value *Value = ThisAddr.getPointer();
1252 Value = CGF.EmitCastToVoidPtr(Value);
1253
1254 llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1255 Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1256 Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1257
1258 /// C++ [expr.dynamic.cast]p9:
1259 /// A failed cast to reference type throws std::bad_cast
1260 if (DestTy->isReferenceType()) {
1261 llvm::BasicBlock *BadCastBlock =
1262 CGF.createBasicBlock("dynamic_cast.bad_cast");
1263
1264 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1265 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1266
1267 CGF.EmitBlock(BadCastBlock);
1268 EmitBadCastCall(CGF);
1269 }
1270
1271 return Value;
1272 }
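
// As an illustration, 'dynamic_cast<Derived *>(basePtr)' (hypothetical
// types) becomes a call to
//   __dynamic_cast(basePtr, <typeinfo for Base>, <typeinfo for Derived>, hint)
// which returns null on failure; only the reference-cast form grows the
// extra branch to __cxa_bad_cast emitted above.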
1273
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1275 Address ThisAddr,
1276 QualType SrcRecordTy,
1277 QualType DestTy) {
1278 llvm::Type *PtrDiffLTy =
1279 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1280 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1281
1282 auto *ClassDecl =
1283 cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1284 // Get the vtable pointer.
1285 llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
1286 ClassDecl);
1287
1288 // Get the offset-to-top from the vtable.
1289 llvm::Value *OffsetToTop =
1290 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1291 OffsetToTop =
1292 CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
1293 "offset.to.top");
1294
1295 // Finally, add the offset to the pointer.
1296 llvm::Value *Value = ThisAddr.getPointer();
1297 Value = CGF.EmitCastToVoidPtr(Value);
1298 Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1299
1300 return CGF.Builder.CreateBitCast(Value, DestLTy);
1301 }
1302
bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1304 llvm::Value *Fn = getBadCastFn(CGF);
1305 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1306 CGF.Builder.CreateUnreachable();
1307 return true;
1308 }
1309
1310 llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1312 Address This,
1313 const CXXRecordDecl *ClassDecl,
1314 const CXXRecordDecl *BaseClassDecl) {
1315 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1316 CharUnits VBaseOffsetOffset =
1317 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1318 BaseClassDecl);
1319
1320 llvm::Value *VBaseOffsetPtr =
1321 CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1322 "vbase.offset.ptr");
1323 VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1324 CGM.PtrDiffTy->getPointerTo());
1325
1326 llvm::Value *VBaseOffset =
1327 CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
1328 "vbase.offset");
1329
1330 return VBaseOffset;
1331 }
1332
void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1334 // Just make sure we're in sync with TargetCXXABI.
1335 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1336
1337 // The constructor used for constructing this as a base class;
1338 // ignores virtual bases.
1339 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1340
1341 // The constructor used for constructing this as a complete class;
1342 // constructs the virtual bases, then calls the base constructor.
1343 if (!D->getParent()->isAbstract()) {
1344 // We don't need to emit the complete ctor if the class is abstract.
1345 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1346 }
1347 }
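
// For a hypothetical class X this emits the base-object constructor
// (C2, e.g. _ZN1XC2Ev) and, unless X is abstract, the complete-object
// constructor (C1, e.g. _ZN1XC1Ev).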
1348
1349 void
ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
1351 SmallVectorImpl<CanQualType> &ArgTys) {
1352 ASTContext &Context = getContext();
1353
1354 // All parameters are already in place except VTT, which goes after 'this'.
1355 // These are Clang types, so we don't need to worry about sret yet.
1356
1357 // Check if we need to add a VTT parameter (which has type void **).
1358 if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0)
1359 ArgTys.insert(ArgTys.begin() + 1,
1360 Context.getPointerType(Context.VoidPtrTy));
1361 }
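
// For example, for a hypothetical 'struct B : virtual A { B(); };' the
// base-object constructor's signature becomes (B *this, void **vtt, ...),
// while the complete-object constructor is unchanged.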
1362
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1364 // The destructor used for destructing this as a base class; ignores
1365 // virtual bases.
1366 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1367
1368 // The destructor used for destructing this as a most-derived class;
1369 // call the base destructor and then destructs any virtual bases.
1370 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1371
1372 // The destructor in a virtual table is always a 'deleting'
1373 // destructor, which calls the complete destructor and then uses the
1374 // appropriate operator delete.
1375 if (D->isVirtual())
1376 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1377 }
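
// So a hypothetical 'struct X { virtual ~X(); };' gets three symbols:
// _ZN1XD2Ev (base), _ZN1XD1Ev (complete), and _ZN1XD0Ev (deleting), the
// last only because the destructor is virtual.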
1378
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1380 QualType &ResTy,
1381 FunctionArgList &Params) {
1382 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1383 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1384
1385 // Check if we need a VTT parameter as well.
1386 if (NeedsVTTParameter(CGF.CurGD)) {
1387 ASTContext &Context = getContext();
1388
1389 // FIXME: avoid the fake decl
1390 QualType T = Context.getPointerType(Context.VoidPtrTy);
1391 ImplicitParamDecl *VTTDecl
1392 = ImplicitParamDecl::Create(Context, nullptr, MD->getLocation(),
1393 &Context.Idents.get("vtt"), T);
1394 Params.insert(Params.begin() + 1, VTTDecl);
1395 getStructorImplicitParamDecl(CGF) = VTTDecl;
1396 }
1397 }
1398
1399 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1400 /// Initialize the 'this' slot.
1401 EmitThisParam(CGF);
1402
1403 /// Initialize the 'vtt' slot if needed.
1404 if (getStructorImplicitParamDecl(CGF)) {
1405 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1406 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1407 }
1408
1409 /// If this is a function that the ABI specifies returns 'this', initialize
1410 /// the return slot to 'this' at the start of the function.
1411 ///
1412 /// Unlike the setting of return types, this is done within the ABI
1413 /// implementation instead of by clients of CGCXXABI because:
1414 /// 1) getThisValue is currently protected
1415 /// 2) in theory, an ABI could implement 'this' returns some other way;
1416 /// HasThisReturn only specifies a contract, not the implementation
1417 if (HasThisReturn(CGF.CurGD))
1418 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1419 }
1420
1421 unsigned ItaniumCXXABI::addImplicitConstructorArgs(
1422 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1423 bool ForVirtualBase, bool Delegating, CallArgList &Args) {
1424 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1425 return 0;
1426
1427 // Insert the implicit 'vtt' argument as the second argument.
1428 llvm::Value *VTT =
1429 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1430 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1431 Args.insert(Args.begin() + 1,
1432 CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
1433 return 1; // Added one arg.
1434 }
1435
1436 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1437 const CXXDestructorDecl *DD,
1438 CXXDtorType Type, bool ForVirtualBase,
1439 bool Delegating, Address This) {
1440 GlobalDecl GD(DD, Type);
1441 llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1442 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1443
1444 llvm::Value *Callee = nullptr;
1445 if (getContext().getLangOpts().AppleKext)
1446 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1447
1448 if (!Callee)
1449 Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
1450
1451 CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
1452 This.getPointer(), VTT, VTTTy, nullptr);
1453 }
1454
1455 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1456 const CXXRecordDecl *RD) {
1457 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1458 if (VTable->hasInitializer())
1459 return;
1460
1461 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1462 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1463 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1464 llvm::Constant *RTTI =
1465 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1466
1467 // Create and set the initializer.
1468 llvm::Constant *Init = CGVT.CreateVTableInitializer(
1469 RD, VTLayout.vtable_component_begin(), VTLayout.getNumVTableComponents(),
1470 VTLayout.vtable_thunk_begin(), VTLayout.getNumVTableThunks(), RTTI);
1471 VTable->setInitializer(Init);
1472
1473 // Set the correct linkage.
1474 VTable->setLinkage(Linkage);
1475
1476 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1477 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1478
1479 // Set the right visibility.
1480 CGM.setGlobalVisibility(VTable, RD);
1481
1482 // Use pointer alignment for the vtable. Otherwise we would align it based
1483 // on the size of the initializer, which doesn't make sense as only single
1484 // values are read.
1485 unsigned PAlign = CGM.getTarget().getPointerAlign(0);
1486 VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());
1487
1488 // If this is the magic class __cxxabiv1::__fundamental_type_info,
1489 // we will emit the typeinfo for the fundamental types. This is the
1490 // same behaviour as GCC.
1491 const DeclContext *DC = RD->getDeclContext();
1492 if (RD->getIdentifier() &&
1493 RD->getIdentifier()->isStr("__fundamental_type_info") &&
1494 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1495 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1496 DC->getParent()->isTranslationUnit())
1497 EmitFundamentalRTTIDescriptors();
1498
1499 CGM.EmitVTableBitSetEntries(VTable, VTLayout);
1500 }
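// The __fundamental_type_info check above fires on the usual C++ runtime
// idiom, sketched here from libc++abi/libsupc++ (exact code varies):
//
//   namespace __cxxabiv1 {
//     class __fundamental_type_info : public std::type_info { /* ... */ };
//     __fundamental_type_info::~__fundamental_type_info() {}  // key function
//   }
//
// Emitting the vtable for that destructor's class is what triggers
// EmitFundamentalRTTIDescriptors() and provides the type_info objects for
// void, bool, int, etc. in the runtime library.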
1501
1502 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1503 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1504 if (Vptr.NearestVBase == nullptr)
1505 return false;
1506 return NeedsVTTParameter(CGF.CurGD);
1507 }
1508
1509 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1510 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1511 const CXXRecordDecl *NearestVBase) {
1512
1513 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1514 NeedsVTTParameter(CGF.CurGD)) {
1515 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1516 NearestVBase);
1517 }
1518 return getVTableAddressPoint(Base, VTableClass);
1519 }
1520
1521 llvm::Constant *
1522 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1523 const CXXRecordDecl *VTableClass) {
1524 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1525
1526 // Find the appropriate vtable within the vtable group.
1527 uint64_t AddressPoint = CGM.getItaniumVTableContext()
1528 .getVTableLayout(VTableClass)
1529 .getAddressPoint(Base);
1530 llvm::Value *Indices[] = {
1531 llvm::ConstantInt::get(CGM.Int64Ty, 0),
1532 llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint)
1533 };
1534
1535 return llvm::ConstantExpr::getInBoundsGetElementPtr(VTable->getValueType(),
1536 VTable, Indices);
1537 }
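// For a hypothetical class S with a four-slot vtable array, the constant
// produced above has roughly the shape (illustrative IR, not emitted text):
//
//   getelementptr inbounds ([4 x i8*], [4 x i8*]* @_ZTV1S, i64 0, i64 2)
//
// i.e. the address point skips the offset-to-top and RTTI slots of the
// primary vtable within the vtable group.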
1538
1539 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1540 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1541 const CXXRecordDecl *NearestVBase) {
1542 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1543 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1544
1545 // Get the secondary vpointer index.
1546 uint64_t VirtualPointerIndex =
1547 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1548
1549 /// Load the VTT.
1550 llvm::Value *VTT = CGF.LoadCXXVTT();
1551 if (VirtualPointerIndex)
1552 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1553
1554 // And load the address point from the VTT.
1555 return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
1556 }
1557
1558 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1559 BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1560 return getVTableAddressPoint(Base, VTableClass);
1561 }
1562
1563 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1564 CharUnits VPtrOffset) {
1565 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1566
1567 llvm::GlobalVariable *&VTable = VTables[RD];
1568 if (VTable)
1569 return VTable;
1570
1571 // Queue up this v-table for possible deferred emission.
1572 CGM.addDeferredVTable(RD);
1573
1574 SmallString<256> Name;
1575 llvm::raw_svector_ostream Out(Name);
1576 getMangleContext().mangleCXXVTable(RD, Out);
1577
1578 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1579 llvm::ArrayType *ArrayType = llvm::ArrayType::get(
1580 CGM.Int8PtrTy, VTContext.getVTableLayout(RD).getNumVTableComponents());
1581
1582 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1583 Name, ArrayType, llvm::GlobalValue::ExternalLinkage);
1584 VTable->setUnnamedAddr(true);
1585
1586 if (RD->hasAttr<DLLImportAttr>())
1587 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1588 else if (RD->hasAttr<DLLExportAttr>())
1589 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
1590
1591 return VTable;
1592 }
1593
1594 llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1595 GlobalDecl GD,
1596 Address This,
1597 llvm::Type *Ty,
1598 SourceLocation Loc) {
1599 GD = GD.getCanonicalDecl();
1600 Ty = Ty->getPointerTo()->getPointerTo();
1601 auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1602 llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
1603
1604 if (CGF.SanOpts.has(SanitizerKind::CFIVCall))
1605 CGF.EmitVTablePtrCheckForCall(MethodDecl, VTable,
1606 CodeGenFunction::CFITCK_VCall, Loc);
1607
1608 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1609 llvm::Value *VFuncPtr =
1610 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
1611 return CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
1612 }
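// At the source level the load sequence above is the usual virtual dispatch
// (sketch with made-up names; the call itself is emitted by the caller):
//
//   vtable = *(fn_ptr_t **)object;     // load the vptr
//   vfn    = vtable[VTableIndex];      // index into the vtable
//   vfn(object, args...);              // indirect call through the slot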
1613
1614 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1615 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1616 Address This, const CXXMemberCallExpr *CE) {
1617 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1618 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1619
1620 const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
1621 Dtor, getFromDtorType(DtorType));
1622 llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1623 llvm::Value *Callee =
1624 getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
1625 CE ? CE->getLocStart() : SourceLocation());
1626
1627 CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
1628 This.getPointer(), /*ImplicitParam=*/nullptr,
1629 QualType(), CE);
1630 return nullptr;
1631 }
1632
1633 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1634 CodeGenVTables &VTables = CGM.getVTables();
1635 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1636 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
1637 }
1638
1639 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
1640 // We don't emit available_externally vtables if we are in -fapple-kext mode
1641 // because kext mode does not permit devirtualization.
1642 if (CGM.getLangOpts().AppleKext)
1643 return false;
1644
1645 // If we don't have any inline virtual functions and the vtable is not hidden,
1646 // then we are safe to emit an available_externally copy of the vtable.
1647 // FIXME: we can still emit a copy of the vtable if we
1648 // can emit a definition of the inline functions.
1649 return !hasAnyUsedVirtualInlineFunction(RD) && !isVTableHidden(RD);
1650 }
1651 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
1652 Address InitialPtr,
1653 int64_t NonVirtualAdjustment,
1654 int64_t VirtualAdjustment,
1655 bool IsReturnAdjustment) {
1656 if (!NonVirtualAdjustment && !VirtualAdjustment)
1657 return InitialPtr.getPointer();
1658
1659 Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
1660
1661 // In a base-to-derived cast, the non-virtual adjustment is applied first.
1662 if (NonVirtualAdjustment && !IsReturnAdjustment) {
1663 V = CGF.Builder.CreateConstInBoundsByteGEP(V,
1664 CharUnits::fromQuantity(NonVirtualAdjustment));
1665 }
1666
1667 // Perform the virtual adjustment if we have one.
1668 llvm::Value *ResultPtr;
1669 if (VirtualAdjustment) {
1670 llvm::Type *PtrDiffTy =
1671 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1672
1673 Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
1674 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
1675
1676 llvm::Value *OffsetPtr =
1677 CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
1678
1679 OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
1680
1681 // Load the adjustment offset from the vtable.
1682 llvm::Value *Offset =
1683 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1684
1685 // Adjust our pointer.
1686 ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
1687 } else {
1688 ResultPtr = V.getPointer();
1689 }
1690
1691 // In a derived-to-base conversion, the non-virtual adjustment is
1692 // applied second.
1693 if (NonVirtualAdjustment && IsReturnAdjustment) {
1694 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
1695 NonVirtualAdjustment);
1696 }
1697
1698 // Cast back to the original type.
1699 return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
1700 }
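// Summarized as pointer arithmetic (sketch only; 'p' and 'ptr' are
// illustrative names):
//
//   // 'this' adjustment in a thunk (IsReturnAdjustment == false):
//   p  = (char *)ptr + NonVirtualAdjustment;                 // non-virtual first
//   p += *(ptrdiff_t *)(*(char **)p + VirtualAdjustment);    // then vcall offset
//
//   // return adjustment (IsReturnAdjustment == true):
//   p  = (char *)ptr;
//   p += *(ptrdiff_t *)(*(char **)p + VirtualAdjustment);    // vbase offset first
//   p += NonVirtualAdjustment;                               // non-virtual second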
1701
1702 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
1703 Address This,
1704 const ThisAdjustment &TA) {
1705 return performTypeAdjustment(CGF, This, TA.NonVirtual,
1706 TA.Virtual.Itanium.VCallOffsetOffset,
1707 /*IsReturnAdjustment=*/false);
1708 }
1709
1710 llvm::Value *
1711 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
1712 const ReturnAdjustment &RA) {
1713 return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
1714 RA.Virtual.Itanium.VBaseOffsetOffset,
1715 /*IsReturnAdjustment=*/true);
1716 }
1717
1718 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
1719 RValue RV, QualType ResultType) {
1720 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
1721 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
1722
1723 // Destructor thunks in the ARM ABI have indeterminate results.
1724 llvm::Type *T = CGF.ReturnValue.getElementType();
1725 RValue Undef = RValue::get(llvm::UndefValue::get(T));
1726 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
1727 }
1728
1729 /************************** Array allocation cookies **************************/
1730
1731 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1732 // The array cookie is a size_t; pad that up to the element alignment.
1733 // The cookie is actually right-justified in that space.
1734 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
1735 CGM.getContext().getTypeAlignInChars(elementType));
1736 }
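// Worked example, assuming a 64-bit target where sizeof(size_t) == 8:
//   new double[n]                      -> max(8, 8)  -> 8-byte cookie
//   new T[n] with alignof(T) == 16     -> max(8, 16) -> 16-byte cookie, with
//     the element count stored in the last 8 bytes (right-justified) so that
//     the array data which follows stays 16-byte aligned.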
1737
1738 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1739 Address NewPtr,
1740 llvm::Value *NumElements,
1741 const CXXNewExpr *expr,
1742 QualType ElementType) {
1743 assert(requiresArrayCookie(expr));
1744
1745 unsigned AS = NewPtr.getAddressSpace();
1746
1747 ASTContext &Ctx = getContext();
1748 CharUnits SizeSize = CGF.getSizeSize();
1749
1750 // The size of the cookie.
1751 CharUnits CookieSize =
1752 std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
1753 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
1754
1755 // Compute an offset to the cookie.
1756 Address CookiePtr = NewPtr;
1757 CharUnits CookieOffset = CookieSize - SizeSize;
1758 if (!CookieOffset.isZero())
1759 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
1760
1761 // Write the number of elements into the appropriate slot.
1762 Address NumElementsPtr =
1763 CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
1764 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
1765
1766 // Handle the array cookie specially in ASan.
1767 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
1768 expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
1769 // The store to the CookiePtr does not need to be instrumented.
1770 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
1771 llvm::FunctionType *FTy =
1772 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
1773 llvm::Constant *F =
1774 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
1775 CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
1776 }
1777
1778 // Finally, compute a pointer to the actual data buffer by skipping
1779 // over the cookie completely.
1780 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
1781 }
1782
1783 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1784 Address allocPtr,
1785 CharUnits cookieSize) {
1786 // The number of elements is right-justified in the cookie.
1787 Address numElementsPtr = allocPtr;
1788 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
1789 if (!numElementsOffset.isZero())
1790 numElementsPtr =
1791 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
1792
1793 unsigned AS = allocPtr.getAddressSpace();
1794 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1795 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
1796 return CGF.Builder.CreateLoad(numElementsPtr);
1797 // In ASan mode, emit a function call instead of a regular load and let the
1798 // run-time deal with it: if the shadow is properly poisoned, return the
1799 // cookie; otherwise return 0 to avoid an infinite loop calling DTORs.
1800 // We can't simply ignore this load using nosanitize metadata because
1801 // the metadata may be lost.
1802 llvm::FunctionType *FTy =
1803 llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
1804 llvm::Constant *F =
1805 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
1806 return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
1807 }
1808
1809 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1810 // ARM says that the cookie is always:
1811 // struct array_cookie {
1812 // std::size_t element_size; // element_size != 0
1813 // std::size_t element_count;
1814 // };
1815 // But the base ABI doesn't give anything an alignment greater than
1816 // 8, so we can dismiss this as typical ABI-author blindness to
1817 // actual language complexity and round up to the element alignment.
1818 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
1819 CGM.getContext().getTypeAlignInChars(elementType));
1820 }
1821
1822 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1823 Address newPtr,
1824 llvm::Value *numElements,
1825 const CXXNewExpr *expr,
1826 QualType elementType) {
1827 assert(requiresArrayCookie(expr));
1828
1829 // The cookie is always at the start of the buffer.
1830 Address cookie = newPtr;
1831
1832 // The first element is the element size.
1833 cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
1834 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
1835 getContext().getTypeSizeInChars(elementType).getQuantity());
1836 CGF.Builder.CreateStore(elementSize, cookie);
1837
1838 // The second element is the element count.
1839 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
1840 CGF.Builder.CreateStore(numElements, cookie);
1841
1842 // Finally, compute a pointer to the actual data buffer by skipping
1843 // over the cookie completely.
1844 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
1845 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
1846 }
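// Worked example (64-bit ARM target, illustration only): 'new double[10]'
// gets a max(2 * 8, 8) == 16-byte cookie, and the buffer begins with
//   { (size_t)8 /* element_size */, (size_t)10 /* element_count */, data... }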
1847
1848 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1849 Address allocPtr,
1850 CharUnits cookieSize) {
1851 // The number of elements is at offset sizeof(size_t) relative to
1852 // the allocated pointer.
1853 Address numElementsPtr
1854 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
1855
1856 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1857 return CGF.Builder.CreateLoad(numElementsPtr);
1858 }
1859
1860 /*********************** Static local initialization **************************/
1861
1862 static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
1863 llvm::PointerType *GuardPtrTy) {
1864 // int __cxa_guard_acquire(__guard *guard_object);
1865 llvm::FunctionType *FTy =
1866 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
1867 GuardPtrTy, /*isVarArg=*/false);
1868 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
1869 llvm::AttributeSet::get(CGM.getLLVMContext(),
1870 llvm::AttributeSet::FunctionIndex,
1871 llvm::Attribute::NoUnwind));
1872 }
1873
1874 static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
1875 llvm::PointerType *GuardPtrTy) {
1876 // void __cxa_guard_release(__guard *guard_object);
1877 llvm::FunctionType *FTy =
1878 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1879 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
1880 llvm::AttributeSet::get(CGM.getLLVMContext(),
1881 llvm::AttributeSet::FunctionIndex,
1882 llvm::Attribute::NoUnwind));
1883 }
1884
1885 static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
1886 llvm::PointerType *GuardPtrTy) {
1887 // void __cxa_guard_abort(__guard *guard_object);
1888 llvm::FunctionType *FTy =
1889 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1890 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
1891 llvm::AttributeSet::get(CGM.getLLVMContext(),
1892 llvm::AttributeSet::FunctionIndex,
1893 llvm::Attribute::NoUnwind));
1894 }
1895
1896 namespace {
1897 struct CallGuardAbort final : EHScopeStack::Cleanup {
1898 llvm::GlobalVariable *Guard;
1899 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
1900
1901 void Emit(CodeGenFunction &CGF, Flags flags) override {
1902 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
1903 Guard);
1904 }
1905 };
1906 }
1907
1908 /// The ARM code here follows the Itanium code closely enough that we
1909 /// just special-case it at particular places.
1910 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
1911 const VarDecl &D,
1912 llvm::GlobalVariable *var,
1913 bool shouldPerformInit) {
1914 CGBuilderTy &Builder = CGF.Builder;
1915
1916 // We only need to use thread-safe statics for local non-TLS variables;
1917 // global initialization is always single-threaded.
1918 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
1919 D.isLocalVarDecl() && !D.getTLSKind();
1920
1921 // If we have a global variable with internal linkage and thread-safe statics
1922 // are disabled, we can just let the guard variable be of type i8.
1923 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
1924
1925 llvm::IntegerType *guardTy;
1926 CharUnits guardAlignment;
1927 if (useInt8GuardVariable) {
1928 guardTy = CGF.Int8Ty;
1929 guardAlignment = CharUnits::One();
1930 } else {
1931 // Guard variables are 64 bits in the generic ABI and size_t-width on ARM
1932 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
1933 if (UseARMGuardVarABI) {
1934 guardTy = CGF.SizeTy;
1935 guardAlignment = CGF.getSizeAlign();
1936 } else {
1937 guardTy = CGF.Int64Ty;
1938 guardAlignment = CharUnits::fromQuantity(
1939 CGM.getDataLayout().getABITypeAlignment(guardTy));
1940 }
1941 }
1942 llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
1943
1944 // Create the guard variable if we don't already have it (as we
1945 // might if we're double-emitting this function body).
1946 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
1947 if (!guard) {
1948 // Mangle the name for the guard.
1949 SmallString<256> guardName;
1950 {
1951 llvm::raw_svector_ostream out(guardName);
1952 getMangleContext().mangleStaticGuardVariable(&D, out);
1953 }
1954
1955 // Create the guard variable with a zero-initializer.
1956 // Just absorb linkage and visibility from the guarded variable.
1957 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
1958 false, var->getLinkage(),
1959 llvm::ConstantInt::get(guardTy, 0),
1960 guardName.str());
1961 guard->setVisibility(var->getVisibility());
1962 // If the variable is thread-local, so is its guard variable.
1963 guard->setThreadLocalMode(var->getThreadLocalMode());
1964 guard->setAlignment(guardAlignment.getQuantity());
1965
1966 // The ABI says: "It is suggested that it be emitted in the same COMDAT
1967 // group as the associated data object." In practice, this doesn't work for
1968 // non-ELF object formats, so only do it for ELF.
1969 llvm::Comdat *C = var->getComdat();
1970 if (!D.isLocalVarDecl() && C &&
1971 CGM.getTarget().getTriple().isOSBinFormatELF()) {
1972 guard->setComdat(C);
1973 CGF.CurFn->setComdat(C);
1974 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
1975 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
1976 }
1977
1978 CGM.setStaticLocalDeclGuardAddress(&D, guard);
1979 }
1980
1981 Address guardAddr = Address(guard, guardAlignment);
1982
1983 // Test whether the variable has completed initialization.
1984 //
1985 // Itanium C++ ABI 3.3.2:
1986 // The following is pseudo-code showing how these functions can be used:
1987 // if (obj_guard.first_byte == 0) {
1988 // if ( __cxa_guard_acquire (&obj_guard) ) {
1989 // try {
1990 // ... initialize the object ...;
1991 // } catch (...) {
1992 // __cxa_guard_abort (&obj_guard);
1993 // throw;
1994 // }
1995 // ... queue object destructor with __cxa_atexit() ...;
1996 // __cxa_guard_release (&obj_guard);
1997 // }
1998 // }
1999
2000 // Load the first byte of the guard variable.
2001 llvm::LoadInst *LI =
2002 Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2003
2004 // Itanium ABI:
2005 // An implementation supporting thread-safety on multiprocessor
2006 // systems must also guarantee that references to the initialized
2007 // object do not occur before the load of the initialization flag.
2008 //
2009 // In LLVM, we do this by marking the load Acquire.
2010 if (threadsafe)
2011 LI->setAtomic(llvm::Acquire);
2012
2013 // For ARM, we should only check the first bit, rather than the entire byte:
2014 //
2015 // ARM C++ ABI 3.2.3.1:
2016 // To support the potential use of initialization guard variables
2017 // as semaphores that are the target of ARM SWP and LDREX/STREX
2018 // synchronizing instructions we define a static initialization
2019 // guard variable to be a 4-byte aligned, 4-byte word with the
2020 // following inline access protocol.
2021 // #define INITIALIZED 1
2022 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2023 // if (__cxa_guard_acquire(&obj_guard))
2024 // ...
2025 // }
2026 //
2027 // and similarly for ARM64:
2028 //
2029 // ARM64 C++ ABI 3.2.2:
2030 // This ABI instead only specifies the value bit 0 of the static guard
2031 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2032 // variable is not initialized and 1 when it is.
2033 llvm::Value *V =
2034 (UseARMGuardVarABI && !useInt8GuardVariable)
2035 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2036 : LI;
2037 llvm::Value *isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
2038
2039 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2040 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2041
2042 // Check if the first byte of the guard variable is zero.
2043 Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);
2044
2045 CGF.EmitBlock(InitCheckBlock);
2046
2047 // Variables used when coping with thread-safe statics and exceptions.
2048 if (threadsafe) {
2049 // Call __cxa_guard_acquire.
2050 llvm::Value *V
2051 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2052
2053 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2054
2055 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2056 InitBlock, EndBlock);
2057
2058 // Call __cxa_guard_abort along the exceptional edge.
2059 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2060
2061 CGF.EmitBlock(InitBlock);
2062 }
2063
2064 // Emit the initializer and add a global destructor if appropriate.
2065 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2066
2067 if (threadsafe) {
2068 // Pop the guard-abort cleanup if we pushed one.
2069 CGF.PopCleanupBlock();
2070
2071 // Call __cxa_guard_release. This cannot throw.
2072 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2073 guardAddr.getPointer());
2074 } else {
2075 Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
2076 }
2077
2078 CGF.EmitBlock(EndBlock);
2079 }
2080
2081 /// Register a global destructor using __cxa_atexit.
2082 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2083 llvm::Constant *dtor,
2084 llvm::Constant *addr,
2085 bool TLS) {
2086 const char *Name = "__cxa_atexit";
2087 if (TLS) {
2088 const llvm::Triple &T = CGF.getTarget().getTriple();
2089 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2090 }
2091
2092 // We're assuming that the destructor function is something we can
2093 // reasonably call with the default CC. Go ahead and cast it to the
2094 // right prototype.
2095 llvm::Type *dtorTy =
2096 llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2097
2098 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2099 llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
2100 llvm::FunctionType *atexitTy =
2101 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2102
2103 // Fetch the actual function.
2104 llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2105 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
2106 fn->setDoesNotThrow();
2107
2108 // Create a variable that binds the atexit to this shared object.
2109 llvm::Constant *handle =
2110 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2111
2112 llvm::Value *args[] = {
2113 llvm::ConstantExpr::getBitCast(dtor, dtorTy),
2114 llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
2115 handle
2116 };
2117 CGF.EmitNounwindRuntimeCall(atexit, args);
2118 }
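// Sketch of the resulting registration for a hypothetical global
// 'static Foo foo;' ('foo_dtor_stub' is a made-up name for the destructor
// wrapper passed in as 'dtor'):
//
//   __cxa_atexit((void (*)(void *))&foo_dtor_stub, (void *)&foo, &__dso_handle);
//
// For a dynamically initialized thread_local, the same call goes through
// __cxa_thread_atexit (or _tlv_atexit on Darwin), as selected above.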
2119
2120 /// Register a global destructor as best as we know how.
2121 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
2122 const VarDecl &D,
2123 llvm::Constant *dtor,
2124 llvm::Constant *addr) {
2125 // Use __cxa_atexit if available.
2126 if (CGM.getCodeGenOpts().CXAAtExit)
2127 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2128
2129 if (D.getTLSKind())
2130 CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
2131
2132 // In Apple kexts, we want to add a global destructor entry.
2133 // FIXME: shouldn't this be guarded by some variable?
2134 if (CGM.getLangOpts().AppleKext) {
2135 // Generate a global destructor entry.
2136 return CGM.AddCXXDtorEntry(dtor, addr);
2137 }
2138
2139 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2140 }
2141
2142 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2143 CodeGen::CodeGenModule &CGM) {
2144 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2145 // Darwin prefers references to thread local variables to go through
2146 // the thread wrapper instead of directly referencing the backing variable.
2147 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2148 CGM.getTarget().getTriple().isOSDarwin();
2149 }
2150
2151 /// Get the appropriate linkage for the wrapper function. This is essentially
2152 /// the weak form of the variable's linkage; every translation unit which needs
2153 /// the wrapper emits a copy, and we want the linker to merge them.
2154 static llvm::GlobalValue::LinkageTypes
2155 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2156 llvm::GlobalValue::LinkageTypes VarLinkage =
2157 CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);
2158
2159 // For internal linkage variables, we don't need an external or weak wrapper.
2160 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2161 return VarLinkage;
2162
2163 // If the thread wrapper is replaceable, give it appropriate linkage.
2164 if (isThreadWrapperReplaceable(VD, CGM))
2165 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2166 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2167 return VarLinkage;
2168 return llvm::GlobalValue::WeakODRLinkage;
2169 }
2170
2171 llvm::Function *
2172 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2173 llvm::Value *Val) {
2174 // Mangle the name for the thread_local wrapper function.
2175 SmallString<256> WrapperName;
2176 {
2177 llvm::raw_svector_ostream Out(WrapperName);
2178 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2179 }
2180
2181 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2182 return cast<llvm::Function>(V);
2183
2184 llvm::Type *RetTy = Val->getType();
2185 if (VD->getType()->isReferenceType())
2186 RetTy = RetTy->getPointerElementType();
2187
2188 llvm::FunctionType *FnTy = llvm::FunctionType::get(RetTy, false);
2189 llvm::Function *Wrapper =
2190 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2191 WrapperName.str(), &CGM.getModule());
2192 // Always resolve references to the wrapper at link time.
2193 if (!Wrapper->hasLocalLinkage() && !(isThreadWrapperReplaceable(VD, CGM) &&
2194 !llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) &&
2195 !llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage())))
2196 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2197
2198 if (isThreadWrapperReplaceable(VD, CGM)) {
2199 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2200 Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2201 }
2202 return Wrapper;
2203 }
2204
2205 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2206 CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2207 ArrayRef<llvm::Function *> CXXThreadLocalInits,
2208 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2209 llvm::Function *InitFunc = nullptr;
2210 if (!CXXThreadLocalInits.empty()) {
2211 // Generate a guarded initialization function.
2212 llvm::FunctionType *FTy =
2213 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2214 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2215 InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
2216 SourceLocation(),
2217 /*TLS=*/true);
2218 llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2219 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2220 llvm::GlobalVariable::InternalLinkage,
2221 llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2222 Guard->setThreadLocal(true);
2223
2224 CharUnits GuardAlign = CharUnits::One();
2225 Guard->setAlignment(GuardAlign.getQuantity());
2226
2227 CodeGenFunction(CGM)
2228 .GenerateCXXGlobalInitFunc(InitFunc, CXXThreadLocalInits,
2229 Address(Guard, GuardAlign));
2230 }
2231 for (const VarDecl *VD : CXXThreadLocals) {
2232 llvm::GlobalVariable *Var =
2233 cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2234
2235 // Some targets require that all access to thread local variables go through
2236 // the thread wrapper. This means that we cannot attempt to create a thread
2237 // wrapper or a thread helper.
2238 if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition())
2239 continue;
2240
2241 // Mangle the name for the thread_local initialization function.
2242 SmallString<256> InitFnName;
2243 {
2244 llvm::raw_svector_ostream Out(InitFnName);
2245 getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2246 }
2247
2248 // If we have a definition for the variable, emit the initialization
2249 // function as an alias to the global Init function (if any). Otherwise,
2250 // produce a declaration of the initialization function.
2251 llvm::GlobalValue *Init = nullptr;
2252 bool InitIsInitFunc = false;
2253 if (VD->hasDefinition()) {
2254 InitIsInitFunc = true;
2255 if (InitFunc)
2256 Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2257 InitFunc);
2258 } else {
2259 // Emit a weak global function referring to the initialization function.
2260 // This function will not exist if the TU defining the thread_local
2261 // variable in question does not need any dynamic initialization for
2262 // its thread_local variables.
2263 llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2264 Init = llvm::Function::Create(
2265 FnTy, llvm::GlobalVariable::ExternalWeakLinkage, InitFnName.str(),
2266 &CGM.getModule());
2267 }
2268
2269 if (Init)
2270 Init->setVisibility(Var->getVisibility());
2271
2272 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
2273 llvm::LLVMContext &Context = CGM.getModule().getContext();
2274 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2275 CGBuilderTy Builder(CGM, Entry);
2276 if (InitIsInitFunc) {
2277 if (Init)
2278 Builder.CreateCall(Init);
2279 } else {
2280 // Don't know whether we have an init function. Call it if it exists.
2281 llvm::Value *Have = Builder.CreateIsNotNull(Init);
2282 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2283 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2284 Builder.CreateCondBr(Have, InitBB, ExitBB);
2285
2286 Builder.SetInsertPoint(InitBB);
2287 Builder.CreateCall(Init);
2288 Builder.CreateBr(ExitBB);
2289
2290 Builder.SetInsertPoint(ExitBB);
2291 }
2292
2293 // For a reference, the result of the wrapper function is a pointer to
2294 // the referenced object.
2295 llvm::Value *Val = Var;
2296 if (VD->getType()->isReferenceType()) {
2297 CharUnits Align = CGM.getContext().getDeclAlign(VD);
2298 Val = Builder.CreateAlignedLoad(Val, Align);
2299 }
2300 if (Val->getType() != Wrapper->getReturnType())
2301 Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2302 Val, Wrapper->getReturnType(), "");
2303 Builder.CreateRet(Val);
2304 }
2305 }
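// Sketch of a generated wrapper for a hypothetical 'thread_local int tls_x'
// that is declared here but defined in another TU (the C++ below paraphrases
// the IR built above):
//
//   int *_ZTW5tls_x() {                // weak_odr; each referencing TU emits one
//     if (&_ZTH5tls_x)                 // init function is an external-weak decl
//       _ZTH5tls_x();                  // run dynamic TLS init if it exists
//     return &tls_x;
//   }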
2306
2307 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2308 const VarDecl *VD,
2309 QualType LValType) {
2310 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2311 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2312
2313 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
2314 if (isThreadWrapperReplaceable(VD, CGF.CGM))
2315 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2316
2317 LValue LV;
2318 if (VD->getType()->isReferenceType())
2319 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2320 else
2321 LV = CGF.MakeAddrLValue(CallVal, LValType,
2322 CGF.getContext().getDeclAlign(VD));
2323 // FIXME: need setObjCGCLValueClass?
2324 return LV;
2325 }
2326
2327 /// Return whether the given global decl needs a VTT parameter, which it does
2328 /// if it's a base constructor or destructor with virtual bases.
2329 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2330 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2331
2332 // If we don't have any virtual bases, just return early.
2333 if (!MD->getParent()->getNumVBases())
2334 return false;
2335
2336 // Check if we have a base constructor.
2337 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2338 return true;
2339
2340 // Check if we have a base destructor.
2341 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2342 return true;
2343
2344 return false;
2345 }
2346
2347 namespace {
2348 class ItaniumRTTIBuilder {
2349 CodeGenModule &CGM; // Per-module state.
2350 llvm::LLVMContext &VMContext;
2351 const ItaniumCXXABI &CXXABI; // Per-module state.
2352
2353 /// Fields - The fields of the RTTI descriptor currently being built.
2354 SmallVector<llvm::Constant *, 16> Fields;
2355
2356 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
2357 llvm::GlobalVariable *
2358 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
2359
2360 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
2361 /// descriptor of the given type.
2362 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
2363
2364 /// BuildVTablePointer - Build the vtable pointer for the given type.
2365 void BuildVTablePointer(const Type *Ty);
2366
2367 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2368 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
2369 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
2370
2371 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2372 /// classes with bases that do not satisfy the abi::__si_class_type_info
2373 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
2374 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
2375
2376 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
2377 /// for pointer types.
2378 void BuildPointerTypeInfo(QualType PointeeTy);
2379
2380 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
2381 /// type_info for an object type.
2382 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
2383
2384 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
2385 /// struct, used for member pointer types.
2386 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
2387
2388 public:
2389 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
2390 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
2391
2392 // Pointer type info flags.
2393 enum {
2394 /// PTI_Const - Type has const qualifier.
2395 PTI_Const = 0x1,
2396
2397 /// PTI_Volatile - Type has volatile qualifier.
2398 PTI_Volatile = 0x2,
2399
2400 /// PTI_Restrict - Type has restrict qualifier.
2401 PTI_Restrict = 0x4,
2402
2403 /// PTI_Incomplete - Type is incomplete.
2404 PTI_Incomplete = 0x8,
2405
2406 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
2407 /// (in pointer to member).
2408 PTI_ContainingClassIncomplete = 0x10
2409 };
2410
2411 // VMI type info flags.
2412 enum {
2413 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
2414 VMI_NonDiamondRepeat = 0x1,
2415
2416 /// VMI_DiamondShaped - Class is diamond shaped.
2417 VMI_DiamondShaped = 0x2
2418 };
2419
2420 // Base class type info flags.
2421 enum {
2422 /// BCTI_Virtual - Base class is virtual.
2423 BCTI_Virtual = 0x1,
2424
2425 /// BCTI_Public - Base class is public.
2426 BCTI_Public = 0x2
2427 };
2428
2429 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
2430 ///
2431 /// \param Force - true to force the creation of this RTTI value
2432 llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
2433 };
2434 }
2435
2436 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
2437 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
2438 SmallString<256> Name;
2439 llvm::raw_svector_ostream Out(Name);
2440 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
2441
2442 // We know that the mangled name of the type starts at index 4 of the
2443 // mangled name of the typename, so we can just index into it in order to
2444 // get the mangled name of the type.
2445 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
2446 Name.substr(4));
2447
2448 llvm::GlobalVariable *GV =
2449 CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
2450
2451 GV->setInitializer(Init);
2452
2453 return GV;
2454 }
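// Mangled-name example for a hypothetical 'struct S' (illustration only): the
// RTTI name symbol is _ZTS1S and the string built above is "1S", i.e.
// Name.substr(4) strips the "_ZTS" prefix and leaves the plain Itanium
// mangling of the type itself.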
2455
2456 llvm::Constant *
2457 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
2458 // Mangle the RTTI name.
2459 SmallString<256> Name;
2460 llvm::raw_svector_ostream Out(Name);
2461 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2462
2463 // Look for an existing global.
2464 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
2465
2466 if (!GV) {
2467 // Create a new global variable.
2468 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
2469 /*Constant=*/true,
2470 llvm::GlobalValue::ExternalLinkage, nullptr,
2471 Name);
2472 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2473 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2474 if (RD->hasAttr<DLLImportAttr>())
2475 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
2476 }
2477 }
2478
2479 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
2480 }
2481
2482 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
2483 /// info for that type is defined in the standard library.
2484 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
2485 // Itanium C++ ABI 2.9.2:
2486 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
2487 // the run-time support library. Specifically, the run-time support
2488 // library should contain type_info objects for the types X, X* and
2489 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
2490 // unsigned char, signed char, short, unsigned short, int, unsigned int,
2491 // long, unsigned long, long long, unsigned long long, float, double,
2492 // long double, char16_t, char32_t, and the IEEE 754r decimal and
2493 // half-precision floating point types.
2494 switch (Ty->getKind()) {
2495 case BuiltinType::Void:
2496 case BuiltinType::NullPtr:
2497 case BuiltinType::Bool:
2498 case BuiltinType::WChar_S:
2499 case BuiltinType::WChar_U:
2500 case BuiltinType::Char_U:
2501 case BuiltinType::Char_S:
2502 case BuiltinType::UChar:
2503 case BuiltinType::SChar:
2504 case BuiltinType::Short:
2505 case BuiltinType::UShort:
2506 case BuiltinType::Int:
2507 case BuiltinType::UInt:
2508 case BuiltinType::Long:
2509 case BuiltinType::ULong:
2510 case BuiltinType::LongLong:
2511 case BuiltinType::ULongLong:
2512 case BuiltinType::Half:
2513 case BuiltinType::Float:
2514 case BuiltinType::Double:
2515 case BuiltinType::LongDouble:
2516 case BuiltinType::Char16:
2517 case BuiltinType::Char32:
2518 case BuiltinType::Int128:
2519 case BuiltinType::UInt128:
2520 case BuiltinType::OCLImage1d:
2521 case BuiltinType::OCLImage1dArray:
2522 case BuiltinType::OCLImage1dBuffer:
2523 case BuiltinType::OCLImage2d:
2524 case BuiltinType::OCLImage2dArray:
2525 case BuiltinType::OCLImage2dDepth:
2526 case BuiltinType::OCLImage2dArrayDepth:
2527 case BuiltinType::OCLImage2dMSAA:
2528 case BuiltinType::OCLImage2dArrayMSAA:
2529 case BuiltinType::OCLImage2dMSAADepth:
2530 case BuiltinType::OCLImage2dArrayMSAADepth:
2531 case BuiltinType::OCLImage3d:
2532 case BuiltinType::OCLSampler:
2533 case BuiltinType::OCLEvent:
2534 case BuiltinType::OCLClkEvent:
2535 case BuiltinType::OCLQueue:
2536 case BuiltinType::OCLNDRange:
2537 case BuiltinType::OCLReserveID:
2538 return true;
2539
2540 case BuiltinType::Dependent:
2541 #define BUILTIN_TYPE(Id, SingletonId)
2542 #define PLACEHOLDER_TYPE(Id, SingletonId) \
2543 case BuiltinType::Id:
2544 #include "clang/AST/BuiltinTypes.def"
2545 llvm_unreachable("asking for RTTI for a placeholder type!");
2546
2547 case BuiltinType::ObjCId:
2548 case BuiltinType::ObjCClass:
2549 case BuiltinType::ObjCSel:
2550 llvm_unreachable("FIXME: Objective-C types are unsupported!");
2551 }
2552
2553 llvm_unreachable("Invalid BuiltinType Kind!");
2554 }
2555
2556 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
2557 QualType PointeeTy = PointerTy->getPointeeType();
2558 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
2559 if (!BuiltinTy)
2560 return false;
2561
2562 // Check the qualifiers.
2563 Qualifiers Quals = PointeeTy.getQualifiers();
2564 Quals.removeConst();
2565
2566 if (!Quals.empty())
2567 return false;
2568
2569 return TypeInfoIsInStandardLibrary(BuiltinTy);
2570 }
2571
2572 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
2573 /// information for the given type exists in the standard library.
2574 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
2575 // Type info for builtin types is defined in the standard library.
2576 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
2577 return TypeInfoIsInStandardLibrary(BuiltinTy);
2578
2579 // Type info for some pointer types to builtin types is defined in the
2580 // standard library.
2581 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2582 return TypeInfoIsInStandardLibrary(PointerTy);
2583
2584 return false;
2585 }
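// Examples of descriptors this returns true for (they are provided by the
// C++ runtime, e.g. libc++abi or libsupc++; symbols shown for illustration):
//   typeid(int)          -> _ZTIi
//   typeid(int *)        -> _ZTIPi
//   typeid(const int *)  -> _ZTIPKi
// A 'volatile int *', by contrast, is not covered and gets a locally emitted
// descriptor.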
2586
2587 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
2588 /// the given type exists somewhere else, and that we should not emit the type
2589 /// information in this translation unit. Assumes that it is not a
2590 /// standard-library type.
2591 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
2592 QualType Ty) {
2593 ASTContext &Context = CGM.getContext();
2594
2595 // If RTTI is disabled, assume it might be disabled in the
2596 // translation unit that defines any potential key function, too.
2597 if (!Context.getLangOpts().RTTI) return false;
2598
2599 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2600 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2601 if (!RD->hasDefinition())
2602 return false;
2603
2604 if (!RD->isDynamicClass())
2605 return false;
2606
2607 // FIXME: this may need to be reconsidered if the key function
2608 // changes.
2609 // N.B. We must always emit the RTTI data ourselves if there exists a key
2610 // function.
2611 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
2612 if (CGM.getVTables().isVTableExternal(RD))
2613 return !IsDLLImport;
2614
2615 if (IsDLLImport)
2616 return true;
2617 }
2618
2619 return false;
2620 }
2621
2622 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
2623 static bool IsIncompleteClassType(const RecordType *RecordTy) {
2624 return !RecordTy->getDecl()->isCompleteDefinition();
2625 }
2626
2627 /// ContainsIncompleteClassType - Returns whether the given type contains an
2628 /// incomplete class type. This is true if
2629 ///
2630 /// * The given type is an incomplete class type.
2631 /// * The given type is a pointer type whose pointee type contains an
2632 /// incomplete class type.
2633 /// * The given type is a member pointer type whose class is an incomplete
2634 /// class type.
2635 /// * The given type is a member pointer type whose pointee type contains an
2636 /// incomplete class type.
2637 /// i.e. the type is, or points (directly or indirectly) to, an incomplete class type.
2638 static bool ContainsIncompleteClassType(QualType Ty) {
2639 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2640 if (IsIncompleteClassType(RecordTy))
2641 return true;
2642 }
2643
2644 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2645 return ContainsIncompleteClassType(PointerTy->getPointeeType());
2646
2647 if (const MemberPointerType *MemberPointerTy =
2648 dyn_cast<MemberPointerType>(Ty)) {
2649 // Check if the class type is incomplete.
2650 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
2651 if (IsIncompleteClassType(ClassType))
2652 return true;
2653
2654 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
2655 }
2656
2657 return false;
2658 }
2659
2660 // CanUseSingleInheritance - Return whether the given record decl has a "single,
2661 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
2662 // iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
2663 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
2664 // Check the number of bases.
2665 if (RD->getNumBases() != 1)
2666 return false;
2667
2668 // Get the base.
2669 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
2670
2671 // Check that the base is not virtual.
2672 if (Base->isVirtual())
2673 return false;
2674
2675 // Check that the base is public.
2676 if (Base->getAccessSpecifier() != AS_public)
2677 return false;
2678
2679 // Check that the class is dynamic iff the base is.
2680 const CXXRecordDecl *BaseDecl =
2681 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
2682 if (!BaseDecl->isEmpty() &&
2683 BaseDecl->isDynamicClass() != RD->isDynamicClass())
2684 return false;
2685
2686 return true;
2687 }
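// Illustrative mapping of hypothetical classes to the abi type_info classes
// chosen in BuildVTablePointer() below:
//   struct A { virtual ~A(); };   -> abi::__class_type_info
//   struct B : A { };             -> abi::__si_class_type_info
//   struct C : A, B { };          -> abi::__vmi_class_type_info
//   struct D : virtual A { };     -> abi::__vmi_class_type_info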
2688
2689 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
2690 // abi::__class_type_info.
2691 static const char * const ClassTypeInfo =
2692 "_ZTVN10__cxxabiv117__class_type_infoE";
2693 // abi::__si_class_type_info.
2694 static const char * const SIClassTypeInfo =
2695 "_ZTVN10__cxxabiv120__si_class_type_infoE";
2696 // abi::__vmi_class_type_info.
2697 static const char * const VMIClassTypeInfo =
2698 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
2699
2700 const char *VTableName = nullptr;
2701
2702 switch (Ty->getTypeClass()) {
2703 #define TYPE(Class, Base)
2704 #define ABSTRACT_TYPE(Class, Base)
2705 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
2706 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
2707 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2708 #include "clang/AST/TypeNodes.def"
2709 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
2710
2711 case Type::LValueReference:
2712 case Type::RValueReference:
2713 llvm_unreachable("References shouldn't get here");
2714
2715 case Type::Auto:
2716 llvm_unreachable("Undeduced auto type shouldn't get here");
2717
2718 case Type::Builtin:
2719 // GCC treats vector and complex types as fundamental types.
2720 case Type::Vector:
2721 case Type::ExtVector:
2722 case Type::Complex:
2723 case Type::Atomic:
2724 // FIXME: GCC treats block pointers as fundamental types?!
2725 case Type::BlockPointer:
2726 // abi::__fundamental_type_info.
2727 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
2728 break;
2729
2730 case Type::ConstantArray:
2731 case Type::IncompleteArray:
2732 case Type::VariableArray:
2733 // abi::__array_type_info.
2734 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
2735 break;
2736
2737 case Type::FunctionNoProto:
2738 case Type::FunctionProto:
2739 // abi::__function_type_info.
2740 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
2741 break;
2742
2743 case Type::Enum:
2744 // abi::__enum_type_info.
2745 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
2746 break;
2747
2748 case Type::Record: {
2749 const CXXRecordDecl *RD =
2750 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
2751
2752 if (!RD->hasDefinition() || !RD->getNumBases()) {
2753 VTableName = ClassTypeInfo;
2754 } else if (CanUseSingleInheritance(RD)) {
2755 VTableName = SIClassTypeInfo;
2756 } else {
2757 VTableName = VMIClassTypeInfo;
2758 }
2759
2760 break;
2761 }
2762
2763 case Type::ObjCObject:
2764 // Ignore protocol qualifiers.
2765 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
2766
2767 // Handle id and Class.
2768 if (isa<BuiltinType>(Ty)) {
2769 VTableName = ClassTypeInfo;
2770 break;
2771 }
2772
2773 assert(isa<ObjCInterfaceType>(Ty));
2774 // Fall through.
2775
2776 case Type::ObjCInterface:
2777 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
2778 VTableName = SIClassTypeInfo;
2779 } else {
2780 VTableName = ClassTypeInfo;
2781 }
2782 break;
2783
2784 case Type::ObjCObjectPointer:
2785 case Type::Pointer:
2786 // abi::__pointer_type_info.
2787 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
2788 break;
2789
2790 case Type::MemberPointer:
2791 // abi::__pointer_to_member_type_info.
2792 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
2793 break;
2794 }
2795
2796 llvm::Constant *VTable =
2797 CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
2798
2799 llvm::Type *PtrDiffTy =
2800 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
2801
2802 // The vtable address point is 2.
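  // (In the Itanium layout, the two slots before the address point hold the
  // offset-to-top value and the pointer to the class's own type_info, so
  // stepping over two pointer-sized entries reaches the first virtual
  // function slot.)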
2803 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
2804 VTable =
2805 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
2806 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
2807
2808 Fields.push_back(VTable);
2809 }
2810
2811 /// \brief Return the linkage that the type info and type info name constants
2812 /// should have for the given type.
getTypeInfoLinkage(CodeGenModule & CGM,QualType Ty)2813 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
2814 QualType Ty) {
2815 // Itanium C++ ABI 2.9.5p7:
2816 // In addition, it and all of the intermediate abi::__pointer_type_info
2817 // structs in the chain down to the abi::__class_type_info for the
2818 // incomplete class type must be prevented from resolving to the
2819 // corresponding type_info structs for the complete class type, possibly
2820 // by making them local static objects. Finally, a dummy class RTTI is
2821 // generated for the incomplete type that will not resolve to the final
2822 // complete class RTTI (because the latter need not exist), possibly by
2823 // making it a local static object.
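  // For illustration: with only 'class S;' visible, 'typeid(S*)' has to emit
  // its own internal-linkage __pointer_type_info (and __class_type_info for
  // S) rather than reference a global descriptor for the complete type,
  // which may never be emitted anywhere.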
2824 if (ContainsIncompleteClassType(Ty))
2825 return llvm::GlobalValue::InternalLinkage;
2826
2827 switch (Ty->getLinkage()) {
2828 case NoLinkage:
2829 case InternalLinkage:
2830 case UniqueExternalLinkage:
2831 return llvm::GlobalValue::InternalLinkage;
2832
2833 case VisibleNoLinkage:
2834 case ExternalLinkage:
2835 if (!CGM.getLangOpts().RTTI) {
2836 // RTTI is not enabled, which means that this type info struct is going
2837 // to be used for exception handling. Give it linkonce_odr linkage.
2838 return llvm::GlobalValue::LinkOnceODRLinkage;
2839 }
2840
2841 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
2842 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
2843 if (RD->hasAttr<WeakAttr>())
2844 return llvm::GlobalValue::WeakODRLinkage;
2845 if (RD->isDynamicClass()) {
2846 llvm::GlobalValue::LinkageTypes LT = CGM.getVTableLinkage(RD);
2847 // MinGW won't export the RTTI information when there is a key function.
2848 // Make sure we emit our own copy instead of attempting to dllimport it.
2849 if (RD->hasAttr<DLLImportAttr>() &&
2850 llvm::GlobalValue::isAvailableExternallyLinkage(LT))
2851 LT = llvm::GlobalValue::LinkOnceODRLinkage;
2852 return LT;
2853 }
2854 }
2855
2856 return llvm::GlobalValue::LinkOnceODRLinkage;
2857 }
2858
2859 llvm_unreachable("Invalid linkage!");
2860 }
2861
BuildTypeInfo(QualType Ty,bool Force)2862 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
2863 // We want to operate on the canonical type.
2864 Ty = CGM.getContext().getCanonicalType(Ty);
2865
2866 // Check if we've already emitted an RTTI descriptor for this type.
2867 SmallString<256> Name;
2868 llvm::raw_svector_ostream Out(Name);
2869 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2870
2871 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
2872 if (OldGV && !OldGV->isDeclaration()) {
2873 assert(!OldGV->hasAvailableExternallyLinkage() &&
2874 "available_externally typeinfos not yet implemented");
2875
2876 return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
2877 }
2878
2879 // Check if there is already an external RTTI descriptor for this type.
2880 bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
2881 if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
2882 return GetAddrOfExternalRTTIDescriptor(Ty);
2883
2884 // Emit standard library type info descriptors with external linkage.
2885 llvm::GlobalVariable::LinkageTypes Linkage;
2886 if (IsStdLib)
2887 Linkage = llvm::GlobalValue::ExternalLinkage;
2888 else
2889 Linkage = getTypeInfoLinkage(CGM, Ty);
2890
2891 // Add the vtable pointer.
2892 BuildVTablePointer(cast<Type>(Ty));
2893
2894 // And the name.
2895 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
2896 llvm::Constant *TypeNameField;
2897
2898 // If we're supposed to demote the visibility, be sure to set a flag
2899 // to use a string comparison for type_info comparisons.
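  // (For illustration, the name field then becomes roughly
  //   inttoptr(add(ptrtoint @_ZTS..., 1 << 63))
  // and a runtime that follows this convention tests the bit and falls back
  // to comparing the type name strings.)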
2900 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
2901 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
2902 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
2903 // The flag is the sign bit, which on ARM64 is defined to be clear
2904 // for global pointers. This is very ARM64-specific.
2905 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
2906 llvm::Constant *flag =
2907 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
2908 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
2909 TypeNameField =
2910 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
2911 } else {
2912 TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
2913 }
2914 Fields.push_back(TypeNameField);
2915
2916 switch (Ty->getTypeClass()) {
2917 #define TYPE(Class, Base)
2918 #define ABSTRACT_TYPE(Class, Base)
2919 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
2920 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
2921 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2922 #include "clang/AST/TypeNodes.def"
2923 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
2924
2925 // GCC treats vector and complex types as fundamental types.
2926 case Type::Builtin:
2927 case Type::Vector:
2928 case Type::ExtVector:
2929 case Type::Complex:
2930 case Type::BlockPointer:
2931 // Itanium C++ ABI 2.9.5p4:
2932 // abi::__fundamental_type_info adds no data members to std::type_info.
2933 break;
2934
2935 case Type::LValueReference:
2936 case Type::RValueReference:
2937 llvm_unreachable("References shouldn't get here");
2938
2939 case Type::Auto:
2940 llvm_unreachable("Undeduced auto type shouldn't get here");
2941
2942 case Type::ConstantArray:
2943 case Type::IncompleteArray:
2944 case Type::VariableArray:
2945 // Itanium C++ ABI 2.9.5p5:
2946 // abi::__array_type_info adds no data members to std::type_info.
2947 break;
2948
2949 case Type::FunctionNoProto:
2950 case Type::FunctionProto:
2951 // Itanium C++ ABI 2.9.5p5:
2952 // abi::__function_type_info adds no data members to std::type_info.
2953 break;
2954
2955 case Type::Enum:
2956 // Itanium C++ ABI 2.9.5p5:
2957 // abi::__enum_type_info adds no data members to std::type_info.
2958 break;
2959
2960 case Type::Record: {
2961 const CXXRecordDecl *RD =
2962 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
2963 if (!RD->hasDefinition() || !RD->getNumBases()) {
2964 // We don't need to emit any fields.
2965 break;
2966 }
2967
2968 if (CanUseSingleInheritance(RD))
2969 BuildSIClassTypeInfo(RD);
2970 else
2971 BuildVMIClassTypeInfo(RD);
2972
2973 break;
2974 }
2975
2976 case Type::ObjCObject:
2977 case Type::ObjCInterface:
2978 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
2979 break;
2980
2981 case Type::ObjCObjectPointer:
2982 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
2983 break;
2984
2985 case Type::Pointer:
2986 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
2987 break;
2988
2989 case Type::MemberPointer:
2990 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
2991 break;
2992
2993 case Type::Atomic:
2994 // No fields, at least for the moment.
2995 break;
2996 }
2997
2998 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
2999
3000 llvm::Module &M = CGM.getModule();
3001 llvm::GlobalVariable *GV =
3002 new llvm::GlobalVariable(M, Init->getType(),
3003 /*Constant=*/true, Linkage, Init, Name);
3004
3005 // If there's already an old global variable, replace it with the new one.
3006 if (OldGV) {
3007 GV->takeName(OldGV);
3008 llvm::Constant *NewPtr =
3009 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3010 OldGV->replaceAllUsesWith(NewPtr);
3011 OldGV->eraseFromParent();
3012 }
3013
3014 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3015 GV->setComdat(M.getOrInsertComdat(GV->getName()));
3016
3017 // The Itanium ABI specifies that type_info objects must be globally
3018 // unique, with one exception: if the type is an incomplete class
3019 // type or a (possibly indirect) pointer to one. That exception
3020 // affects the general case of comparing type_info objects produced
3021 // by the typeid operator, which is why the comparison operators on
3022 // std::type_info generally use the type_info name pointers instead
3023 // of the object addresses. However, the language's built-in uses
3024 // of RTTI generally require class types to be complete, even when
3025 // manipulating pointers to those class types. This allows the
3026 // implementation of dynamic_cast to rely on address equality tests,
3027 // which is much faster.
3028
3029 // All of this is to say that it's important that both the type_info
3030 // object and the type_info name be uniqued when weakly emitted.
3031
3032 // Give the type_info object and name the formal visibility of the
3033 // type itself.
3034 llvm::GlobalValue::VisibilityTypes llvmVisibility;
3035 if (llvm::GlobalValue::isLocalLinkage(Linkage))
3036 // If the linkage is local, only default visibility makes sense.
3037 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3038 else if (RTTIUniqueness == ItaniumCXXABI::RUK_NonUniqueHidden)
3039 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3040 else
3041 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3042 TypeName->setVisibility(llvmVisibility);
3043 GV->setVisibility(llvmVisibility);
3044
3045 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3046 }
3047
3048 /// ComputeQualifierFlags - Compute the pointer type info flags from the
3049 /// given qualifiers.
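///
/// For example, the pointee qualifiers of 'const volatile int *' map to
/// PTI_Const | PTI_Volatile.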
ComputeQualifierFlags(Qualifiers Quals)3050 static unsigned ComputeQualifierFlags(Qualifiers Quals) {
3051 unsigned Flags = 0;
3052
3053 if (Quals.hasConst())
3054 Flags |= ItaniumRTTIBuilder::PTI_Const;
3055 if (Quals.hasVolatile())
3056 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3057 if (Quals.hasRestrict())
3058 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
3059
3060 return Flags;
3061 }
3062
3063 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3064 /// for the given Objective-C object type.
BuildObjCObjectTypeInfo(const ObjCObjectType * OT)3065 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3066 // Drop qualifiers.
3067 const Type *T = OT->getBaseType().getTypePtr();
3068 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3069
3070 // The builtin types are abi::__class_type_infos and don't require
3071 // extra fields.
3072 if (isa<BuiltinType>(T)) return;
3073
3074 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3075 ObjCInterfaceDecl *Super = Class->getSuperClass();
3076
3077 // Root classes are also __class_type_info.
3078 if (!Super) return;
3079
3080 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3081
3082 // Everything else is single inheritance.
3083 llvm::Constant *BaseTypeInfo =
3084 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3085 Fields.push_back(BaseTypeInfo);
3086 }
3087
3088 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3089 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
BuildSIClassTypeInfo(const CXXRecordDecl * RD)3090 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3091 // Itanium C++ ABI 2.9.5p6b:
3092 // It adds to abi::__class_type_info a single member pointing to the
3093 // type_info structure for the base type,
3094 llvm::Constant *BaseTypeInfo =
3095 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3096 Fields.push_back(BaseTypeInfo);
3097 }
3098
3099 namespace {
3100 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3101 /// a class hierarchy.
3102 struct SeenBases {
3103 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3104 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3105 };
3106 }
3107
3108 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3109 /// abi::__vmi_class_type_info.
3110 ///
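/// For illustration (hypothetical hierarchy): given 'struct A {}; struct B :
/// virtual A {}; struct C : virtual A {}; struct D : B, C {};', computing the
/// flags for D sees A twice as a virtual base and therefore sets
/// VMI_DiamondShaped.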
ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier * Base,SeenBases & Bases)3111 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3112 SeenBases &Bases) {
3113
3114 unsigned Flags = 0;
3115
3116 const CXXRecordDecl *BaseDecl =
3117 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
3118
3119 if (Base->isVirtual()) {
3120 // Mark the virtual base as seen.
3121 if (!Bases.VirtualBases.insert(BaseDecl).second) {
3122 // If this virtual base has been seen before, then the class is diamond
3123 // shaped.
3124 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3125 } else {
3126 if (Bases.NonVirtualBases.count(BaseDecl))
3127 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3128 }
3129 } else {
3130 // Mark the non-virtual base as seen.
3131 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3132 // If this non-virtual base has been seen before, then the class has non-
3133 // diamond shaped repeated inheritance.
3134 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3135 } else {
3136 if (Bases.VirtualBases.count(BaseDecl))
3137 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3138 }
3139 }
3140
3141 // Walk all bases.
3142 for (const auto &I : BaseDecl->bases())
3143 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3144
3145 return Flags;
3146 }
3147
ComputeVMIClassTypeInfoFlags(const CXXRecordDecl * RD)3148 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3149 unsigned Flags = 0;
3150 SeenBases Bases;
3151
3152 // Walk all bases.
3153 for (const auto &I : RD->bases())
3154 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3155
3156 return Flags;
3157 }
3158
3159 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3160 /// classes with bases that do not satisfy the abi::__si_class_type_info
3161 /// constraints, according to the Itanium C++ ABI, 2.9.5p6c.
BuildVMIClassTypeInfo(const CXXRecordDecl * RD)3162 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3163 llvm::Type *UnsignedIntLTy =
3164 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3165
3166 // Itanium C++ ABI 2.9.5p6c:
3167 // __flags is a word with flags describing details about the class
3168 // structure, which may be referenced by using the __flags_masks
3169 // enumeration. These flags refer to both direct and indirect bases.
3170 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3171 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3172
3173 // Itanium C++ ABI 2.9.5p6c:
3174 // __base_count is a word with the number of direct proper base class
3175 // descriptions that follow.
3176 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3177
3178 if (!RD->getNumBases())
3179 return;
3180
3181 llvm::Type *LongLTy =
3182 CGM.getTypes().ConvertType(CGM.getContext().LongTy);
3183
3184 // Now add the base class descriptions.
3185
3186 // Itanium C++ ABI 2.9.5p6c:
3187 // __base_info[] is an array of base class descriptions -- one for every
3188 // direct proper base. Each description is of the type:
3189 //
3190 // struct abi::__base_class_type_info {
3191 // public:
3192 // const __class_type_info *__base_type;
3193 // long __offset_flags;
3194 //
3195 // enum __offset_flags_masks {
3196 // __virtual_mask = 0x1,
3197 // __public_mask = 0x2,
3198 // __offset_shift = 8
3199 // };
3200 // };
3201 for (const auto &Base : RD->bases()) {
3202 // The __base_type member points to the RTTI for the base type.
3203 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3204
3205 const CXXRecordDecl *BaseDecl =
3206 cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
3207
3208 int64_t OffsetFlags = 0;
3209
3210 // All but the lower 8 bits of __offset_flags are a signed offset.
3211 // For a non-virtual base, this is the offset in the object of the base
3212 // subobject. For a virtual base, this is the offset in the virtual table of
3213 // the virtual base offset for the virtual base referenced (negative).
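    // For illustration: a public, non-virtual base laid out at byte offset 8
    // would produce __offset_flags == (8 << 8) | __public_mask == 0x802.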
3214 CharUnits Offset;
3215 if (Base.isVirtual())
3216 Offset =
3217 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3218 else {
3219 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3220 Offset = Layout.getBaseClassOffset(BaseDecl);
3221 }
3222
3223 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3224
3225 // The low-order byte of __offset_flags contains flags, as given by the
3226 // masks from the enumeration __offset_flags_masks.
3227 if (Base.isVirtual())
3228 OffsetFlags |= BCTI_Virtual;
3229 if (Base.getAccessSpecifier() == AS_public)
3230 OffsetFlags |= BCTI_Public;
3231
3232 Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags));
3233 }
3234 }
3235
3236 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
3237 /// used for pointer types.
BuildPointerTypeInfo(QualType PointeeTy)3238 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
3239 Qualifiers Quals;
3240 QualType UnqualifiedPointeeTy =
3241 CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
3242
3243 // Itanium C++ ABI 2.9.5p7:
3244 // __flags is a flag word describing the cv-qualification and other
3245 // attributes of the type pointed to.
3246 unsigned Flags = ComputeQualifierFlags(Quals);
3247
3248 // Itanium C++ ABI 2.9.5p7:
3249 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
3250 // incomplete class type, the incomplete target type flag is set.
3251 if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
3252 Flags |= PTI_Incomplete;
3253
3254 llvm::Type *UnsignedIntLTy =
3255 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3256 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3257
3258 // Itanium C++ ABI 2.9.5p7:
3259 // __pointee is a pointer to the std::type_info derivation for the
3260 // unqualified type being pointed to.
3261 llvm::Constant *PointeeTypeInfo =
3262 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(UnqualifiedPointeeTy);
3263 Fields.push_back(PointeeTypeInfo);
3264 }
3265
3266 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3267 /// struct, used for member pointer types.
3268 void
BuildPointerToMemberTypeInfo(const MemberPointerType * Ty)3269 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
3270 QualType PointeeTy = Ty->getPointeeType();
3271
3272 Qualifiers Quals;
3273 QualType UnqualifiedPointeeTy =
3274 CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
3275
3276 // Itanium C++ ABI 2.9.5p7:
3277 // __flags is a flag word describing the cv-qualification and other
3278 // attributes of the type pointed to.
3279 unsigned Flags = ComputeQualifierFlags(Quals);
3280
3281 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
3282
3283 // Itanium C++ ABI 2.9.5p7:
3284 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
3285 // incomplete class type, the incomplete target type flag is set.
3286 if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
3287 Flags |= PTI_Incomplete;
3288
3289 if (IsIncompleteClassType(ClassType))
3290 Flags |= PTI_ContainingClassIncomplete;
3291
3292 llvm::Type *UnsignedIntLTy =
3293 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3294 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3295
3296 // Itanium C++ ABI 2.9.5p7:
3297 // __pointee is a pointer to the std::type_info derivation for the
3298 // unqualified type being pointed to.
3299 llvm::Constant *PointeeTypeInfo =
3300 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(UnqualifiedPointeeTy);
3301 Fields.push_back(PointeeTypeInfo);
3302
3303 // Itanium C++ ABI 2.9.5p9:
3304 // __context is a pointer to an abi::__class_type_info corresponding to the
3305 // class type containing the member pointed to
3306 // (e.g., the "A" in "int A::*").
3307 Fields.push_back(
3308 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
3309 }
3310
getAddrOfRTTIDescriptor(QualType Ty)3311 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
3312 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
3313 }
3314
EmitFundamentalRTTIDescriptor(QualType Type)3315 void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type) {
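  // For each fundamental type this emits type_info for T, T*, and const T*.
  // For illustration, 'int' yields the Itanium-mangled symbols _ZTIi, _ZTIPi,
  // and _ZTIPKi.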
3316 QualType PointerType = getContext().getPointerType(Type);
3317 QualType PointerTypeConst = getContext().getPointerType(Type.withConst());
3318 ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, true);
3319 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, true);
3320 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, true);
3321 }
3322
EmitFundamentalRTTIDescriptors()3323 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors() {
3324 QualType FundamentalTypes[] = {
3325 getContext().VoidTy, getContext().NullPtrTy,
3326 getContext().BoolTy, getContext().WCharTy,
3327 getContext().CharTy, getContext().UnsignedCharTy,
3328 getContext().SignedCharTy, getContext().ShortTy,
3329 getContext().UnsignedShortTy, getContext().IntTy,
3330 getContext().UnsignedIntTy, getContext().LongTy,
3331 getContext().UnsignedLongTy, getContext().LongLongTy,
3332 getContext().UnsignedLongLongTy, getContext().HalfTy,
3333 getContext().FloatTy, getContext().DoubleTy,
3334 getContext().LongDoubleTy, getContext().Char16Ty,
3335 getContext().Char32Ty,
3336 };
3337 for (const QualType &FundamentalType : FundamentalTypes)
3338 EmitFundamentalRTTIDescriptor(FundamentalType);
3339 }
3340
3341 /// What sort of uniqueness rules should we use for the RTTI for the
3342 /// given type?
classifyRTTIUniqueness(QualType CanTy,llvm::GlobalValue::LinkageTypes Linkage) const3343 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
3344 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
3345 if (shouldRTTIBeUnique())
3346 return RUK_Unique;
3347
3348 // It's only necessary for linkonce_odr or weak_odr linkage.
3349 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
3350 Linkage != llvm::GlobalValue::WeakODRLinkage)
3351 return RUK_Unique;
3352
3353 // It's only necessary with default visibility.
3354 if (CanTy->getVisibility() != DefaultVisibility)
3355 return RUK_Unique;
3356
3357 // If we're not required to publish this symbol, hide it.
3358 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3359 return RUK_NonUniqueHidden;
3360
3361 // If we're required to publish this symbol, as we might be under an
3362 // explicit instantiation, leave it with default visibility but
3363 // enable string-comparisons.
3364 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
3365 return RUK_NonUniqueVisible;
3366 }
3367
3368 // Find out how to codegen the complete destructor and constructor
3369 namespace {
3370 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
3371 }
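// For illustration: under -mconstructor-aliases, the complete-object
// constructor (C1) of a class with no virtual bases can alias (or be RAUW'd
// to) the base-object constructor (C2), and similarly D1 can reuse D2 for
// destructors; the enum above names the strategies chosen below.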
getCodegenToUse(CodeGenModule & CGM,const CXXMethodDecl * MD)3372 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
3373 const CXXMethodDecl *MD) {
3374 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
3375 return StructorCodegen::Emit;
3376
3377 // The complete and base structors are not equivalent if there are any virtual
3378 // bases, so emit separate functions.
3379 if (MD->getParent()->getNumVBases())
3380 return StructorCodegen::Emit;
3381
3382 GlobalDecl AliasDecl;
3383 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
3384 AliasDecl = GlobalDecl(DD, Dtor_Complete);
3385 } else {
3386 const auto *CD = cast<CXXConstructorDecl>(MD);
3387 AliasDecl = GlobalDecl(CD, Ctor_Complete);
3388 }
3389 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3390
3391 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
3392 return StructorCodegen::RAUW;
3393
3394 // FIXME: Should we allow available_externally aliases?
3395 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
3396 return StructorCodegen::RAUW;
3397
3398 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
3399 // Only ELF supports COMDATs with arbitrary names (C5/D5).
3400 if (CGM.getTarget().getTriple().isOSBinFormatELF())
3401 return StructorCodegen::COMDAT;
3402 return StructorCodegen::Emit;
3403 }
3404
3405 return StructorCodegen::Alias;
3406 }
3407
emitConstructorDestructorAlias(CodeGenModule & CGM,GlobalDecl AliasDecl,GlobalDecl TargetDecl)3408 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
3409 GlobalDecl AliasDecl,
3410 GlobalDecl TargetDecl) {
3411 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3412
3413 StringRef MangledName = CGM.getMangledName(AliasDecl);
3414 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
3415 if (Entry && !Entry->isDeclaration())
3416 return;
3417
3418 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
3419
3420 // Create the alias with no name.
3421 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
3422
3423 // Switch any previous uses to the alias.
3424 if (Entry) {
3425 assert(Entry->getType() == Aliasee->getType() &&
3426 "declaration exists with different type");
3427 Alias->takeName(Entry);
3428 Entry->replaceAllUsesWith(Alias);
3429 Entry->eraseFromParent();
3430 } else {
3431 Alias->setName(MangledName);
3432 }
3433
3434 // Finally, set up the alias with its proper name and attributes.
3435 CGM.setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
3436 }
3437
emitCXXStructor(const CXXMethodDecl * MD,StructorType Type)3438 void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
3439 StructorType Type) {
3440 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
3441 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
3442
3443 StructorCodegen CGType = getCodegenToUse(CGM, MD);
3444
3445 if (Type == StructorType::Complete) {
3446 GlobalDecl CompleteDecl;
3447 GlobalDecl BaseDecl;
3448 if (CD) {
3449 CompleteDecl = GlobalDecl(CD, Ctor_Complete);
3450 BaseDecl = GlobalDecl(CD, Ctor_Base);
3451 } else {
3452 CompleteDecl = GlobalDecl(DD, Dtor_Complete);
3453 BaseDecl = GlobalDecl(DD, Dtor_Base);
3454 }
3455
3456 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
3457 emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
3458 return;
3459 }
3460
3461 if (CGType == StructorCodegen::RAUW) {
3462 StringRef MangledName = CGM.getMangledName(CompleteDecl);
3463 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
3464 CGM.addReplacement(MangledName, Aliasee);
3465 return;
3466 }
3467 }
3468
3469 // The base destructor is equivalent to the base destructor of its
3470 // base class if there is exactly one non-virtual base class with a
3471 // non-trivial destructor, there are no fields with a non-trivial
3472 // destructor, and the body of the destructor is trivial.
3473 if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
3474 !CGM.TryEmitBaseDestructorAsAlias(DD))
3475 return;
3476
3477 llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);
3478
3479 if (CGType == StructorCodegen::COMDAT) {
3480 SmallString<256> Buffer;
3481 llvm::raw_svector_ostream Out(Buffer);
3482 if (DD)
3483 getMangleContext().mangleCXXDtorComdat(DD, Out);
3484 else
3485 getMangleContext().mangleCXXCtorComdat(CD, Out);
3486 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
3487 Fn->setComdat(C);
3488 } else {
3489 CGM.maybeSetTrivialComdat(*MD, *Fn);
3490 }
3491 }
3492
getBeginCatchFn(CodeGenModule & CGM)3493 static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) {
3494 // void *__cxa_begin_catch(void*);
3495 llvm::FunctionType *FTy = llvm::FunctionType::get(
3496 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3497
3498 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
3499 }
3500
getEndCatchFn(CodeGenModule & CGM)3501 static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
3502 // void __cxa_end_catch();
3503 llvm::FunctionType *FTy =
3504 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
3505
3506 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
3507 }
3508
getGetExceptionPtrFn(CodeGenModule & CGM)3509 static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
3510 // void *__cxa_get_exception_ptr(void*);
3511 llvm::FunctionType *FTy = llvm::FunctionType::get(
3512 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3513
3514 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
3515 }
3516
3517 namespace {
3518 /// A cleanup to call __cxa_end_catch. In many cases, the caught
3519 /// exception type lets us state definitively that the thrown exception
3520 /// type does not have a destructor. In particular:
3521 /// - Catch-alls tell us nothing, so we have to conservatively
3522 /// assume that the thrown exception might have a destructor.
3523 /// - Catches by reference behave according to their base types.
3524 /// - Catches of non-record types will only trigger for exceptions
3525 /// of non-record types, which never have destructors.
3526 /// - Catches of record types can trigger for arbitrary subclasses
3527 /// of the caught type, so we have to assume the actual thrown
3528 /// exception type might have a throwing destructor, even if the
3529 /// caught type's destructor is trivial or nothrow.
3530 struct CallEndCatch final : EHScopeStack::Cleanup {
CallEndCatch__anona7bdc4f60911::CallEndCatch3531 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
3532 bool MightThrow;
3533
Emit__anona7bdc4f60911::CallEndCatch3534 void Emit(CodeGenFunction &CGF, Flags flags) override {
3535 if (!MightThrow) {
3536 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
3537 return;
3538 }
3539
3540 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
3541 }
3542 };
3543 }
3544
3545 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
3546 /// __cxa_end_catch.
3547 ///
3548 /// \param EndMightThrow - true if __cxa_end_catch might throw
CallBeginCatch(CodeGenFunction & CGF,llvm::Value * Exn,bool EndMightThrow)3549 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
3550 llvm::Value *Exn,
3551 bool EndMightThrow) {
3552 llvm::CallInst *call =
3553 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
3554
3555 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
3556
3557 return call;
3558 }
3559
3560 /// A "special initializer" callback for initializing a catch
3561 /// parameter during catch initialization.
InitCatchParam(CodeGenFunction & CGF,const VarDecl & CatchParam,Address ParamAddr,SourceLocation Loc)3562 static void InitCatchParam(CodeGenFunction &CGF,
3563 const VarDecl &CatchParam,
3564 Address ParamAddr,
3565 SourceLocation Loc) {
3566 // Load the exception from where the landing pad saved it.
3567 llvm::Value *Exn = CGF.getExceptionFromSlot();
3568
3569 CanQualType CatchType =
3570 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
3571 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
3572
3573 // If we're catching by reference, we can just cast the object
3574 // pointer to the appropriate pointer.
3575 if (isa<ReferenceType>(CatchType)) {
3576 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
3577 bool EndCatchMightThrow = CaughtType->isRecordType();
3578
3579 // __cxa_begin_catch returns the adjusted object pointer.
3580 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
3581
3582 // We have no way to tell the personality function that we're
3583 // catching by reference, so if we're catching a pointer,
3584 // __cxa_begin_catch will actually return that pointer by value.
3585 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
3586 QualType PointeeType = PT->getPointeeType();
3587
3588 // When catching by reference, generally we should just ignore
3589 // this by-value pointer and use the exception object instead.
3590 if (!PointeeType->isRecordType()) {
3591
3592 // Exn points to the struct _Unwind_Exception header, which
3593 // we have to skip past in order to reach the exception data.
3594 unsigned HeaderSize =
3595 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
3596 AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
3597
3598 // However, if we're catching a pointer-to-record type, that won't
3599 // work, because the personality function might have adjusted
3600 // the pointer. There's actually no way for us to fully satisfy
3601 // the language/ABI contract here: we can't use Exn because it
3602 // might have the wrong adjustment, but we can't use the by-value
3603 // pointer because it's off by a level of abstraction.
3604 //
3605 // The current solution is to dump the adjusted pointer into an
3606 // alloca, which breaks language semantics (because changing the
3607 // pointer doesn't change the exception) but at least works.
3608 // The better solution would be to filter out non-exact matches
3609 // and rethrow them, but this is tricky because the rethrow
3610 // really needs to be catchable by other sites at this landing
3611 // pad. The best solution is to fix the personality function.
3612 } else {
3613 // Pull the pointer for the reference type off.
3614 llvm::Type *PtrTy =
3615 cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
3616
3617 // Create the temporary and write the adjusted pointer into it.
3618 Address ExnPtrTmp =
3619 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
3620 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3621 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
3622
3623 // Bind the reference to the temporary.
3624 AdjustedExn = ExnPtrTmp.getPointer();
3625 }
3626 }
3627
3628 llvm::Value *ExnCast =
3629 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
3630 CGF.Builder.CreateStore(ExnCast, ParamAddr);
3631 return;
3632 }
3633
3634 // Scalars and complexes.
3635 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
3636 if (TEK != TEK_Aggregate) {
3637 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
3638
3639 // If the catch type is a pointer type, __cxa_begin_catch returns
3640 // the pointer by value.
3641 if (CatchType->hasPointerRepresentation()) {
3642 llvm::Value *CastExn =
3643 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
3644
3645 switch (CatchType.getQualifiers().getObjCLifetime()) {
3646 case Qualifiers::OCL_Strong:
3647 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
3648 // fallthrough
3649
3650 case Qualifiers::OCL_None:
3651 case Qualifiers::OCL_ExplicitNone:
3652 case Qualifiers::OCL_Autoreleasing:
3653 CGF.Builder.CreateStore(CastExn, ParamAddr);
3654 return;
3655
3656 case Qualifiers::OCL_Weak:
3657 CGF.EmitARCInitWeak(ParamAddr, CastExn);
3658 return;
3659 }
3660 llvm_unreachable("bad ownership qualifier!");
3661 }
3662
3663 // Otherwise, it returns a pointer into the exception object.
3664
3665 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3666 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3667
3668 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
3669 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
3670 switch (TEK) {
3671 case TEK_Complex:
3672 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
3673 /*init*/ true);
3674 return;
3675 case TEK_Scalar: {
3676 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
3677 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
3678 return;
3679 }
3680 case TEK_Aggregate:
3681 llvm_unreachable("evaluation kind filtered out!");
3682 }
3683 llvm_unreachable("bad evaluation kind");
3684 }
3685
3686 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
3687 auto catchRD = CatchType->getAsCXXRecordDecl();
3688 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
3689
3690 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3691
3692 // Check for a copy expression. If we don't have a copy expression,
3693 // that means a trivial copy is okay.
3694 const Expr *copyExpr = CatchParam.getInit();
3695 if (!copyExpr) {
3696 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
3697 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
3698 caughtExnAlignment);
3699 CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
3700 return;
3701 }
3702
3703 // We have to call __cxa_get_exception_ptr to get the adjusted
3704 // pointer before copying.
3705 llvm::CallInst *rawAdjustedExn =
3706 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
3707
3708 // Cast that to the appropriate type.
3709 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
3710 caughtExnAlignment);
3711
3712 // The copy expression is defined in terms of an OpaqueValueExpr.
3713 // Find it and map it to the adjusted expression.
3714 CodeGenFunction::OpaqueValueMapping
3715 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
3716 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
3717
3718 // Call the copy ctor in a terminate scope.
3719 CGF.EHStack.pushTerminate();
3720
3721 // Perform the copy construction.
3722 CGF.EmitAggExpr(copyExpr,
3723 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
3724 AggValueSlot::IsNotDestructed,
3725 AggValueSlot::DoesNotNeedGCBarriers,
3726 AggValueSlot::IsNotAliased));
3727
3728 // Leave the terminate scope.
3729 CGF.EHStack.popTerminate();
3730
3731 // Undo the opaque value mapping.
3732 opaque.pop();
3733
3734 // Finally we can call __cxa_begin_catch.
3735 CallBeginCatch(CGF, Exn, true);
3736 }
3737
3738 /// Begins a catch statement by initializing the catch variable and
3739 /// calling __cxa_begin_catch.
emitBeginCatch(CodeGenFunction & CGF,const CXXCatchStmt * S)3740 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
3741 const CXXCatchStmt *S) {
3742 // We have to be very careful with the ordering of cleanups here:
3743 // C++ [except.throw]p4:
3744 // The destruction [of the exception temporary] occurs
3745 // immediately after the destruction of the object declared in
3746 // the exception-declaration in the handler.
3747 //
3748 // So the precise ordering is:
3749 // 1. Construct catch variable.
3750 // 2. __cxa_begin_catch
3751 // 3. Enter __cxa_end_catch cleanup
3752 // 4. Enter dtor cleanup
3753 //
3754 // We do this by using a slightly abnormal initialization process.
3755 // Delegation sequence:
3756 // - ExitCXXTryStmt opens a RunCleanupsScope
3757 // - EmitAutoVarAlloca creates the variable and debug info
3758 // - InitCatchParam initializes the variable from the exception
3759 // - CallBeginCatch calls __cxa_begin_catch
3760 // - CallBeginCatch enters the __cxa_end_catch cleanup
3761 // - EmitAutoVarCleanups enters the variable destructor cleanup
3762 // - EmitCXXTryStmt emits the code for the catch body
3763 // - EmitCXXTryStmt closes the RunCleanupsScope
3764
3765 VarDecl *CatchParam = S->getExceptionDecl();
3766 if (!CatchParam) {
3767 llvm::Value *Exn = CGF.getExceptionFromSlot();
3768 CallBeginCatch(CGF, Exn, true);
3769 return;
3770 }
3771
3772 // Emit the local.
3773 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
3774 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
3775 CGF.EmitAutoVarCleanups(var);
3776 }
3777
3778 /// Get or define the following function:
3779 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
3780 /// This code is used only in C++.
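///
/// Roughly equivalent to the following C++ sketch (not the emitted IR):
///   extern "C" void __clang_call_terminate(void *exn) {
///     __cxa_begin_catch(exn);
///     std::terminate();
///   }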
getClangCallTerminateFn(CodeGenModule & CGM)3781 static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
3782 llvm::FunctionType *fnTy =
3783 llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3784 llvm::Constant *fnRef =
3785 CGM.CreateRuntimeFunction(fnTy, "__clang_call_terminate");
3786
3787 llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
3788 if (fn && fn->empty()) {
3789 fn->setDoesNotThrow();
3790 fn->setDoesNotReturn();
3791
3792 // What we really want is to massively penalize inlining without
3793 // forbidding it completely. The difference between that and
3794 // 'noinline' is negligible.
3795 fn->addFnAttr(llvm::Attribute::NoInline);
3796
3797 // Allow this function to be shared across translation units, but
3798 // we don't want it to turn into an exported symbol.
3799 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
3800 fn->setVisibility(llvm::Function::HiddenVisibility);
3801 if (CGM.supportsCOMDAT())
3802 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
3803
3804 // Set up the function.
3805 llvm::BasicBlock *entry =
3806 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
3807 CGBuilderTy builder(CGM, entry);
3808
3809 // Pull the exception pointer out of the parameter list.
3810 llvm::Value *exn = &*fn->arg_begin();
3811
3812 // Call __cxa_begin_catch(exn).
3813 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
3814 catchCall->setDoesNotThrow();
3815 catchCall->setCallingConv(CGM.getRuntimeCC());
3816
3817 // Call std::terminate().
3818 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
3819 termCall->setDoesNotThrow();
3820 termCall->setDoesNotReturn();
3821 termCall->setCallingConv(CGM.getRuntimeCC());
3822
3823 // std::terminate cannot return.
3824 builder.CreateUnreachable();
3825 }
3826
3827 return fnRef;
3828 }
3829
3830 llvm::CallInst *
emitTerminateForUnexpectedException(CodeGenFunction & CGF,llvm::Value * Exn)3831 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
3832 llvm::Value *Exn) {
3833 // In C++, we want to call __cxa_begin_catch() before terminating.
3834 if (Exn) {
3835 assert(CGF.CGM.getLangOpts().CPlusPlus);
3836 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
3837 }
3838 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
3839 }
3840