1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This provides C++ code generation targeting the Itanium C++ ABI. The class
11 // in this file generates structures that follow the Itanium C++ ABI, which is
12 // documented at:
13 // http://www.codesourcery.com/public/cxx-abi/abi.html
14 // http://www.codesourcery.com/public/cxx-abi/abi-eh.html
15 //
16 // It also supports the closely-related ARM ABI, documented at:
17 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
18 //
19 //===----------------------------------------------------------------------===//
20
21 #include "CGCXXABI.h"
22 #include "CGCleanup.h"
23 #include "CGRecordLayout.h"
24 #include "CGVTables.h"
25 #include "CodeGenFunction.h"
26 #include "CodeGenModule.h"
27 #include "TargetInfo.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/Type.h"
30 #include "clang/AST/StmtCXX.h"
31 #include "llvm/IR/CallSite.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/IR/Value.h"
36
37 using namespace clang;
38 using namespace CodeGen;
39
40 namespace {
41 class ItaniumCXXABI : public CodeGen::CGCXXABI {
42 /// VTables - All the vtables which have been defined.
43 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
44
45 protected:
46 bool UseARMMethodPtrABI;
47 bool UseARMGuardVarABI;
48
getMangleContext()49 ItaniumMangleContext &getMangleContext() {
50 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
51 }
52
53 public:
ItaniumCXXABI(CodeGen::CodeGenModule & CGM,bool UseARMMethodPtrABI=false,bool UseARMGuardVarABI=false)54 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
55 bool UseARMMethodPtrABI = false,
56 bool UseARMGuardVarABI = false) :
57 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
58 UseARMGuardVarABI(UseARMGuardVarABI) { }
59
60 bool classifyReturnType(CGFunctionInfo &FI) const override;
61
getRecordArgABI(const CXXRecordDecl * RD) const62 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
63 // Structures with either a non-trivial destructor or a non-trivial
64 // copy constructor are always indirect.
65 // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
66 // special members.
67 if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor())
68 return RAA_Indirect;
69 return RAA_Default;
70 }
71
72 bool isZeroInitializable(const MemberPointerType *MPT) override;
73
74 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
75
76 llvm::Value *
77 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
78 const Expr *E,
79 llvm::Value *&This,
80 llvm::Value *MemFnPtr,
81 const MemberPointerType *MPT) override;
82
83 llvm::Value *
84 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
85 llvm::Value *Base,
86 llvm::Value *MemPtr,
87 const MemberPointerType *MPT) override;
88
89 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
90 const CastExpr *E,
91 llvm::Value *Src) override;
92 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
93 llvm::Constant *Src) override;
94
95 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
96
97 llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD) override;
98 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
99 CharUnits offset) override;
100 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
101 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
102 CharUnits ThisAdjustment);
103
104 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
105 llvm::Value *L, llvm::Value *R,
106 const MemberPointerType *MPT,
107 bool Inequality) override;
108
109 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
110 llvm::Value *Addr,
111 const MemberPointerType *MPT) override;
112
113 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
114 llvm::Value *Ptr, QualType ElementType,
115 const CXXDestructorDecl *Dtor) override;
116
117 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
118 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
119
120 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
121
122 llvm::CallInst *
123 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
124 llvm::Value *Exn) override;
125
126 void EmitFundamentalRTTIDescriptor(QualType Type);
127 void EmitFundamentalRTTIDescriptors();
128 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
129 llvm::Constant *
getAddrOfCXXCatchHandlerType(QualType Ty,QualType CatchHandlerType)130 getAddrOfCXXCatchHandlerType(QualType Ty,
131 QualType CatchHandlerType) override {
132 return getAddrOfRTTIDescriptor(Ty);
133 }
134
135 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
136 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
137 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
138 llvm::Value *ThisPtr,
139 llvm::Type *StdTypeInfoPtrTy) override;
140
141 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
142 QualType SrcRecordTy) override;
143
144 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
145 QualType SrcRecordTy, QualType DestTy,
146 QualType DestRecordTy,
147 llvm::BasicBlock *CastEnd) override;
148
149 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, llvm::Value *Value,
150 QualType SrcRecordTy,
151 QualType DestTy) override;
152
153 bool EmitBadCastCall(CodeGenFunction &CGF) override;
154
155 llvm::Value *
156 GetVirtualBaseClassOffset(CodeGenFunction &CGF, llvm::Value *This,
157 const CXXRecordDecl *ClassDecl,
158 const CXXRecordDecl *BaseClassDecl) override;
159
160 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
161
162 void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
163 SmallVectorImpl<CanQualType> &ArgTys) override;
164
useThunkForDtorVariant(const CXXDestructorDecl * Dtor,CXXDtorType DT) const165 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
166 CXXDtorType DT) const override {
167 // Itanium does not emit any destructor variant as an inline thunk.
168 // Delegating may occur as an optimization, but all variants are either
169 // emitted with external linkage or as linkonce if they are inline and used.
170 return false;
171 }
172
173 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
174
175 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
176 FunctionArgList &Params) override;
177
178 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
179
180 unsigned addImplicitConstructorArgs(CodeGenFunction &CGF,
181 const CXXConstructorDecl *D,
182 CXXCtorType Type, bool ForVirtualBase,
183 bool Delegating,
184 CallArgList &Args) override;
185
186 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
187 CXXDtorType Type, bool ForVirtualBase,
188 bool Delegating, llvm::Value *This) override;
189
190 void emitVTableDefinitions(CodeGenVTables &CGVT,
191 const CXXRecordDecl *RD) override;
192
193 llvm::Value *getVTableAddressPointInStructor(
194 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
195 BaseSubobject Base, const CXXRecordDecl *NearestVBase,
196 bool &NeedsVirtualOffset) override;
197
198 llvm::Constant *
199 getVTableAddressPointForConstExpr(BaseSubobject Base,
200 const CXXRecordDecl *VTableClass) override;
201
202 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
203 CharUnits VPtrOffset) override;
204
205 llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
206 llvm::Value *This,
207 llvm::Type *Ty) override;
208
209 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
210 const CXXDestructorDecl *Dtor,
211 CXXDtorType DtorType,
212 llvm::Value *This,
213 const CXXMemberCallExpr *CE) override;
214
215 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
216
setThunkLinkage(llvm::Function * Thunk,bool ForVTable,GlobalDecl GD,bool ReturnAdjustment)217 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
218 bool ReturnAdjustment) override {
219 // Allow inlining of thunks by emitting them with available_externally
220 // linkage together with vtables when needed.
221 if (ForVTable)
222 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
223 }
224
225 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, llvm::Value *This,
226 const ThisAdjustment &TA) override;
227
228 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
229 const ReturnAdjustment &RA) override;
230
getSrcArgforCopyCtor(const CXXConstructorDecl *,FunctionArgList & Args) const231 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
232 FunctionArgList &Args) const override {
233 assert(!Args.empty() && "expected the arglist to not be empty!");
234 return Args.size() - 1;
235 }
236
GetPureVirtualCallName()237 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
GetDeletedVirtualCallName()238 StringRef GetDeletedVirtualCallName() override
239 { return "__cxa_deleted_virtual"; }
240
241 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
242 llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
243 llvm::Value *NewPtr,
244 llvm::Value *NumElements,
245 const CXXNewExpr *expr,
246 QualType ElementType) override;
247 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
248 llvm::Value *allocPtr,
249 CharUnits cookieSize) override;
250
251 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
252 llvm::GlobalVariable *DeclPtr,
253 bool PerformInit) override;
254 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
255 llvm::Constant *dtor, llvm::Constant *addr) override;
256
257 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
258 llvm::Value *Val);
259 void EmitThreadLocalInitFuncs(
260 CodeGenModule &CGM,
261 ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
262 CXXThreadLocals,
263 ArrayRef<llvm::Function *> CXXThreadLocalInits,
264 ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) override;
265
usesThreadWrapperFunction() const266 bool usesThreadWrapperFunction() const override { return true; }
267 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
268 QualType LValType) override;
269
270 bool NeedsVTTParameter(GlobalDecl GD) override;
271
272 /**************************** RTTI Uniqueness ******************************/
273
274 protected:
275 /// Returns true if the ABI requires RTTI type_info objects to be unique
276 /// across a program.
shouldRTTIBeUnique() const277 virtual bool shouldRTTIBeUnique() const { return true; }
278
279 public:
280 /// What sort of unique-RTTI behavior should we use?
281 enum RTTIUniquenessKind {
282 /// We are guaranteeing, or need to guarantee, that the RTTI string
283 /// is unique.
284 RUK_Unique,
285
286 /// We are not guaranteeing uniqueness for the RTTI string, so we
287 /// can demote to hidden visibility but must use string comparisons.
288 RUK_NonUniqueHidden,
289
290 /// We are not guaranteeing uniqueness for the RTTI string, so we
291 /// have to use string comparisons, but we also have to emit it with
292 /// non-hidden visibility.
293 RUK_NonUniqueVisible
294 };
295
296 /// Return the required visibility status for the given type and linkage in
297 /// the current ABI.
298 RTTIUniquenessKind
299 classifyRTTIUniqueness(QualType CanTy,
300 llvm::GlobalValue::LinkageTypes Linkage) const;
301 friend class ItaniumRTTIBuilder;
302
303 void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
304 };
305
306 class ARMCXXABI : public ItaniumCXXABI {
307 public:
ARMCXXABI(CodeGen::CodeGenModule & CGM)308 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
309 ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
310 /* UseARMGuardVarABI = */ true) {}
311
HasThisReturn(GlobalDecl GD) const312 bool HasThisReturn(GlobalDecl GD) const override {
313 return (isa<CXXConstructorDecl>(GD.getDecl()) || (
314 isa<CXXDestructorDecl>(GD.getDecl()) &&
315 GD.getDtorType() != Dtor_Deleting));
316 }
317
318 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
319 QualType ResTy) override;
320
321 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
322 llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
323 llvm::Value *NewPtr,
324 llvm::Value *NumElements,
325 const CXXNewExpr *expr,
326 QualType ElementType) override;
327 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr,
328 CharUnits cookieSize) override;
329 };
330
331 class iOS64CXXABI : public ARMCXXABI {
332 public:
iOS64CXXABI(CodeGen::CodeGenModule & CGM)333 iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {}
334
335 // ARM64 libraries are prepared for non-unique RTTI.
shouldRTTIBeUnique() const336 bool shouldRTTIBeUnique() const override { return false; }
337 };
338 }
339
CreateItaniumCXXABI(CodeGenModule & CGM)340 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
341 switch (CGM.getTarget().getCXXABI().getKind()) {
342 // For IR-generation purposes, there's no significant difference
343 // between the ARM and iOS ABIs.
344 case TargetCXXABI::GenericARM:
345 case TargetCXXABI::iOS:
346 return new ARMCXXABI(CGM);
347
348 case TargetCXXABI::iOS64:
349 return new iOS64CXXABI(CGM);
350
351 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
352 // include the other 32-bit ARM oddities: constructor/destructor return values
353 // and array cookies.
354 case TargetCXXABI::GenericAArch64:
355 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
356 /* UseARMGuardVarABI = */ true);
357
358 case TargetCXXABI::GenericMIPS:
359 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true);
360
361 case TargetCXXABI::GenericItanium:
362 if (CGM.getContext().getTargetInfo().getTriple().getArch()
363 == llvm::Triple::le32) {
364 // For PNaCl, use ARM-style method pointers so that PNaCl code
365 // does not assume anything about the alignment of function
366 // pointers.
367 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
368 /* UseARMGuardVarABI = */ false);
369 }
370 return new ItaniumCXXABI(CGM);
371
372 case TargetCXXABI::Microsoft:
373 llvm_unreachable("Microsoft ABI is not Itanium-based");
374 }
375 llvm_unreachable("bad ABI kind");
376 }
377
378 llvm::Type *
ConvertMemberPointerType(const MemberPointerType * MPT)379 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
380 if (MPT->isMemberDataPointer())
381 return CGM.PtrDiffTy;
382 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, nullptr);
383 }
384
385 /// In the Itanium and ARM ABIs, method pointers have the form:
386 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
387 ///
388 /// In the Itanium ABI:
389 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero
390 /// - the this-adjustment is (memptr.adj)
391 /// - the virtual offset is (memptr.ptr - 1)
392 ///
393 /// In the ARM ABI:
394 /// - method pointers are virtual if (memptr.adj & 1) is nonzero
395 /// - the this-adjustment is (memptr.adj >> 1)
396 /// - the virtual offset is (memptr.ptr)
397 /// ARM uses 'adj' for the virtual flag because Thumb functions
398 /// may be only single-byte aligned.
399 ///
400 /// If the member is virtual, the adjusted 'this' pointer points
401 /// to a vtable pointer from which the virtual offset is applied.
402 ///
403 /// If the member is non-virtual, memptr.ptr is the address of
404 /// the function to call.
EmitLoadOfMemberFunctionPointer(CodeGenFunction & CGF,const Expr * E,llvm::Value * & This,llvm::Value * MemFnPtr,const MemberPointerType * MPT)405 llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
406 CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
407 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
408 CGBuilderTy &Builder = CGF.Builder;
409
410 const FunctionProtoType *FPT =
411 MPT->getPointeeType()->getAs<FunctionProtoType>();
412 const CXXRecordDecl *RD =
413 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
414
415 llvm::FunctionType *FTy =
416 CGM.getTypes().GetFunctionType(
417 CGM.getTypes().arrangeCXXMethodType(RD, FPT));
418
419 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
420
421 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
422 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
423 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
424
425 // Extract memptr.adj, which is in the second field.
426 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
427
428 // Compute the true adjustment.
429 llvm::Value *Adj = RawAdj;
430 if (UseARMMethodPtrABI)
431 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
432
433 // Apply the adjustment and cast back to the original struct type
434 // for consistency.
435 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
436 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
437 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
438
439 // Load the function pointer.
440 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
441
442 // If the LSB in the function pointer is 1, the function pointer points to
443 // a virtual function.
444 llvm::Value *IsVirtual;
445 if (UseARMMethodPtrABI)
446 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
447 else
448 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
449 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
450 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
451
452 // In the virtual path, the adjustment left 'This' pointing to the
453 // vtable of the correct base subobject. The "function pointer" is an
454 // offset within the vtable (+1 for the virtual flag on non-ARM).
455 CGF.EmitBlock(FnVirtual);
456
457 // Cast the adjusted this to a pointer to vtable pointer and load.
458 llvm::Type *VTableTy = Builder.getInt8PtrTy();
459 llvm::Value *VTable = CGF.GetVTablePtr(This, VTableTy);
460
461 // Apply the offset.
462 llvm::Value *VTableOffset = FnAsInt;
463 if (!UseARMMethodPtrABI)
464 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
465 VTable = Builder.CreateGEP(VTable, VTableOffset);
466
467 // Load the virtual function to call.
468 VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
469 llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "memptr.virtualfn");
470 CGF.EmitBranch(FnEnd);
471
472 // In the non-virtual path, the function pointer is actually a
473 // function pointer.
474 CGF.EmitBlock(FnNonVirtual);
475 llvm::Value *NonVirtualFn =
476 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
477
478 // We're done.
479 CGF.EmitBlock(FnEnd);
480 llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2);
481 Callee->addIncoming(VirtualFn, FnVirtual);
482 Callee->addIncoming(NonVirtualFn, FnNonVirtual);
483 return Callee;
484 }
485
486 /// Compute an l-value by applying the given pointer-to-member to a
487 /// base object.
EmitMemberDataPointerAddress(CodeGenFunction & CGF,const Expr * E,llvm::Value * Base,llvm::Value * MemPtr,const MemberPointerType * MPT)488 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
489 CodeGenFunction &CGF, const Expr *E, llvm::Value *Base, llvm::Value *MemPtr,
490 const MemberPointerType *MPT) {
491 assert(MemPtr->getType() == CGM.PtrDiffTy);
492
493 CGBuilderTy &Builder = CGF.Builder;
494
495 unsigned AS = Base->getType()->getPointerAddressSpace();
496
497 // Cast to char*.
498 Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
499
500 // Apply the offset, which we assume is non-null.
501 llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, MemPtr, "memptr.offset");
502
503 // Cast the address to the appropriate pointer type, adopting the
504 // address space of the base pointer.
505 llvm::Type *PType
506 = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
507 return Builder.CreateBitCast(Addr, PType);
508 }
509
510 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
511 /// conversion.
512 ///
513 /// Bitcast conversions are always a no-op under Itanium.
514 ///
515 /// Obligatory offset/adjustment diagram:
516 /// <-- offset --> <-- adjustment -->
517 /// |--------------------------|----------------------|--------------------|
518 /// ^Derived address point ^Base address point ^Member address point
519 ///
520 /// So when converting a base member pointer to a derived member pointer,
521 /// we add the offset to the adjustment because the address point has
522 /// decreased; and conversely, when converting a derived MP to a base MP
523 /// we subtract the offset from the adjustment because the address point
524 /// has increased.
525 ///
526 /// The standard forbids (at compile time) conversion to and from
527 /// virtual bases, which is why we don't have to consider them here.
528 ///
529 /// The standard forbids (at run time) casting a derived MP to a base
530 /// MP when the derived MP does not point to a member of the base.
531 /// This is why -1 is a reasonable choice for null data member
532 /// pointers.
533 llvm::Value *
EmitMemberPointerConversion(CodeGenFunction & CGF,const CastExpr * E,llvm::Value * src)534 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
535 const CastExpr *E,
536 llvm::Value *src) {
537 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
538 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
539 E->getCastKind() == CK_ReinterpretMemberPointer);
540
541 // Under Itanium, reinterprets don't require any additional processing.
542 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
543
544 // Use constant emission if we can.
545 if (isa<llvm::Constant>(src))
546 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
547
548 llvm::Constant *adj = getMemberPointerAdjustment(E);
549 if (!adj) return src;
550
551 CGBuilderTy &Builder = CGF.Builder;
552 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
553
554 const MemberPointerType *destTy =
555 E->getType()->castAs<MemberPointerType>();
556
557 // For member data pointers, this is just a matter of adding the
558 // offset if the source is non-null.
559 if (destTy->isMemberDataPointer()) {
560 llvm::Value *dst;
561 if (isDerivedToBase)
562 dst = Builder.CreateNSWSub(src, adj, "adj");
563 else
564 dst = Builder.CreateNSWAdd(src, adj, "adj");
565
566 // Null check.
567 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
568 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
569 return Builder.CreateSelect(isNull, src, dst);
570 }
571
572 // The this-adjustment is left-shifted by 1 on ARM.
573 if (UseARMMethodPtrABI) {
574 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
575 offset <<= 1;
576 adj = llvm::ConstantInt::get(adj->getType(), offset);
577 }
578
579 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
580 llvm::Value *dstAdj;
581 if (isDerivedToBase)
582 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
583 else
584 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
585
586 return Builder.CreateInsertValue(src, dstAdj, 1);
587 }
588
589 llvm::Constant *
EmitMemberPointerConversion(const CastExpr * E,llvm::Constant * src)590 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
591 llvm::Constant *src) {
592 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
593 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
594 E->getCastKind() == CK_ReinterpretMemberPointer);
595
596 // Under Itanium, reinterprets don't require any additional processing.
597 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
598
599 // If the adjustment is trivial, we don't need to do anything.
600 llvm::Constant *adj = getMemberPointerAdjustment(E);
601 if (!adj) return src;
602
603 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
604
605 const MemberPointerType *destTy =
606 E->getType()->castAs<MemberPointerType>();
607
608 // For member data pointers, this is just a matter of adding the
609 // offset if the source is non-null.
610 if (destTy->isMemberDataPointer()) {
611 // null maps to null.
612 if (src->isAllOnesValue()) return src;
613
614 if (isDerivedToBase)
615 return llvm::ConstantExpr::getNSWSub(src, adj);
616 else
617 return llvm::ConstantExpr::getNSWAdd(src, adj);
618 }
619
620 // The this-adjustment is left-shifted by 1 on ARM.
621 if (UseARMMethodPtrABI) {
622 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
623 offset <<= 1;
624 adj = llvm::ConstantInt::get(adj->getType(), offset);
625 }
626
627 llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
628 llvm::Constant *dstAdj;
629 if (isDerivedToBase)
630 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
631 else
632 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
633
634 return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
635 }
636
637 llvm::Constant *
EmitNullMemberPointer(const MemberPointerType * MPT)638 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
639 // Itanium C++ ABI 2.3:
640 // A NULL pointer is represented as -1.
641 if (MPT->isMemberDataPointer())
642 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
643
644 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
645 llvm::Constant *Values[2] = { Zero, Zero };
646 return llvm::ConstantStruct::getAnon(Values);
647 }
648
649 llvm::Constant *
EmitMemberDataPointer(const MemberPointerType * MPT,CharUnits offset)650 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
651 CharUnits offset) {
652 // Itanium C++ ABI 2.3:
653 // A pointer to data member is an offset from the base address of
654 // the class object containing it, represented as a ptrdiff_t
655 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
656 }
657
EmitMemberPointer(const CXXMethodDecl * MD)658 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
659 return BuildMemberPointer(MD, CharUnits::Zero());
660 }
661
BuildMemberPointer(const CXXMethodDecl * MD,CharUnits ThisAdjustment)662 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
663 CharUnits ThisAdjustment) {
664 assert(MD->isInstance() && "Member function must not be static!");
665 MD = MD->getCanonicalDecl();
666
667 CodeGenTypes &Types = CGM.getTypes();
668
669 // Get the function pointer (or index if this is a virtual function).
670 llvm::Constant *MemPtr[2];
671 if (MD->isVirtual()) {
672 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
673
674 const ASTContext &Context = getContext();
675 CharUnits PointerWidth =
676 Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
677 uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
678
679 if (UseARMMethodPtrABI) {
680 // ARM C++ ABI 3.2.1:
681 // This ABI specifies that adj contains twice the this
682 // adjustment, plus 1 if the member function is virtual. The
683 // least significant bit of adj then makes exactly the same
684 // discrimination as the least significant bit of ptr does for
685 // Itanium.
686 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
687 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
688 2 * ThisAdjustment.getQuantity() + 1);
689 } else {
690 // Itanium C++ ABI 2.3:
691 // For a virtual function, [the pointer field] is 1 plus the
692 // virtual table offset (in bytes) of the function,
693 // represented as a ptrdiff_t.
694 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
695 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
696 ThisAdjustment.getQuantity());
697 }
698 } else {
699 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
700 llvm::Type *Ty;
701 // Check whether the function has a computable LLVM signature.
702 if (Types.isFuncTypeConvertible(FPT)) {
703 // The function has a computable LLVM signature; use the correct type.
704 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
705 } else {
706 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
707 // function type is incomplete.
708 Ty = CGM.PtrDiffTy;
709 }
710 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
711
712 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
713 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
714 (UseARMMethodPtrABI ? 2 : 1) *
715 ThisAdjustment.getQuantity());
716 }
717
718 return llvm::ConstantStruct::getAnon(MemPtr);
719 }
720
EmitMemberPointer(const APValue & MP,QualType MPType)721 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
722 QualType MPType) {
723 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
724 const ValueDecl *MPD = MP.getMemberPointerDecl();
725 if (!MPD)
726 return EmitNullMemberPointer(MPT);
727
728 CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
729
730 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
731 return BuildMemberPointer(MD, ThisAdjustment);
732
733 CharUnits FieldOffset =
734 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
735 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
736 }
737
738 /// The comparison algorithm is pretty easy: the member pointers are
739 /// the same if they're either bitwise identical *or* both null.
740 ///
741 /// ARM is different here only because null-ness is more complicated.
742 llvm::Value *
EmitMemberPointerComparison(CodeGenFunction & CGF,llvm::Value * L,llvm::Value * R,const MemberPointerType * MPT,bool Inequality)743 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
744 llvm::Value *L,
745 llvm::Value *R,
746 const MemberPointerType *MPT,
747 bool Inequality) {
748 CGBuilderTy &Builder = CGF.Builder;
749
750 llvm::ICmpInst::Predicate Eq;
751 llvm::Instruction::BinaryOps And, Or;
752 if (Inequality) {
753 Eq = llvm::ICmpInst::ICMP_NE;
754 And = llvm::Instruction::Or;
755 Or = llvm::Instruction::And;
756 } else {
757 Eq = llvm::ICmpInst::ICMP_EQ;
758 And = llvm::Instruction::And;
759 Or = llvm::Instruction::Or;
760 }
761
762 // Member data pointers are easy because there's a unique null
763 // value, so it just comes down to bitwise equality.
764 if (MPT->isMemberDataPointer())
765 return Builder.CreateICmp(Eq, L, R);
766
767 // For member function pointers, the tautologies are more complex.
768 // The Itanium tautology is:
769 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
770 // The ARM tautology is:
771 // (L == R) <==> (L.ptr == R.ptr &&
772 // (L.adj == R.adj ||
773 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
774 // The inequality tautologies have exactly the same structure, except
775 // applying De Morgan's laws.
776
777 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
778 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
779
780 // This condition tests whether L.ptr == R.ptr. This must always be
781 // true for equality to hold.
782 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
783
784 // This condition, together with the assumption that L.ptr == R.ptr,
785 // tests whether the pointers are both null. ARM imposes an extra
786 // condition.
787 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
788 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
789
790 // This condition tests whether L.adj == R.adj. If this isn't
791 // true, the pointers are unequal unless they're both null.
792 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
793 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
794 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
795
796 // Null member function pointers on ARM clear the low bit of Adj,
797 // so the zero condition has to check that neither low bit is set.
798 if (UseARMMethodPtrABI) {
799 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
800
801 // Compute (l.adj | r.adj) & 1 and test it against zero.
802 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
803 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
804 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
805 "cmp.or.adj");
806 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
807 }
808
809 // Tie together all our conditions.
810 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
811 Result = Builder.CreateBinOp(And, PtrEq, Result,
812 Inequality ? "memptr.ne" : "memptr.eq");
813 return Result;
814 }
815
816 llvm::Value *
EmitMemberPointerIsNotNull(CodeGenFunction & CGF,llvm::Value * MemPtr,const MemberPointerType * MPT)817 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
818 llvm::Value *MemPtr,
819 const MemberPointerType *MPT) {
820 CGBuilderTy &Builder = CGF.Builder;
821
822 /// For member data pointers, this is just a check against -1.
823 if (MPT->isMemberDataPointer()) {
824 assert(MemPtr->getType() == CGM.PtrDiffTy);
825 llvm::Value *NegativeOne =
826 llvm::Constant::getAllOnesValue(MemPtr->getType());
827 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
828 }
829
830 // In Itanium, a member function pointer is not null if 'ptr' is not null.
831 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
832
833 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
834 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
835
836 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
837 // (the virtual bit) is set.
838 if (UseARMMethodPtrABI) {
839 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
840 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
841 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
842 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
843 "memptr.isvirtual");
844 Result = Builder.CreateOr(Result, IsVirtual);
845 }
846
847 return Result;
848 }
849
classifyReturnType(CGFunctionInfo & FI) const850 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
851 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
852 if (!RD)
853 return false;
854
855 // Return indirectly if we have a non-trivial copy ctor or non-trivial dtor.
856 // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
857 // special members.
858 if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) {
859 FI.getReturnInfo() = ABIArgInfo::getIndirect(0, /*ByVal=*/false);
860 return true;
861 }
862 return false;
863 }
864
865 /// The Itanium ABI requires non-zero initialization only for data
866 /// member pointers, for which '0' is a valid offset.
isZeroInitializable(const MemberPointerType * MPT)867 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
868 return MPT->getPointeeType()->isFunctionType();
869 }
870
871 /// The Itanium ABI always places an offset to the complete object
872 /// at entry -2 in the vtable.
emitVirtualObjectDelete(CodeGenFunction & CGF,const CXXDeleteExpr * DE,llvm::Value * Ptr,QualType ElementType,const CXXDestructorDecl * Dtor)873 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
874 const CXXDeleteExpr *DE,
875 llvm::Value *Ptr,
876 QualType ElementType,
877 const CXXDestructorDecl *Dtor) {
878 bool UseGlobalDelete = DE->isGlobalDelete();
879 if (UseGlobalDelete) {
880 // Derive the complete-object pointer, which is what we need
881 // to pass to the deallocation function.
882
883 // Grab the vtable pointer as an intptr_t*.
884 llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo());
885
886 // Track back to entry -2 and pull out the offset there.
887 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
888 VTable, -2, "complete-offset.ptr");
889 llvm::LoadInst *Offset = CGF.Builder.CreateLoad(OffsetPtr);
890 Offset->setAlignment(CGF.PointerAlignInBytes);
891
892 // Apply the offset.
893 llvm::Value *CompletePtr = CGF.Builder.CreateBitCast(Ptr, CGF.Int8PtrTy);
894 CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
895
896 // If we're supposed to call the global delete, make sure we do so
897 // even if the destructor throws.
898 CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
899 ElementType);
900 }
901
902 // FIXME: Provide a source location here even though there's no
903 // CXXMemberCallExpr for dtor call.
904 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
905 EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
906
907 if (UseGlobalDelete)
908 CGF.PopCleanupBlock();
909 }
910
emitRethrow(CodeGenFunction & CGF,bool isNoReturn)911 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
912 // void __cxa_rethrow();
913
914 llvm::FunctionType *FTy =
915 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
916
917 llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
918
919 if (isNoReturn)
920 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
921 else
922 CGF.EmitRuntimeCallOrInvoke(Fn);
923 }
924
getAllocateExceptionFn(CodeGenModule & CGM)925 static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
926 // void *__cxa_allocate_exception(size_t thrown_size);
927
928 llvm::FunctionType *FTy =
929 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);
930
931 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
932 }
933
getThrowFn(CodeGenModule & CGM)934 static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
935 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
936 // void (*dest) (void *));
937
938 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
939 llvm::FunctionType *FTy =
940 llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);
941
942 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
943 }
944
emitThrow(CodeGenFunction & CGF,const CXXThrowExpr * E)945 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
946 QualType ThrowType = E->getSubExpr()->getType();
947 // Now allocate the exception object.
948 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
949 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
950
951 llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
952 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
953 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
954
955 CGF.EmitAnyExprToExn(E->getSubExpr(), ExceptionPtr);
956
957 // Now throw the exception.
958 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
959 /*ForEH=*/true);
960
961 // The address of the destructor. If the exception type has a
962 // trivial destructor (or isn't a record), we just pass null.
963 llvm::Constant *Dtor = nullptr;
964 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
965 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
966 if (!Record->hasTrivialDestructor()) {
967 CXXDestructorDecl *DtorD = Record->getDestructor();
968 Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
969 Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
970 }
971 }
972 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
973
974 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
975 CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
976 }
977
getItaniumDynamicCastFn(CodeGenFunction & CGF)978 static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
979 // void *__dynamic_cast(const void *sub,
980 // const abi::__class_type_info *src,
981 // const abi::__class_type_info *dst,
982 // std::ptrdiff_t src2dst_offset);
983
984 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
985 llvm::Type *PtrDiffTy =
986 CGF.ConvertType(CGF.getContext().getPointerDiffType());
987
988 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
989
990 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
991
992 // Mark the function as nounwind readonly.
993 llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
994 llvm::Attribute::ReadOnly };
995 llvm::AttributeSet Attrs = llvm::AttributeSet::get(
996 CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);
997
998 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
999 }
1000
getBadCastFn(CodeGenFunction & CGF)1001 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1002 // void __cxa_bad_cast();
1003 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1004 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1005 }
1006
1007 /// \brief Compute the src2dst_offset hint as described in the
1008 /// Itanium C++ ABI [2.9.7]
computeOffsetHint(ASTContext & Context,const CXXRecordDecl * Src,const CXXRecordDecl * Dst)1009 static CharUnits computeOffsetHint(ASTContext &Context,
1010 const CXXRecordDecl *Src,
1011 const CXXRecordDecl *Dst) {
1012 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1013 /*DetectVirtual=*/false);
1014
1015 // If Dst is not derived from Src we can skip the whole computation below and
1016 // return that Src is not a public base of Dst. Record all inheritance paths.
1017 if (!Dst->isDerivedFrom(Src, Paths))
1018 return CharUnits::fromQuantity(-2ULL);
1019
1020 unsigned NumPublicPaths = 0;
1021 CharUnits Offset;
1022
1023 // Now walk all possible inheritance paths.
1024 for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end(); I != E;
1025 ++I) {
1026 if (I->Access != AS_public) // Ignore non-public inheritance.
1027 continue;
1028
1029 ++NumPublicPaths;
1030
1031 for (CXXBasePath::iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
1032 // If the path contains a virtual base class we can't give any hint.
1033 // -1: no hint.
1034 if (J->Base->isVirtual())
1035 return CharUnits::fromQuantity(-1ULL);
1036
1037 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1038 continue;
1039
1040 // Accumulate the base class offsets.
1041 const ASTRecordLayout &L = Context.getASTRecordLayout(J->Class);
1042 Offset += L.getBaseClassOffset(J->Base->getType()->getAsCXXRecordDecl());
1043 }
1044 }
1045
1046 // -2: Src is not a public base of Dst.
1047 if (NumPublicPaths == 0)
1048 return CharUnits::fromQuantity(-2ULL);
1049
1050 // -3: Src is a multiple public base type but never a virtual base type.
1051 if (NumPublicPaths > 1)
1052 return CharUnits::fromQuantity(-3ULL);
1053
1054 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1055 // Return the offset of Src from the origin of Dst.
1056 return Offset;
1057 }
1058
getBadTypeidFn(CodeGenFunction & CGF)1059 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1060 // void __cxa_bad_typeid();
1061 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1062
1063 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1064 }
1065
shouldTypeidBeNullChecked(bool IsDeref,QualType SrcRecordTy)1066 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1067 QualType SrcRecordTy) {
1068 return IsDeref;
1069 }
1070
EmitBadTypeidCall(CodeGenFunction & CGF)1071 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1072 llvm::Value *Fn = getBadTypeidFn(CGF);
1073 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1074 CGF.Builder.CreateUnreachable();
1075 }
1076
EmitTypeid(CodeGenFunction & CGF,QualType SrcRecordTy,llvm::Value * ThisPtr,llvm::Type * StdTypeInfoPtrTy)1077 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1078 QualType SrcRecordTy,
1079 llvm::Value *ThisPtr,
1080 llvm::Type *StdTypeInfoPtrTy) {
1081 llvm::Value *Value =
1082 CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo());
1083
1084 // Load the type info.
1085 Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1086 return CGF.Builder.CreateLoad(Value);
1087 }
1088
shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,QualType SrcRecordTy)1089 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1090 QualType SrcRecordTy) {
1091 return SrcIsPtr;
1092 }
1093
EmitDynamicCastCall(CodeGenFunction & CGF,llvm::Value * Value,QualType SrcRecordTy,QualType DestTy,QualType DestRecordTy,llvm::BasicBlock * CastEnd)1094 llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1095 CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy,
1096 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1097 llvm::Type *PtrDiffLTy =
1098 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1099 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1100
1101 llvm::Value *SrcRTTI =
1102 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1103 llvm::Value *DestRTTI =
1104 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1105
1106 // Compute the offset hint.
1107 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1108 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1109 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1110 PtrDiffLTy,
1111 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1112
1113 // Emit the call to __dynamic_cast.
1114 Value = CGF.EmitCastToVoidPtr(Value);
1115
1116 llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1117 Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1118 Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1119
1120 /// C++ [expr.dynamic.cast]p9:
1121 /// A failed cast to reference type throws std::bad_cast
1122 if (DestTy->isReferenceType()) {
1123 llvm::BasicBlock *BadCastBlock =
1124 CGF.createBasicBlock("dynamic_cast.bad_cast");
1125
1126 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1127 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1128
1129 CGF.EmitBlock(BadCastBlock);
1130 EmitBadCastCall(CGF);
1131 }
1132
1133 return Value;
1134 }
1135
EmitDynamicCastToVoid(CodeGenFunction & CGF,llvm::Value * Value,QualType SrcRecordTy,QualType DestTy)1136 llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1137 llvm::Value *Value,
1138 QualType SrcRecordTy,
1139 QualType DestTy) {
1140 llvm::Type *PtrDiffLTy =
1141 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1142 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1143
1144 // Get the vtable pointer.
1145 llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());
1146
1147 // Get the offset-to-top from the vtable.
1148 llvm::Value *OffsetToTop =
1149 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1150 OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");
1151
1152 // Finally, add the offset to the pointer.
1153 Value = CGF.EmitCastToVoidPtr(Value);
1154 Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1155
1156 return CGF.Builder.CreateBitCast(Value, DestLTy);
1157 }
1158
EmitBadCastCall(CodeGenFunction & CGF)1159 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1160 llvm::Value *Fn = getBadCastFn(CGF);
1161 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1162 CGF.Builder.CreateUnreachable();
1163 return true;
1164 }
1165
1166 llvm::Value *
GetVirtualBaseClassOffset(CodeGenFunction & CGF,llvm::Value * This,const CXXRecordDecl * ClassDecl,const CXXRecordDecl * BaseClassDecl)1167 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1168 llvm::Value *This,
1169 const CXXRecordDecl *ClassDecl,
1170 const CXXRecordDecl *BaseClassDecl) {
1171 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy);
1172 CharUnits VBaseOffsetOffset =
1173 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1174 BaseClassDecl);
1175
1176 llvm::Value *VBaseOffsetPtr =
1177 CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1178 "vbase.offset.ptr");
1179 VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1180 CGM.PtrDiffTy->getPointerTo());
1181
1182 llvm::Value *VBaseOffset =
1183 CGF.Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
1184
1185 return VBaseOffset;
1186 }
1187
EmitCXXConstructors(const CXXConstructorDecl * D)1188 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1189 // Just make sure we're in sync with TargetCXXABI.
1190 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1191
1192 // The constructor used for constructing this as a base class;
1193 // ignores virtual bases.
1194 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1195
1196 // The constructor used for constructing this as a complete class;
1197 // constructs the virtual bases, then calls the base constructor.
1198 if (!D->getParent()->isAbstract()) {
1199 // We don't need to emit the complete ctor if the class is abstract.
1200 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1201 }
1202 }
1203
1204 void
buildStructorSignature(const CXXMethodDecl * MD,StructorType T,SmallVectorImpl<CanQualType> & ArgTys)1205 ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
1206 SmallVectorImpl<CanQualType> &ArgTys) {
1207 ASTContext &Context = getContext();
1208
1209 // All parameters are already in place except VTT, which goes after 'this'.
1210 // These are Clang types, so we don't need to worry about sret yet.
1211
1212 // Check if we need to add a VTT parameter (which has type void **).
1213 if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0)
1214 ArgTys.insert(ArgTys.begin() + 1,
1215 Context.getPointerType(Context.VoidPtrTy));
1216 }
1217
EmitCXXDestructors(const CXXDestructorDecl * D)1218 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1219 // The destructor used for destructing this as a base class; ignores
1220 // virtual bases.
1221 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1222
1223 // The destructor used for destructing this as a most-derived class;
1224 // call the base destructor and then destructs any virtual bases.
1225 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1226
1227 // The destructor in a virtual table is always a 'deleting'
1228 // destructor, which calls the complete destructor and then uses the
1229 // appropriate operator delete.
1230 if (D->isVirtual())
1231 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1232 }
1233
addImplicitStructorParams(CodeGenFunction & CGF,QualType & ResTy,FunctionArgList & Params)1234 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1235 QualType &ResTy,
1236 FunctionArgList &Params) {
1237 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1238 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1239
1240 // Check if we need a VTT parameter as well.
1241 if (NeedsVTTParameter(CGF.CurGD)) {
1242 ASTContext &Context = getContext();
1243
1244 // FIXME: avoid the fake decl
1245 QualType T = Context.getPointerType(Context.VoidPtrTy);
1246 ImplicitParamDecl *VTTDecl
1247 = ImplicitParamDecl::Create(Context, nullptr, MD->getLocation(),
1248 &Context.Idents.get("vtt"), T);
1249 Params.insert(Params.begin() + 1, VTTDecl);
1250 getStructorImplicitParamDecl(CGF) = VTTDecl;
1251 }
1252 }
1253
EmitInstanceFunctionProlog(CodeGenFunction & CGF)1254 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1255 /// Initialize the 'this' slot.
1256 EmitThisParam(CGF);
1257
1258 /// Initialize the 'vtt' slot if needed.
1259 if (getStructorImplicitParamDecl(CGF)) {
1260 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1261 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1262 }
1263
1264 /// If this is a function that the ABI specifies returns 'this', initialize
1265 /// the return slot to 'this' at the start of the function.
1266 ///
1267 /// Unlike the setting of return types, this is done within the ABI
1268 /// implementation instead of by clients of CGCXXABI because:
1269 /// 1) getThisValue is currently protected
1270 /// 2) in theory, an ABI could implement 'this' returns some other way;
1271 /// HasThisReturn only specifies a contract, not the implementation
1272 if (HasThisReturn(CGF.CurGD))
1273 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1274 }
1275
addImplicitConstructorArgs(CodeGenFunction & CGF,const CXXConstructorDecl * D,CXXCtorType Type,bool ForVirtualBase,bool Delegating,CallArgList & Args)1276 unsigned ItaniumCXXABI::addImplicitConstructorArgs(
1277 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1278 bool ForVirtualBase, bool Delegating, CallArgList &Args) {
1279 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1280 return 0;
1281
1282 // Insert the implicit 'vtt' argument as the second argument.
1283 llvm::Value *VTT =
1284 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1285 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1286 Args.insert(Args.begin() + 1,
1287 CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
1288 return 1; // Added one arg.
1289 }
1290
EmitDestructorCall(CodeGenFunction & CGF,const CXXDestructorDecl * DD,CXXDtorType Type,bool ForVirtualBase,bool Delegating,llvm::Value * This)1291 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1292 const CXXDestructorDecl *DD,
1293 CXXDtorType Type, bool ForVirtualBase,
1294 bool Delegating, llvm::Value *This) {
1295 GlobalDecl GD(DD, Type);
1296 llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1297 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1298
1299 llvm::Value *Callee = nullptr;
1300 if (getContext().getLangOpts().AppleKext)
1301 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1302
1303 if (!Callee)
1304 Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
1305
1306 CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(), This, VTT,
1307 VTTTy, nullptr);
1308 }
1309
emitVTableDefinitions(CodeGenVTables & CGVT,const CXXRecordDecl * RD)1310 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1311 const CXXRecordDecl *RD) {
1312 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1313 if (VTable->hasInitializer())
1314 return;
1315
1316 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1317 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1318 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1319 llvm::Constant *RTTI =
1320 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1321
1322 // Create and set the initializer.
1323 llvm::Constant *Init = CGVT.CreateVTableInitializer(
1324 RD, VTLayout.vtable_component_begin(), VTLayout.getNumVTableComponents(),
1325 VTLayout.vtable_thunk_begin(), VTLayout.getNumVTableThunks(), RTTI);
1326 VTable->setInitializer(Init);
1327
1328 // Set the correct linkage.
1329 VTable->setLinkage(Linkage);
1330
1331 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1332 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1333
1334 // Set the right visibility.
1335 CGM.setGlobalVisibility(VTable, RD);
1336
1337 // Use pointer alignment for the vtable. Otherwise we would align them based
1338 // on the size of the initializer which doesn't make sense as only single
1339 // values are read.
1340 unsigned PAlign = CGM.getTarget().getPointerAlign(0);
1341 VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());
1342
1343 // If this is the magic class __cxxabiv1::__fundamental_type_info,
1344 // we will emit the typeinfo for the fundamental types. This is the
1345 // same behaviour as GCC.
1346 const DeclContext *DC = RD->getDeclContext();
1347 if (RD->getIdentifier() &&
1348 RD->getIdentifier()->isStr("__fundamental_type_info") &&
1349 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1350 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1351 DC->getParent()->isTranslationUnit())
1352 EmitFundamentalRTTIDescriptors();
1353
1354 CGM.EmitVTableBitSetEntries(VTable, VTLayout);
1355 }
1356
getVTableAddressPointInStructor(CodeGenFunction & CGF,const CXXRecordDecl * VTableClass,BaseSubobject Base,const CXXRecordDecl * NearestVBase,bool & NeedsVirtualOffset)1357 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1358 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1359 const CXXRecordDecl *NearestVBase, bool &NeedsVirtualOffset) {
1360 bool NeedsVTTParam = CGM.getCXXABI().NeedsVTTParameter(CGF.CurGD);
1361 NeedsVirtualOffset = (NeedsVTTParam && NearestVBase);
1362
1363 llvm::Value *VTableAddressPoint;
1364 if (NeedsVTTParam && (Base.getBase()->getNumVBases() || NearestVBase)) {
1365 // Get the secondary vpointer index.
1366 uint64_t VirtualPointerIndex =
1367 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1368
1369 // Load the VTT.
1370 llvm::Value *VTT = CGF.LoadCXXVTT();
1371 if (VirtualPointerIndex)
1372 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1373
1374 // And load the address point from the VTT.
1375 VTableAddressPoint = CGF.Builder.CreateLoad(VTT);
1376 } else {
1377 llvm::Constant *VTable =
1378 CGM.getCXXABI().getAddrOfVTable(VTableClass, CharUnits());
1379 uint64_t AddressPoint = CGM.getItaniumVTableContext()
1380 .getVTableLayout(VTableClass)
1381 .getAddressPoint(Base);
1382 VTableAddressPoint =
1383 CGF.Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
1384 }
1385
1386 return VTableAddressPoint;
1387 }
1388
1389 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1390 BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1391 auto *VTable = getAddrOfVTable(VTableClass, CharUnits());
1392
1393 // Find the appropriate vtable within the vtable group.
1394 uint64_t AddressPoint = CGM.getItaniumVTableContext()
1395 .getVTableLayout(VTableClass)
1396 .getAddressPoint(Base);
1397 llvm::Value *Indices[] = {
1398 llvm::ConstantInt::get(CGM.Int64Ty, 0),
1399 llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint)
1400 };
1401
1402 return llvm::ConstantExpr::getInBoundsGetElementPtr(VTable->getValueType(),
1403 VTable, Indices);
1404 }
1405
1406 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1407 CharUnits VPtrOffset) {
1408 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1409
1410 llvm::GlobalVariable *&VTable = VTables[RD];
1411 if (VTable)
1412 return VTable;
1413
1414 // Queue up this v-table for possible deferred emission.
1415 CGM.addDeferredVTable(RD);
1416
1417 SmallString<256> OutName;
1418 llvm::raw_svector_ostream Out(OutName);
1419 getMangleContext().mangleCXXVTable(RD, Out);
1420 Out.flush();
1421 StringRef Name = OutName.str();
1422
1423 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1424 llvm::ArrayType *ArrayType = llvm::ArrayType::get(
1425 CGM.Int8PtrTy, VTContext.getVTableLayout(RD).getNumVTableComponents());
1426
1427 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1428 Name, ArrayType, llvm::GlobalValue::ExternalLinkage);
1429 VTable->setUnnamedAddr(true);
1430
1431 if (RD->hasAttr<DLLImportAttr>())
1432 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1433 else if (RD->hasAttr<DLLExportAttr>())
1434 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
1435
1436 return VTable;
1437 }
1438
1439 llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1440 GlobalDecl GD,
1441 llvm::Value *This,
1442 llvm::Type *Ty) {
1443 GD = GD.getCanonicalDecl();
1444 Ty = Ty->getPointerTo()->getPointerTo();
1445 llvm::Value *VTable = CGF.GetVTablePtr(This, Ty);
1446
1447 if (CGF.SanOpts.has(SanitizerKind::CFIVCall))
1448 CGF.EmitVTablePtrCheckForCall(cast<CXXMethodDecl>(GD.getDecl()), VTable);
1449
1450 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1451 llvm::Value *VFuncPtr =
1452 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
1453 return CGF.Builder.CreateLoad(VFuncPtr);
1454 }
1455
1456 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1457 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1458 llvm::Value *This, const CXXMemberCallExpr *CE) {
1459 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1460 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1461
1462 const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
1463 Dtor, getFromDtorType(DtorType));
1464 llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1465 llvm::Value *Callee =
1466 getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty);
1467
1468 CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(), This,
1469 /*ImplicitParam=*/nullptr, QualType(), CE);
1470 return nullptr;
1471 }
1472
1473 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1474 CodeGenVTables &VTables = CGM.getVTables();
1475 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1476 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
1477 }
1478
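// Shared helper for 'this' and return-value adjustments in thunks. The
// non-virtual part is a fixed byte offset; the virtual part is an offset that
// is itself loaded from the vtable at the given offset from the address
// point. A minimal sketch of a case that needs a 'this'-adjusting thunk
// (hypothetical types):
//
//   struct A { virtual void f(); };
//   struct B { virtual void g(); };
//   struct C : A, B { void g() override; };  // calling g() through a B*
//                                            // adjusts 'this' to the C object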
1479 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
1480 llvm::Value *Ptr,
1481 int64_t NonVirtualAdjustment,
1482 int64_t VirtualAdjustment,
1483 bool IsReturnAdjustment) {
1484 if (!NonVirtualAdjustment && !VirtualAdjustment)
1485 return Ptr;
1486
1487 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1488 llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
1489
1490 if (NonVirtualAdjustment && !IsReturnAdjustment) {
1491 // Perform the non-virtual adjustment for a base-to-derived cast.
1492 V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
1493 }
1494
1495 if (VirtualAdjustment) {
1496 llvm::Type *PtrDiffTy =
1497 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1498
1499 // Perform the virtual adjustment.
1500 llvm::Value *VTablePtrPtr =
1501 CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());
1502
1503 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
1504
1505 llvm::Value *OffsetPtr =
1506 CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
1507
1508 OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
1509
1510 // Load the adjustment offset from the vtable.
1511 llvm::Value *Offset = CGF.Builder.CreateLoad(OffsetPtr);
1512
1513 // Adjust our pointer.
1514 V = CGF.Builder.CreateInBoundsGEP(V, Offset);
1515 }
1516
1517 if (NonVirtualAdjustment && IsReturnAdjustment) {
1518 // Perform the non-virtual adjustment for a derived-to-base cast.
1519 V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
1520 }
1521
1522 // Cast back to the original type.
1523 return CGF.Builder.CreateBitCast(V, Ptr->getType());
1524 }
1525
1526 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
1527 llvm::Value *This,
1528 const ThisAdjustment &TA) {
1529 return performTypeAdjustment(CGF, This, TA.NonVirtual,
1530 TA.Virtual.Itanium.VCallOffsetOffset,
1531 /*IsReturnAdjustment=*/false);
1532 }
1533
1534 llvm::Value *
1535 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
1536 const ReturnAdjustment &RA) {
1537 return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
1538 RA.Virtual.Itanium.VBaseOffsetOffset,
1539 /*IsReturnAdjustment=*/true);
1540 }
1541
1542 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
1543 RValue RV, QualType ResultType) {
1544 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
1545 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
1546
1547 // Destructor thunks in the ARM ABI have indeterminate results.
1548 llvm::Type *T =
1549 cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
1550 RValue Undef = RValue::get(llvm::UndefValue::get(T));
1551 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
1552 }
1553
1554 /************************** Array allocation cookies **************************/
1555
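// A rough picture of the Itanium array cookie produced by the code below
// (names are illustrative only):
//
//   p = operator new[](cookieSize + n * sizeof(T));
//   |<---- cookieSize ---->|
//   [ padding  | size_t n ][ T[0] | T[1] | ... ]
//   ^ p                    ^ p + cookieSize  <- pointer returned to the program
//
// cookieSize is sizeof(size_t) rounded up to the element alignment, and the
// element count is stored right-justified in that space.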
1556 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1557 // The array cookie is a size_t; pad that up to the element alignment.
1558 // The cookie is actually right-justified in that space.
1559 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
1560 CGM.getContext().getTypeAlignInChars(elementType));
1561 }
1562
1563 llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1564 llvm::Value *NewPtr,
1565 llvm::Value *NumElements,
1566 const CXXNewExpr *expr,
1567 QualType ElementType) {
1568 assert(requiresArrayCookie(expr));
1569
1570 unsigned AS = NewPtr->getType()->getPointerAddressSpace();
1571
1572 ASTContext &Ctx = getContext();
1573 QualType SizeTy = Ctx.getSizeType();
1574 CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);
1575
1576 // The size of the cookie.
1577 CharUnits CookieSize =
1578 std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
1579 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
1580
1581 // Compute an offset to the cookie.
1582 llvm::Value *CookiePtr = NewPtr;
1583 CharUnits CookieOffset = CookieSize - SizeSize;
1584 if (!CookieOffset.isZero())
1585 CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
1586 CookieOffset.getQuantity());
1587
1588 // Write the number of elements into the appropriate slot.
1589 llvm::Type *NumElementsTy = CGF.ConvertType(SizeTy)->getPointerTo(AS);
1590 llvm::Value *NumElementsPtr =
1591 CGF.Builder.CreateBitCast(CookiePtr, NumElementsTy);
1592 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
1593 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
1594 expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
1595 // The store to the CookiePtr does not need to be instrumented.
1596 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
1597 llvm::FunctionType *FTy =
1598 llvm::FunctionType::get(CGM.VoidTy, NumElementsTy, false);
1599 llvm::Constant *F =
1600 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
1601 CGF.Builder.CreateCall(F, NumElementsPtr);
1602 }
1603
1604 // Finally, compute a pointer to the actual data buffer by skipping
1605 // over the cookie completely.
1606 return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
1607 CookieSize.getQuantity());
1608 }
1609
1610 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1611 llvm::Value *allocPtr,
1612 CharUnits cookieSize) {
1613 // The element size is right-justified in the cookie.
1614 llvm::Value *numElementsPtr = allocPtr;
1615 CharUnits numElementsOffset =
1616 cookieSize - CharUnits::fromQuantity(CGF.SizeSizeInBytes);
1617 if (!numElementsOffset.isZero())
1618 numElementsPtr =
1619 CGF.Builder.CreateConstInBoundsGEP1_64(numElementsPtr,
1620 numElementsOffset.getQuantity());
1621
1622 unsigned AS = allocPtr->getType()->getPointerAddressSpace();
1623 numElementsPtr =
1624 CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
1625 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
1626 return CGF.Builder.CreateLoad(numElementsPtr);
1627 // In asan mode emit a function call instead of a regular load and let the
1628 // run-time deal with it: if the shadow is properly poisoned return the
1629 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
1630 // We can't simply ignore this load using nosanitize metadata because
1631 // the metadata may be lost.
1632 llvm::FunctionType *FTy =
1633 llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
1634 llvm::Constant *F =
1635 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
1636 return CGF.Builder.CreateCall(F, numElementsPtr);
1637 }
1638
1639 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1640 // ARM says that the cookie is always:
1641 // struct array_cookie {
1642 // std::size_t element_size; // element_size != 0
1643 // std::size_t element_count;
1644 // };
1645 // But the base ABI doesn't give anything an alignment greater than
1646 // 8, so we can dismiss this as typical ABI-author blindness to
1647 // actual language complexity and round up to the element alignment.
1648 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
1649 CGM.getContext().getTypeAlignInChars(elementType));
1650 }
1651
1652 llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1653 llvm::Value *newPtr,
1654 llvm::Value *numElements,
1655 const CXXNewExpr *expr,
1656 QualType elementType) {
1657 assert(requiresArrayCookie(expr));
1658
1659 // NewPtr is a char*, but we generalize to arbitrary addrspaces.
1660 unsigned AS = newPtr->getType()->getPointerAddressSpace();
1661
1662 // The cookie is always at the start of the buffer.
1663 llvm::Value *cookie = newPtr;
1664
1665 // The first element is the element size.
1666 cookie = CGF.Builder.CreateBitCast(cookie, CGF.SizeTy->getPointerTo(AS));
1667 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
1668 getContext().getTypeSizeInChars(elementType).getQuantity());
1669 CGF.Builder.CreateStore(elementSize, cookie);
1670
1671 // The second element is the element count.
1672 cookie = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.SizeTy, cookie, 1);
1673 CGF.Builder.CreateStore(numElements, cookie);
1674
1675 // Finally, compute a pointer to the actual data buffer by skipping
1676 // over the cookie completely.
1677 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
1678 return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr,
1679 cookieSize.getQuantity());
1680 }
1681
1682 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1683 llvm::Value *allocPtr,
1684 CharUnits cookieSize) {
1685 // The number of elements is at offset sizeof(size_t) relative to
1686 // the allocated pointer.
1687 llvm::Value *numElementsPtr
1688 = CGF.Builder.CreateConstInBoundsGEP1_64(allocPtr, CGF.SizeSizeInBytes);
1689
1690 unsigned AS = allocPtr->getType()->getPointerAddressSpace();
1691 numElementsPtr =
1692 CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
1693 return CGF.Builder.CreateLoad(numElementsPtr);
1694 }
1695
1696 /*********************** Static local initialization **************************/
1697
1698 static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
1699 llvm::PointerType *GuardPtrTy) {
1700 // int __cxa_guard_acquire(__guard *guard_object);
1701 llvm::FunctionType *FTy =
1702 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
1703 GuardPtrTy, /*isVarArg=*/false);
1704 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
1705 llvm::AttributeSet::get(CGM.getLLVMContext(),
1706 llvm::AttributeSet::FunctionIndex,
1707 llvm::Attribute::NoUnwind));
1708 }
1709
1710 static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
1711 llvm::PointerType *GuardPtrTy) {
1712 // void __cxa_guard_release(__guard *guard_object);
1713 llvm::FunctionType *FTy =
1714 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1715 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
1716 llvm::AttributeSet::get(CGM.getLLVMContext(),
1717 llvm::AttributeSet::FunctionIndex,
1718 llvm::Attribute::NoUnwind));
1719 }
1720
1721 static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
1722 llvm::PointerType *GuardPtrTy) {
1723 // void __cxa_guard_abort(__guard *guard_object);
1724 llvm::FunctionType *FTy =
1725 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1726 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
1727 llvm::AttributeSet::get(CGM.getLLVMContext(),
1728 llvm::AttributeSet::FunctionIndex,
1729 llvm::Attribute::NoUnwind));
1730 }
1731
1732 namespace {
1733 struct CallGuardAbort : EHScopeStack::Cleanup {
1734 llvm::GlobalVariable *Guard;
1735 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
1736
1737 void Emit(CodeGenFunction &CGF, Flags flags) override {
1738 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
1739 Guard);
1740 }
1741 };
1742 }
1743
1744 /// The ARM code here follows the Itanium code closely enough that we
1745 /// just special-case it at particular places.
1746 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
1747 const VarDecl &D,
1748 llvm::GlobalVariable *var,
1749 bool shouldPerformInit) {
1750 CGBuilderTy &Builder = CGF.Builder;
1751
1752 // We only need to use thread-safe statics for local non-TLS variables;
1753 // global initialization is always single-threaded.
1754 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
1755 D.isLocalVarDecl() && !D.getTLSKind();
1756
1757 // If we have a global variable with internal linkage and thread-safe statics
1758 // are disabled, we can just let the guard variable be of type i8.
1759 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
1760
1761 llvm::IntegerType *guardTy;
1762 if (useInt8GuardVariable) {
1763 guardTy = CGF.Int8Ty;
1764 } else {
1765 // Guard variables are 64 bits in the generic ABI and the width of size_t on ARM
1766 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
1767 guardTy = (UseARMGuardVarABI ? CGF.SizeTy : CGF.Int64Ty);
1768 }
1769 llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
1770
1771 // Create the guard variable if we don't already have it (as we
1772 // might if we're double-emitting this function body).
1773 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
1774 if (!guard) {
1775 // Mangle the name for the guard.
1776 SmallString<256> guardName;
1777 {
1778 llvm::raw_svector_ostream out(guardName);
1779 getMangleContext().mangleStaticGuardVariable(&D, out);
1780 out.flush();
1781 }
1782
1783 // Create the guard variable with a zero-initializer.
1784 // Just absorb linkage and visibility from the guarded variable.
1785 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
1786 false, var->getLinkage(),
1787 llvm::ConstantInt::get(guardTy, 0),
1788 guardName.str());
1789 guard->setVisibility(var->getVisibility());
1790 // If the variable is thread-local, so is its guard variable.
1791 guard->setThreadLocalMode(var->getThreadLocalMode());
1792
1793 // The ABI says: It is suggested that it be emitted in the same COMDAT group
1794 // as the associated data object.
1795 llvm::Comdat *C = var->getComdat();
1796 if (!D.isLocalVarDecl() && C) {
1797 guard->setComdat(C);
1798 CGF.CurFn->setComdat(C);
1799 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
1800 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
1801 }
1802
1803 CGM.setStaticLocalDeclGuardAddress(&D, guard);
1804 }
1805
1806 // Test whether the variable has completed initialization.
1807 //
1808 // Itanium C++ ABI 3.3.2:
1809 // The following is pseudo-code showing how these functions can be used:
1810 // if (obj_guard.first_byte == 0) {
1811 // if ( __cxa_guard_acquire (&obj_guard) ) {
1812 // try {
1813 // ... initialize the object ...;
1814 // } catch (...) {
1815 // __cxa_guard_abort (&obj_guard);
1816 // throw;
1817 // }
1818 // ... queue object destructor with __cxa_atexit() ...;
1819 // __cxa_guard_release (&obj_guard);
1820 // }
1821 // }
1822
1823 // Load the first byte of the guard variable.
1824 llvm::LoadInst *LI =
1825 Builder.CreateLoad(Builder.CreateBitCast(guard, CGM.Int8PtrTy));
1826 LI->setAlignment(1);
1827
1828 // Itanium ABI:
1829 // An implementation supporting thread-safety on multiprocessor
1830 // systems must also guarantee that references to the initialized
1831 // object do not occur before the load of the initialization flag.
1832 //
1833 // In LLVM, we do this by marking the load Acquire.
1834 if (threadsafe)
1835 LI->setAtomic(llvm::Acquire);
1836
1837 // For ARM, we should only check the first bit, rather than the entire byte:
1838 //
1839 // ARM C++ ABI 3.2.3.1:
1840 // To support the potential use of initialization guard variables
1841 // as semaphores that are the target of ARM SWP and LDREX/STREX
1842 // synchronizing instructions we define a static initialization
1843 // guard variable to be a 4-byte aligned, 4-byte word with the
1844 // following inline access protocol.
1845 // #define INITIALIZED 1
1846 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
1847 // if (__cxa_guard_acquire(&obj_guard))
1848 // ...
1849 // }
1850 //
1851 // and similarly for ARM64:
1852 //
1853 // ARM64 C++ ABI 3.2.2:
1854 // This ABI instead only specifies the value bit 0 of the static guard
1855 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
1856 // variable is not initialized and 1 when it is.
1857 llvm::Value *V =
1858 (UseARMGuardVarABI && !useInt8GuardVariable)
1859 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
1860 : LI;
1861 llvm::Value *isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
1862
1863 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
1864 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
1865
1866 // Check if the first byte of the guard variable is zero.
1867 Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);
1868
1869 CGF.EmitBlock(InitCheckBlock);
1870
1871 // Variables used when coping with thread-safe statics and exceptions.
1872 if (threadsafe) {
1873 // Call __cxa_guard_acquire.
1874 llvm::Value *V
1875 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
1876
1877 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
1878
1879 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
1880 InitBlock, EndBlock);
1881
1882 // Call __cxa_guard_abort along the exceptional edge.
1883 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
1884
1885 CGF.EmitBlock(InitBlock);
1886 }
1887
1888 // Emit the initializer and add a global destructor if appropriate.
1889 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
1890
1891 if (threadsafe) {
1892 // Pop the guard-abort cleanup if we pushed one.
1893 CGF.PopCleanupBlock();
1894
1895 // Call __cxa_guard_release. This cannot throw.
1896 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy), guard);
1897 } else {
1898 Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guard);
1899 }
1900
1901 CGF.EmitBlock(EndBlock);
1902 }
1903
1904 /// Register a global destructor using __cxa_atexit.
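/// A hedged sketch of the effect for a namespace-scope object (made-up names):
///
///   struct S { ~S(); };
///   S s;   // roughly: __cxa_atexit((void(*)(void*))&dtor-of-S, (void*)&s,
///          //                       &__dso_handle)
///
/// For thread_local variables the runtime entry point becomes
/// __cxa_thread_atexit (or _tlv_atexit on Mac OS X), as selected below.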
1905 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
1906 llvm::Constant *dtor,
1907 llvm::Constant *addr,
1908 bool TLS) {
1909 const char *Name = "__cxa_atexit";
1910 if (TLS) {
1911 const llvm::Triple &T = CGF.getTarget().getTriple();
1912 Name = T.isMacOSX() ? "_tlv_atexit" : "__cxa_thread_atexit";
1913 }
1914
1915 // We're assuming that the destructor function is something we can
1916 // reasonably call with the default CC. Go ahead and cast it to the
1917 // right prototype.
1918 llvm::Type *dtorTy =
1919 llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
1920
1921 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
1922 llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
1923 llvm::FunctionType *atexitTy =
1924 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
1925
1926 // Fetch the actual function.
1927 llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
1928 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
1929 fn->setDoesNotThrow();
1930
1931 // Create a variable that binds the atexit to this shared object.
1932 llvm::Constant *handle =
1933 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
1934
1935 llvm::Value *args[] = {
1936 llvm::ConstantExpr::getBitCast(dtor, dtorTy),
1937 llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
1938 handle
1939 };
1940 CGF.EmitNounwindRuntimeCall(atexit, args);
1941 }
1942
1943 /// Register a global destructor as best as we know how.
1944 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
1945 const VarDecl &D,
1946 llvm::Constant *dtor,
1947 llvm::Constant *addr) {
1948 // Use __cxa_atexit if available.
1949 if (CGM.getCodeGenOpts().CXAAtExit)
1950 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
1951
1952 if (D.getTLSKind())
1953 CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
1954
1955 // In Apple kexts, we want to add a global destructor entry.
1956 // FIXME: shouldn't this be guarded by some variable?
1957 if (CGM.getLangOpts().AppleKext) {
1958 // Generate a global destructor entry.
1959 return CGM.AddCXXDtorEntry(dtor, addr);
1960 }
1961
1962 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
1963 }
1964
1965 static bool isThreadWrapperReplaceable(const VarDecl *VD,
1966 CodeGen::CodeGenModule &CGM) {
1967 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
1968 // OS X prefers references to thread local variables to go through
1969 // the thread wrapper instead of directly referencing the backing variable.
1970 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
1971 CGM.getTarget().getTriple().isMacOSX();
1972 }
1973
1974 /// Get the appropriate linkage for the wrapper function. This is essentially
1975 /// the weak form of the variable's linkage; every translation unit which needs
1976 /// the wrapper emits a copy, and we want the linker to merge them.
1977 static llvm::GlobalValue::LinkageTypes
1978 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
1979 llvm::GlobalValue::LinkageTypes VarLinkage =
1980 CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);
1981
1982 // For internal linkage variables, we don't need an external or weak wrapper.
1983 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
1984 return VarLinkage;
1985
1986 // If the thread wrapper is replaceable, give it appropriate linkage.
1987 if (isThreadWrapperReplaceable(VD, CGM)) {
1988 if (llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) ||
1989 llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
1990 return llvm::GlobalVariable::WeakAnyLinkage;
1991 return VarLinkage;
1992 }
1993 return llvm::GlobalValue::WeakODRLinkage;
1994 }
1995
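// Accesses to dynamically initialized thread_local variables are routed
// through a per-variable "thread wrapper" function. A minimal sketch of the
// resulting pattern (the symbols are the Itanium thread-wrapper and
// thread-init manglings; the C++ names here are hypothetical):
//
//   // TU A                          // TU B
//   thread_local int tls = f();      extern thread_local int tls;
//                                    int g() { return tls; }  // calls wrapper
//
// The wrapper calls the per-variable init function when one exists and then
// returns the address of the variable (or of the referenced object, for
// references).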
1996 llvm::Function *
1997 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
1998 llvm::Value *Val) {
1999 // Mangle the name for the thread_local wrapper function.
2000 SmallString<256> WrapperName;
2001 {
2002 llvm::raw_svector_ostream Out(WrapperName);
2003 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2004 Out.flush();
2005 }
2006
2007 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2008 return cast<llvm::Function>(V);
2009
2010 llvm::Type *RetTy = Val->getType();
2011 if (VD->getType()->isReferenceType())
2012 RetTy = RetTy->getPointerElementType();
2013
2014 llvm::FunctionType *FnTy = llvm::FunctionType::get(RetTy, false);
2015 llvm::Function *Wrapper =
2016 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2017 WrapperName.str(), &CGM.getModule());
2018 // Always resolve references to the wrapper at link time.
2019 if (!Wrapper->hasLocalLinkage() && !isThreadWrapperReplaceable(VD, CGM))
2020 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2021 return Wrapper;
2022 }
2023
2024 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2025 CodeGenModule &CGM,
2026 ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
2027 CXXThreadLocals, ArrayRef<llvm::Function *> CXXThreadLocalInits,
2028 ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) {
2029 llvm::Function *InitFunc = nullptr;
2030 if (!CXXThreadLocalInits.empty()) {
2031 // Generate a guarded initialization function.
2032 llvm::FunctionType *FTy =
2033 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2034 InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init",
2035 SourceLocation(),
2036 /*TLS=*/true);
2037 llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2038 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2039 llvm::GlobalVariable::InternalLinkage,
2040 llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2041 Guard->setThreadLocal(true);
2042 CodeGenFunction(CGM)
2043 .GenerateCXXGlobalInitFunc(InitFunc, CXXThreadLocalInits, Guard);
2044 }
2045 for (unsigned I = 0, N = CXXThreadLocals.size(); I != N; ++I) {
2046 const VarDecl *VD = CXXThreadLocals[I].first;
2047 llvm::GlobalVariable *Var = CXXThreadLocals[I].second;
2048
2049 // Some targets require that all access to thread local variables go through
2050 // the thread wrapper. This means that we cannot attempt to create a thread
2051 // wrapper or a thread helper.
2052 if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition())
2053 continue;
2054
2055 // Mangle the name for the thread_local initialization function.
2056 SmallString<256> InitFnName;
2057 {
2058 llvm::raw_svector_ostream Out(InitFnName);
2059 getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2060 Out.flush();
2061 }
2062
2063 // If we have a definition for the variable, emit the initialization
2064 // function as an alias to the global Init function (if any). Otherwise,
2065 // produce a declaration of the initialization function.
2066 llvm::GlobalValue *Init = nullptr;
2067 bool InitIsInitFunc = false;
2068 if (VD->hasDefinition()) {
2069 InitIsInitFunc = true;
2070 if (InitFunc)
2071 Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2072 InitFunc);
2073 } else {
2074 // Emit a weak global function referring to the initialization function.
2075 // This function will not exist if the TU defining the thread_local
2076 // variable in question does not need any dynamic initialization for
2077 // its thread_local variables.
2078 llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2079 Init = llvm::Function::Create(
2080 FnTy, llvm::GlobalVariable::ExternalWeakLinkage, InitFnName.str(),
2081 &CGM.getModule());
2082 }
2083
2084 if (Init)
2085 Init->setVisibility(Var->getVisibility());
2086
2087 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
2088 llvm::LLVMContext &Context = CGM.getModule().getContext();
2089 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2090 CGBuilderTy Builder(Entry);
2091 if (InitIsInitFunc) {
2092 if (Init)
2093 Builder.CreateCall(Init);
2094 } else {
2095 // We don't know whether an init function exists; call it only if it does.
2096 llvm::Value *Have = Builder.CreateIsNotNull(Init);
2097 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2098 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2099 Builder.CreateCondBr(Have, InitBB, ExitBB);
2100
2101 Builder.SetInsertPoint(InitBB);
2102 Builder.CreateCall(Init);
2103 Builder.CreateBr(ExitBB);
2104
2105 Builder.SetInsertPoint(ExitBB);
2106 }
2107
2108 // For a reference, the result of the wrapper function is a pointer to
2109 // the referenced object.
2110 llvm::Value *Val = Var;
2111 if (VD->getType()->isReferenceType()) {
2112 llvm::LoadInst *LI = Builder.CreateLoad(Val);
2113 LI->setAlignment(CGM.getContext().getDeclAlign(VD).getQuantity());
2114 Val = LI;
2115 }
2116 if (Val->getType() != Wrapper->getReturnType())
2117 Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2118 Val, Wrapper->getReturnType(), "");
2119 Builder.CreateRet(Val);
2120 }
2121 }
2122
2123 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2124 const VarDecl *VD,
2125 QualType LValType) {
2126 QualType T = VD->getType();
2127 llvm::Type *Ty = CGF.getTypes().ConvertTypeForMem(T);
2128 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD, Ty);
2129 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2130
2131 Val = CGF.Builder.CreateCall(Wrapper);
2132
2133 LValue LV;
2134 if (VD->getType()->isReferenceType())
2135 LV = CGF.MakeNaturalAlignAddrLValue(Val, LValType);
2136 else
2137 LV = CGF.MakeAddrLValue(Val, LValType, CGF.getContext().getDeclAlign(VD));
2138 // FIXME: need setObjCGCLValueClass?
2139 return LV;
2140 }
2141
2142 /// Return whether the given global decl needs a VTT parameter, which it does
2143 /// if it's a base constructor or destructor with virtual bases.
2144 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2145 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2146
2147 // If the class has no virtual bases, just return early.
2148 if (!MD->getParent()->getNumVBases())
2149 return false;
2150
2151 // Check if we have a base constructor.
2152 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2153 return true;
2154
2155 // Check if we have a base destructor.
2156 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2157 return true;
2158
2159 return false;
2160 }
2161
2162 namespace {
2163 class ItaniumRTTIBuilder {
2164 CodeGenModule &CGM; // Per-module state.
2165 llvm::LLVMContext &VMContext;
2166 const ItaniumCXXABI &CXXABI; // Per-module state.
2167
2168 /// Fields - The fields of the RTTI descriptor currently being built.
2169 SmallVector<llvm::Constant *, 16> Fields;
2170
2171 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
2172 llvm::GlobalVariable *
2173 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
2174
2175 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
2176 /// descriptor of the given type.
2177 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
2178
2179 /// BuildVTablePointer - Build the vtable pointer for the given type.
2180 void BuildVTablePointer(const Type *Ty);
2181
2182 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2183 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
2184 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
2185
2186 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2187 /// classes with bases that do not satisfy the abi::__si_class_type_info
2188 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
2189 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
2190
2191 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
2192 /// for pointer types.
2193 void BuildPointerTypeInfo(QualType PointeeTy);
2194
2195 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
2196 /// type_info for an object type.
2197 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
2198
2199 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
2200 /// struct, used for member pointer types.
2201 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
2202
2203 public:
2204 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
2205 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
2206
2207 // Pointer type info flags.
2208 enum {
2209 /// PTI_Const - Type has const qualifier.
2210 PTI_Const = 0x1,
2211
2212 /// PTI_Volatile - Type has volatile qualifier.
2213 PTI_Volatile = 0x2,
2214
2215 /// PTI_Restrict - Type has restrict qualifier.
2216 PTI_Restrict = 0x4,
2217
2218 /// PTI_Incomplete - Type is incomplete.
2219 PTI_Incomplete = 0x8,
2220
2221 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
2222 /// (in pointer to member).
2223 PTI_ContainingClassIncomplete = 0x10
2224 };
2225
2226 // VMI type info flags.
2227 enum {
2228 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
2229 VMI_NonDiamondRepeat = 0x1,
2230
2231 /// VMI_DiamondShaped - Class is diamond shaped.
2232 VMI_DiamondShaped = 0x2
2233 };
2234
2235 // Base class type info flags.
2236 enum {
2237 /// BCTI_Virtual - Base class is virtual.
2238 BCTI_Virtual = 0x1,
2239
2240 /// BCTI_Public - Base class is public.
2241 BCTI_Public = 0x2
2242 };
2243
2244 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
2245 ///
2246 /// \param Force - true to force the creation of this RTTI value
2247 llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
2248 };
2249 }
2250
2251 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
2252 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
2253 SmallString<256> OutName;
2254 llvm::raw_svector_ostream Out(OutName);
2255 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
2256 Out.flush();
2257 StringRef Name = OutName.str();
2258
2259 // We know that the mangled name of the type starts at index 4 of the
2260 // mangled name of the typename, so we can just index into it in order to
2261 // get the mangled name of the type.
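  // (For example, the type-name symbol for 'int' is "_ZTSi", and the string
  //  stored in that variable is just "i".)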
2262 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
2263 Name.substr(4));
2264
2265 llvm::GlobalVariable *GV =
2266 CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
2267
2268 GV->setInitializer(Init);
2269
2270 return GV;
2271 }
2272
2273 llvm::Constant *
2274 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
2275 // Mangle the RTTI name.
2276 SmallString<256> OutName;
2277 llvm::raw_svector_ostream Out(OutName);
2278 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2279 Out.flush();
2280 StringRef Name = OutName.str();
2281
2282 // Look for an existing global.
2283 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
2284
2285 if (!GV) {
2286 // Create a new global variable.
2287 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
2288 /*Constant=*/true,
2289 llvm::GlobalValue::ExternalLinkage, nullptr,
2290 Name);
2291 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2292 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2293 if (RD->hasAttr<DLLImportAttr>())
2294 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
2295 }
2296 }
2297
2298 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
2299 }
2300
2301 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
2302 /// info for that type is defined in the standard library.
2303 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
2304 // Itanium C++ ABI 2.9.2:
2305 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
2306 // the run-time support library. Specifically, the run-time support
2307 // library should contain type_info objects for the types X, X* and
2308 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
2309 // unsigned char, signed char, short, unsigned short, int, unsigned int,
2310 // long, unsigned long, long long, unsigned long long, float, double,
2311 // long double, char16_t, char32_t, and the IEEE 754r decimal and
2312 // half-precision floating point types.
2313 switch (Ty->getKind()) {
2314 case BuiltinType::Void:
2315 case BuiltinType::NullPtr:
2316 case BuiltinType::Bool:
2317 case BuiltinType::WChar_S:
2318 case BuiltinType::WChar_U:
2319 case BuiltinType::Char_U:
2320 case BuiltinType::Char_S:
2321 case BuiltinType::UChar:
2322 case BuiltinType::SChar:
2323 case BuiltinType::Short:
2324 case BuiltinType::UShort:
2325 case BuiltinType::Int:
2326 case BuiltinType::UInt:
2327 case BuiltinType::Long:
2328 case BuiltinType::ULong:
2329 case BuiltinType::LongLong:
2330 case BuiltinType::ULongLong:
2331 case BuiltinType::Half:
2332 case BuiltinType::Float:
2333 case BuiltinType::Double:
2334 case BuiltinType::LongDouble:
2335 case BuiltinType::Char16:
2336 case BuiltinType::Char32:
2337 case BuiltinType::Int128:
2338 case BuiltinType::UInt128:
2339 case BuiltinType::OCLImage1d:
2340 case BuiltinType::OCLImage1dArray:
2341 case BuiltinType::OCLImage1dBuffer:
2342 case BuiltinType::OCLImage2d:
2343 case BuiltinType::OCLImage2dArray:
2344 case BuiltinType::OCLImage3d:
2345 case BuiltinType::OCLSampler:
2346 case BuiltinType::OCLEvent:
2347 return true;
2348
2349 case BuiltinType::Dependent:
2350 #define BUILTIN_TYPE(Id, SingletonId)
2351 #define PLACEHOLDER_TYPE(Id, SingletonId) \
2352 case BuiltinType::Id:
2353 #include "clang/AST/BuiltinTypes.def"
2354 llvm_unreachable("asking for RTTI for a placeholder type!");
2355
2356 case BuiltinType::ObjCId:
2357 case BuiltinType::ObjCClass:
2358 case BuiltinType::ObjCSel:
2359 llvm_unreachable("FIXME: Objective-C types are unsupported!");
2360 }
2361
2362 llvm_unreachable("Invalid BuiltinType Kind!");
2363 }
2364
2365 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
2366 QualType PointeeTy = PointerTy->getPointeeType();
2367 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
2368 if (!BuiltinTy)
2369 return false;
2370
2371 // Check the qualifiers.
2372 Qualifiers Quals = PointeeTy.getQualifiers();
2373 Quals.removeConst();
2374
2375 if (!Quals.empty())
2376 return false;
2377
2378 return TypeInfoIsInStandardLibrary(BuiltinTy);
2379 }
2380
2381 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
2382 /// information for the given type exists in the standard library.
2383 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
2384 // Type info for builtin types is defined in the standard library.
2385 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
2386 return TypeInfoIsInStandardLibrary(BuiltinTy);
2387
2388 // Type info for some pointer types to builtin types is defined in the
2389 // standard library.
2390 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2391 return TypeInfoIsInStandardLibrary(PointerTy);
2392
2393 return false;
2394 }
2395
2396 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
2397 /// the given type exists somewhere else, and that we should not emit the type
2398 /// information in this translation unit. Assumes that it is not a
2399 /// standard-library type.
2400 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
2401 QualType Ty) {
2402 ASTContext &Context = CGM.getContext();
2403
2404 // If RTTI is disabled, assume it might be disabled in the
2405 // translation unit that defines any potential key function, too.
2406 if (!Context.getLangOpts().RTTI) return false;
2407
2408 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2409 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2410 if (!RD->hasDefinition())
2411 return false;
2412
2413 if (!RD->isDynamicClass())
2414 return false;
2415
2416 // FIXME: this may need to be reconsidered if the key function
2417 // changes.
2418 if (CGM.getVTables().isVTableExternal(RD))
2419 return true;
2420
2421 if (RD->hasAttr<DLLImportAttr>())
2422 return true;
2423 }
2424
2425 return false;
2426 }
2427
2428 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
2429 static bool IsIncompleteClassType(const RecordType *RecordTy) {
2430 return !RecordTy->getDecl()->isCompleteDefinition();
2431 }
2432
2433 /// ContainsIncompleteClassType - Returns whether the given type contains an
2434 /// incomplete class type. This is true if
2435 ///
2436 /// * The given type is an incomplete class type.
2437 /// * The given type is a pointer type whose pointee type contains an
2438 /// incomplete class type.
2439 /// * The given type is a member pointer type whose class is an incomplete
2440 /// class type.
2441 /// * The given type is a member pointer type whose pointee type contains an
2442 /// incomplete class type.
2443 /// In short, the type directly or indirectly refers to an incomplete class type.
2444 static bool ContainsIncompleteClassType(QualType Ty) {
2445 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2446 if (IsIncompleteClassType(RecordTy))
2447 return true;
2448 }
2449
2450 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2451 return ContainsIncompleteClassType(PointerTy->getPointeeType());
2452
2453 if (const MemberPointerType *MemberPointerTy =
2454 dyn_cast<MemberPointerType>(Ty)) {
2455 // Check if the class type is incomplete.
2456 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
2457 if (IsIncompleteClassType(ClassType))
2458 return true;
2459
2460 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
2461 }
2462
2463 return false;
2464 }
2465
2466 // CanUseSingleInheritance - Return whether the given record decl has a "single,
2467 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
2468 // iff the base is)", according to the Itanium C++ ABI, 2.9.5p6b.
2469 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
2470 // Check the number of bases.
2471 if (RD->getNumBases() != 1)
2472 return false;
2473
2474 // Get the base.
2475 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
2476
2477 // Check that the base is not virtual.
2478 if (Base->isVirtual())
2479 return false;
2480
2481 // Check that the base is public.
2482 if (Base->getAccessSpecifier() != AS_public)
2483 return false;
2484
2485 // Check that the class is dynamic iff the base is.
2486 const CXXRecordDecl *BaseDecl =
2487 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
2488 if (!BaseDecl->isEmpty() &&
2489 BaseDecl->isDynamicClass() != RD->isDynamicClass())
2490 return false;
2491
2492 return true;
2493 }
2494
2495 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
2496 // abi::__class_type_info.
2497 static const char * const ClassTypeInfo =
2498 "_ZTVN10__cxxabiv117__class_type_infoE";
2499 // abi::__si_class_type_info.
2500 static const char * const SIClassTypeInfo =
2501 "_ZTVN10__cxxabiv120__si_class_type_infoE";
2502 // abi::__vmi_class_type_info.
2503 static const char * const VMIClassTypeInfo =
2504 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
2505
2506 const char *VTableName = nullptr;
2507
2508 switch (Ty->getTypeClass()) {
2509 #define TYPE(Class, Base)
2510 #define ABSTRACT_TYPE(Class, Base)
2511 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
2512 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
2513 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2514 #include "clang/AST/TypeNodes.def"
2515 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
2516
2517 case Type::LValueReference:
2518 case Type::RValueReference:
2519 llvm_unreachable("References shouldn't get here");
2520
2521 case Type::Auto:
2522 llvm_unreachable("Undeduced auto type shouldn't get here");
2523
2524 case Type::Builtin:
2525 // GCC treats vector and complex types as fundamental types.
2526 case Type::Vector:
2527 case Type::ExtVector:
2528 case Type::Complex:
2529 case Type::Atomic:
2530 // FIXME: GCC treats block pointers as fundamental types?!
2531 case Type::BlockPointer:
2532 // abi::__fundamental_type_info.
2533 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
2534 break;
2535
2536 case Type::ConstantArray:
2537 case Type::IncompleteArray:
2538 case Type::VariableArray:
2539 // abi::__array_type_info.
2540 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
2541 break;
2542
2543 case Type::FunctionNoProto:
2544 case Type::FunctionProto:
2545 // abi::__function_type_info.
2546 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
2547 break;
2548
2549 case Type::Enum:
2550 // abi::__enum_type_info.
2551 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
2552 break;
2553
2554 case Type::Record: {
2555 const CXXRecordDecl *RD =
2556 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
2557
2558 if (!RD->hasDefinition() || !RD->getNumBases()) {
2559 VTableName = ClassTypeInfo;
2560 } else if (CanUseSingleInheritance(RD)) {
2561 VTableName = SIClassTypeInfo;
2562 } else {
2563 VTableName = VMIClassTypeInfo;
2564 }
2565
2566 break;
2567 }
2568
2569 case Type::ObjCObject:
2570 // Ignore protocol qualifiers.
2571 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
2572
2573 // Handle id and Class.
2574 if (isa<BuiltinType>(Ty)) {
2575 VTableName = ClassTypeInfo;
2576 break;
2577 }
2578
2579 assert(isa<ObjCInterfaceType>(Ty));
2580 // Fall through.
2581
2582 case Type::ObjCInterface:
2583 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
2584 VTableName = SIClassTypeInfo;
2585 } else {
2586 VTableName = ClassTypeInfo;
2587 }
2588 break;
2589
2590 case Type::ObjCObjectPointer:
2591 case Type::Pointer:
2592 // abi::__pointer_type_info.
2593 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
2594 break;
2595
2596 case Type::MemberPointer:
2597 // abi::__pointer_to_member_type_info.
2598 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
2599 break;
2600 }
2601
2602 llvm::Constant *VTable =
2603 CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
2604
2605 llvm::Type *PtrDiffTy =
2606 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
2607
2608 // The vtable address point is 2.
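  // (Two slots are skipped: the offset-to-top entry and the RTTI pointer, so
  //  the resulting address matches the vptr stored in constructed objects of
  //  these abi:: classes.)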
2609 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
2610 VTable =
2611 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
2612 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
2613
2614 Fields.push_back(VTable);
2615 }
2616
2617 /// \brief Return the linkage that the type info and type info name constants
2618 /// should have for the given type.
2619 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
2620 QualType Ty) {
2621 // Itanium C++ ABI 2.9.5p7:
2622 // In addition, it and all of the intermediate abi::__pointer_type_info
2623 // structs in the chain down to the abi::__class_type_info for the
2624 // incomplete class type must be prevented from resolving to the
2625 // corresponding type_info structs for the complete class type, possibly
2626 // by making them local static objects. Finally, a dummy class RTTI is
2627 // generated for the incomplete type that will not resolve to the final
2628 // complete class RTTI (because the latter need not exist), possibly by
2629 // making it a local static object.
2630 if (ContainsIncompleteClassType(Ty))
2631 return llvm::GlobalValue::InternalLinkage;
2632
2633 switch (Ty->getLinkage()) {
2634 case NoLinkage:
2635 case InternalLinkage:
2636 case UniqueExternalLinkage:
2637 return llvm::GlobalValue::InternalLinkage;
2638
2639 case VisibleNoLinkage:
2640 case ExternalLinkage:
2641 if (!CGM.getLangOpts().RTTI) {
2642 // RTTI is not enabled, which means that this type info struct is going
2643 // to be used for exception handling. Give it linkonce_odr linkage.
2644 return llvm::GlobalValue::LinkOnceODRLinkage;
2645 }
2646
2647 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
2648 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
2649 if (RD->hasAttr<WeakAttr>())
2650 return llvm::GlobalValue::WeakODRLinkage;
2651 if (RD->isDynamicClass())
2652 return CGM.getVTableLinkage(RD);
2653 }
2654
2655 return llvm::GlobalValue::LinkOnceODRLinkage;
2656 }
2657
2658 llvm_unreachable("Invalid linkage!");
2659 }
2660
2661 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
2662 // We want to operate on the canonical type.
2663 Ty = CGM.getContext().getCanonicalType(Ty);
2664
2665 // Check if we've already emitted an RTTI descriptor for this type.
2666 SmallString<256> OutName;
2667 llvm::raw_svector_ostream Out(OutName);
2668 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2669 Out.flush();
2670 StringRef Name = OutName.str();
2671
2672 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
2673 if (OldGV && !OldGV->isDeclaration()) {
2674 assert(!OldGV->hasAvailableExternallyLinkage() &&
2675 "available_externally typeinfos not yet implemented");
2676
2677 return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
2678 }
2679
2680 // Check if there is already an external RTTI descriptor for this type.
2681 bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
2682 if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
2683 return GetAddrOfExternalRTTIDescriptor(Ty);
2684
2685 // Emit standard-library type info with external linkage.
2686 llvm::GlobalVariable::LinkageTypes Linkage;
2687 if (IsStdLib)
2688 Linkage = llvm::GlobalValue::ExternalLinkage;
2689 else
2690 Linkage = getTypeInfoLinkage(CGM, Ty);
2691
2692 // Add the vtable pointer.
2693 BuildVTablePointer(cast<Type>(Ty));
2694
2695 // And the name.
2696 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
2697 llvm::Constant *TypeNameField;
2698
2699 // If we're supposed to demote the visibility, be sure to set a flag
2700 // to use a string comparison for type_info comparisons.
2701 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
2702 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
2703 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
2704 // The flag is the sign bit, which on ARM64 is defined to be clear
2705 // for global pointers. This is very ARM64-specific.
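    // A set high bit tells the runtime's type_info comparison to fall back to
    // comparing the type name strings instead of the name pointers, since the
    // RTTI object may not be globally unique.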
2706 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
2707 llvm::Constant *flag =
2708 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
2709 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
2710 TypeNameField =
2711 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
2712 } else {
2713 TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
2714 }
2715 Fields.push_back(TypeNameField);
2716
2717 switch (Ty->getTypeClass()) {
2718 #define TYPE(Class, Base)
2719 #define ABSTRACT_TYPE(Class, Base)
2720 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
2721 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
2722 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2723 #include "clang/AST/TypeNodes.def"
2724 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
2725
2726 // GCC treats vector types as fundamental types.
2727 case Type::Builtin:
2728 case Type::Vector:
2729 case Type::ExtVector:
2730 case Type::Complex:
2731 case Type::BlockPointer:
2732 // Itanium C++ ABI 2.9.5p4:
2733 // abi::__fundamental_type_info adds no data members to std::type_info.
2734 break;
2735
2736 case Type::LValueReference:
2737 case Type::RValueReference:
2738 llvm_unreachable("References shouldn't get here");
2739
2740 case Type::Auto:
2741 llvm_unreachable("Undeduced auto type shouldn't get here");
2742
2743 case Type::ConstantArray:
2744 case Type::IncompleteArray:
2745 case Type::VariableArray:
2746 // Itanium C++ ABI 2.9.5p5:
2747 // abi::__array_type_info adds no data members to std::type_info.
2748 break;
2749
2750 case Type::FunctionNoProto:
2751 case Type::FunctionProto:
2752 // Itanium C++ ABI 2.9.5p5:
2753 // abi::__function_type_info adds no data members to std::type_info.
2754 break;
2755
2756 case Type::Enum:
2757 // Itanium C++ ABI 2.9.5p5:
2758 // abi::__enum_type_info adds no data members to std::type_info.
2759 break;
2760
2761 case Type::Record: {
2762 const CXXRecordDecl *RD =
2763 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
2764 if (!RD->hasDefinition() || !RD->getNumBases()) {
2765 // We don't need to emit any fields.
2766 break;
2767 }
2768
2769 if (CanUseSingleInheritance(RD))
2770 BuildSIClassTypeInfo(RD);
2771 else
2772 BuildVMIClassTypeInfo(RD);
2773
2774 break;
2775 }
2776
2777 case Type::ObjCObject:
2778 case Type::ObjCInterface:
2779 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
2780 break;
2781
2782 case Type::ObjCObjectPointer:
2783 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
2784 break;
2785
2786 case Type::Pointer:
2787 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
2788 break;
2789
2790 case Type::MemberPointer:
2791 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
2792 break;
2793
2794 case Type::Atomic:
2795 // No fields, at least for the moment.
2796 break;
2797 }
2798
2799 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
2800
2801 llvm::Module &M = CGM.getModule();
2802 llvm::GlobalVariable *GV =
2803 new llvm::GlobalVariable(M, Init->getType(),
2804 /*Constant=*/true, Linkage, Init, Name);
2805
2806 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
2807 GV->setComdat(M.getOrInsertComdat(GV->getName()));
2808
2809 // If there's already an old global variable, replace it with the new one.
2810 if (OldGV) {
2811 GV->takeName(OldGV);
2812 llvm::Constant *NewPtr =
2813 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
2814 OldGV->replaceAllUsesWith(NewPtr);
2815 OldGV->eraseFromParent();
2816 }
2817
2818 // The Itanium ABI specifies that type_info objects must be globally
2819 // unique, with one exception: if the type is an incomplete class
2820 // type or a (possibly indirect) pointer to one. That exception
2821 // affects the general case of comparing type_info objects produced
2822 // by the typeid operator, which is why the comparison operators on
2823 // std::type_info generally use the type_info name pointers instead
2824 // of the object addresses. However, the language's built-in uses
2825 // of RTTI generally require class types to be complete, even when
2826 // manipulating pointers to those class types. This allows the
2827 // implementation of dynamic_cast to rely on address equality tests,
2828 // which is much faster.
2829
2830 // All of this is to say that it's important that both the type_info
2831 // object and the type_info name be uniqued when weakly emitted.
2832
2833 // Give the type_info object and name the formal visibility of the
2834 // type itself.
2835 llvm::GlobalValue::VisibilityTypes llvmVisibility;
2836 if (llvm::GlobalValue::isLocalLinkage(Linkage))
2837 // If the linkage is local, only default visibility makes sense.
2838 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
2839 else if (RTTIUniqueness == ItaniumCXXABI::RUK_NonUniqueHidden)
2840 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
2841 else
2842 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
2843 TypeName->setVisibility(llvmVisibility);
2844 GV->setVisibility(llvmVisibility);
2845
2846 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
2847 }
2848
2849 /// ComputeQualifierFlags - Compute the pointer type info flags from the
2850 /// given qualifiers.
2851 static unsigned ComputeQualifierFlags(Qualifiers Quals) {
2852 unsigned Flags = 0;
2853
2854 if (Quals.hasConst())
2855 Flags |= ItaniumRTTIBuilder::PTI_Const;
2856 if (Quals.hasVolatile())
2857 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
2858 if (Quals.hasRestrict())
2859 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
2860
2861 return Flags;
2862 }
2863
2864 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
2865 /// for the given Objective-C object type.
2866 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
2867 // Drop qualifiers.
2868 const Type *T = OT->getBaseType().getTypePtr();
2869 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
2870
2871 // The builtin types are abi::__class_type_infos and don't require
2872 // extra fields.
2873 if (isa<BuiltinType>(T)) return;
2874
2875 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
2876 ObjCInterfaceDecl *Super = Class->getSuperClass();
2877
2878 // Root classes are also __class_type_info.
2879 if (!Super) return;
2880
2881 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
2882
2883 // Everything else is single inheritance.
2884 llvm::Constant *BaseTypeInfo =
2885 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
2886 Fields.push_back(BaseTypeInfo);
2887 }
2888
2889 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2890 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
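/// For example (illustrative): for `struct B : A {}`, the __si_class_type_info
/// emitted for B carries exactly one extra field, a pointer to A's type_info.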
2891 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
2892 // Itanium C++ ABI 2.9.5p6b:
2893 // It adds to abi::__class_type_info a single member pointing to the
2894 // type_info structure for the base type,
2895 llvm::Constant *BaseTypeInfo =
2896 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
2897 Fields.push_back(BaseTypeInfo);
2898 }
2899
2900 namespace {
2901 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
2902 /// a class hierarchy.
2903 struct SeenBases {
2904 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
2905 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
2906 };
2907 }
2908
2909 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
2910 /// abi::__vmi_class_type_info.
2911 ///
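/// For example (illustrative): in `struct D : B, C` where B and C both derive
/// virtually from A, the second visit of A sets VMI_DiamondShaped; if both
/// derivations were non-virtual instead, the repeat would set
/// VMI_NonDiamondRepeat.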
2912 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
2913 SeenBases &Bases) {
2914
2915 unsigned Flags = 0;
2916
2917 const CXXRecordDecl *BaseDecl =
2918 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
2919
2920 if (Base->isVirtual()) {
2921 // Mark the virtual base as seen.
2922 if (!Bases.VirtualBases.insert(BaseDecl).second) {
2923 // If this virtual base has been seen before, then the class is diamond
2924 // shaped.
2925 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
2926 } else {
2927 if (Bases.NonVirtualBases.count(BaseDecl))
2928 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
2929 }
2930 } else {
2931 // Mark the non-virtual base as seen.
2932 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
2933 // If this non-virtual base has been seen before, then the class has non-
2934 // diamond shaped repeated inheritance.
2935 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
2936 } else {
2937 if (Bases.VirtualBases.count(BaseDecl))
2938 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
2939 }
2940 }
2941
2942 // Walk all bases.
2943 for (const auto &I : BaseDecl->bases())
2944 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
2945
2946 return Flags;
2947 }
2948
2949 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
2950 unsigned Flags = 0;
2951 SeenBases Bases;
2952
2953 // Walk all bases.
2954 for (const auto &I : RD->bases())
2955 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
2956
2957 return Flags;
2958 }
2959
2960 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2961 /// classes with bases that do not satisfy the abi::__si_class_type_info
2962 /// constraints, according to the Itanium C++ ABI, 2.9.5p6c.
2963 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
2964 llvm::Type *UnsignedIntLTy =
2965 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
2966
2967 // Itanium C++ ABI 2.9.5p6c:
2968 // __flags is a word with flags describing details about the class
2969 // structure, which may be referenced by using the __flags_masks
2970 // enumeration. These flags refer to both direct and indirect bases.
2971 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
2972 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
2973
2974 // Itanium C++ ABI 2.9.5p6c:
2975 // __base_count is a word with the number of direct proper base class
2976 // descriptions that follow.
2977 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
2978
2979 if (!RD->getNumBases())
2980 return;
2981
2982 llvm::Type *LongLTy =
2983 CGM.getTypes().ConvertType(CGM.getContext().LongTy);
2984
2985 // Now add the base class descriptions.
2986
2987 // Itanium C++ ABI 2.9.5p6c:
2988 // __base_info[] is an array of base class descriptions -- one for every
2989 // direct proper base. Each description is of the type:
2990 //
2991 // struct abi::__base_class_type_info {
2992 // public:
2993 // const __class_type_info *__base_type;
2994 // long __offset_flags;
2995 //
2996 // enum __offset_flags_masks {
2997 // __virtual_mask = 0x1,
2998 // __public_mask = 0x2,
2999 // __offset_shift = 8
3000 // };
3001 // };
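  //   (Worked example with hypothetical numbers: a public, non-virtual base
  //   laid out at byte offset 16 gets
  //   __offset_flags == (16 << __offset_shift) | __public_mask == 0x1002.)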
3002 for (const auto &Base : RD->bases()) {
3003 // The __base_type member points to the RTTI for the base type.
3004 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3005
3006 const CXXRecordDecl *BaseDecl =
3007 cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
3008
3009 int64_t OffsetFlags = 0;
3010
3011 // All but the lower 8 bits of __offset_flags are a signed offset.
3012 // For a non-virtual base, this is the offset in the object of the base
3013 // subobject. For a virtual base, this is the offset in the virtual table of
3014 // the virtual base offset for the virtual base referenced (negative).
3015 CharUnits Offset;
3016 if (Base.isVirtual())
3017 Offset =
3018 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3019 else {
3020 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3021 Offset = Layout.getBaseClassOffset(BaseDecl);
3022     }
3023
3024 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3025
3026 // The low-order byte of __offset_flags contains flags, as given by the
3027 // masks from the enumeration __offset_flags_masks.
3028 if (Base.isVirtual())
3029 OffsetFlags |= BCTI_Virtual;
3030 if (Base.getAccessSpecifier() == AS_public)
3031 OffsetFlags |= BCTI_Public;
3032
3033 Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags));
3034 }
3035 }
3036
3037 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
3038 /// used for pointer types.
3039 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
3040 Qualifiers Quals;
3041 QualType UnqualifiedPointeeTy =
3042 CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
3043
3044 // Itanium C++ ABI 2.9.5p7:
3045 // __flags is a flag word describing the cv-qualification and other
3046   //   attributes of the type pointed to.
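  //   (Illustrative example: for a pointer to `const volatile int`, this
  //   yields Flags == PTI_Const | PTI_Volatile.)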
3047 unsigned Flags = ComputeQualifierFlags(Quals);
3048
3049 // Itanium C++ ABI 2.9.5p7:
3050 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
3051 // incomplete class type, the incomplete target type flag is set.
3052 if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
3053 Flags |= PTI_Incomplete;
3054
3055 llvm::Type *UnsignedIntLTy =
3056 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3057 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3058
3059 // Itanium C++ ABI 2.9.5p7:
3060 // __pointee is a pointer to the std::type_info derivation for the
3061 // unqualified type being pointed to.
3062 llvm::Constant *PointeeTypeInfo =
3063 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(UnqualifiedPointeeTy);
3064 Fields.push_back(PointeeTypeInfo);
3065 }
3066
3067 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3068 /// struct, used for member pointer types.
3069 void
3070 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
3071 QualType PointeeTy = Ty->getPointeeType();
3072
3073 Qualifiers Quals;
3074 QualType UnqualifiedPointeeTy =
3075 CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
3076
3077 // Itanium C++ ABI 2.9.5p7:
3078 // __flags is a flag word describing the cv-qualification and other
3079 // attributes of the type pointed to.
3080 unsigned Flags = ComputeQualifierFlags(Quals);
3081
3082 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
3083
3084 // Itanium C++ ABI 2.9.5p7:
3085 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
3086 // incomplete class type, the incomplete target type flag is set.
3087 if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
3088 Flags |= PTI_Incomplete;
3089
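  // (Illustrative: for `int A::*` where A has only been forward-declared, the
  // containing class is incomplete and the flag below is set.)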
3090 if (IsIncompleteClassType(ClassType))
3091 Flags |= PTI_ContainingClassIncomplete;
3092
3093 llvm::Type *UnsignedIntLTy =
3094 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3095 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3096
3097 // Itanium C++ ABI 2.9.5p7:
3098 // __pointee is a pointer to the std::type_info derivation for the
3099 // unqualified type being pointed to.
3100 llvm::Constant *PointeeTypeInfo =
3101 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(UnqualifiedPointeeTy);
3102 Fields.push_back(PointeeTypeInfo);
3103
3104 // Itanium C++ ABI 2.9.5p9:
3105 // __context is a pointer to an abi::__class_type_info corresponding to the
3106 // class type containing the member pointed to
3107 // (e.g., the "A" in "int A::*").
3108 Fields.push_back(
3109 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
3110 }
3111
3112 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
3113 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
3114 }
3115
3116 void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type) {
3117 QualType PointerType = getContext().getPointerType(Type);
3118 QualType PointerTypeConst = getContext().getPointerType(Type.withConst());
3119 ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, true);
3120 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, true);
3121 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, true);
3122 }
3123
3124 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors() {
3125 QualType FundamentalTypes[] = {
3126 getContext().VoidTy, getContext().NullPtrTy,
3127 getContext().BoolTy, getContext().WCharTy,
3128 getContext().CharTy, getContext().UnsignedCharTy,
3129 getContext().SignedCharTy, getContext().ShortTy,
3130 getContext().UnsignedShortTy, getContext().IntTy,
3131 getContext().UnsignedIntTy, getContext().LongTy,
3132 getContext().UnsignedLongTy, getContext().LongLongTy,
3133 getContext().UnsignedLongLongTy, getContext().HalfTy,
3134 getContext().FloatTy, getContext().DoubleTy,
3135 getContext().LongDoubleTy, getContext().Char16Ty,
3136 getContext().Char32Ty,
3137 };
3138 for (const QualType &FundamentalType : FundamentalTypes)
3139 EmitFundamentalRTTIDescriptor(FundamentalType);
3140 }
3141
3142 /// What sort of uniqueness rules should we use for the RTTI for the
3143 /// given type?
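/// (Note: on most platforms the ABI requires RTTI to be unique, so this simply
/// returns RUK_Unique; the non-unique classifications below only matter on
/// ABIs, such as 64-bit iOS, where shouldRTTIBeUnique() is false.)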
3144 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
3145 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
3146 if (shouldRTTIBeUnique())
3147 return RUK_Unique;
3148
3149 // It's only necessary for linkonce_odr or weak_odr linkage.
3150 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
3151 Linkage != llvm::GlobalValue::WeakODRLinkage)
3152 return RUK_Unique;
3153
3154 // It's only necessary with default visibility.
3155 if (CanTy->getVisibility() != DefaultVisibility)
3156 return RUK_Unique;
3157
3158 // If we're not required to publish this symbol, hide it.
3159 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3160 return RUK_NonUniqueHidden;
3161
3162 // If we're required to publish this symbol, as we might be under an
3163 // explicit instantiation, leave it with default visibility but
3164 // enable string-comparisons.
3165 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
3166 return RUK_NonUniqueVisible;
3167 }
3168
3169 // Find out how to codegen the complete destructor and constructor
3170 namespace {
3171 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
3172 }
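// (Summary of the strategies below, for orientation: Emit generates the
// complete structor as its own function; RAUW emits only the base variant and
// redirects references to the complete symbol to it; Alias emits the complete
// symbol as a global alias of the base variant; COMDAT does the same but also
// places the base variant in a comdat group named by the C5/D5 comdat
// mangling, which only ELF supports.)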
3173 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
3174 const CXXMethodDecl *MD) {
3175 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
3176 return StructorCodegen::Emit;
3177
3178 // The complete and base structors are not equivalent if there are any virtual
3179 // bases, so emit separate functions.
3180 if (MD->getParent()->getNumVBases())
3181 return StructorCodegen::Emit;
3182
3183 GlobalDecl AliasDecl;
3184 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
3185 AliasDecl = GlobalDecl(DD, Dtor_Complete);
3186 } else {
3187 const auto *CD = cast<CXXConstructorDecl>(MD);
3188 AliasDecl = GlobalDecl(CD, Ctor_Complete);
3189 }
3190 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3191
3192 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
3193 return StructorCodegen::RAUW;
3194
3195 // FIXME: Should we allow available_externally aliases?
3196 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
3197 return StructorCodegen::RAUW;
3198
3199 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
3200 // Only ELF supports COMDATs with arbitrary names (C5/D5).
3201 if (CGM.getTarget().getTriple().isOSBinFormatELF())
3202 return StructorCodegen::COMDAT;
3203 return StructorCodegen::Emit;
3204 }
3205
3206 return StructorCodegen::Alias;
3207 }
3208
3209 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
3210 GlobalDecl AliasDecl,
3211 GlobalDecl TargetDecl) {
3212 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3213
3214 StringRef MangledName = CGM.getMangledName(AliasDecl);
3215 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
3216 if (Entry && !Entry->isDeclaration())
3217 return;
3218
3219 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
3220 llvm::PointerType *AliasType = Aliasee->getType();
3221
3222 // Create the alias with no name.
3223 auto *Alias = llvm::GlobalAlias::create(
3224 AliasType->getElementType(), 0, Linkage, "", Aliasee, &CGM.getModule());
3225
3226 // Switch any previous uses to the alias.
3227 if (Entry) {
3228 assert(Entry->getType() == AliasType &&
3229 "declaration exists with different type");
3230 Alias->takeName(Entry);
3231 Entry->replaceAllUsesWith(Alias);
3232 Entry->eraseFromParent();
3233 } else {
3234 Alias->setName(MangledName);
3235 }
3236
3237 // Finally, set up the alias with its proper name and attributes.
3238 CGM.setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
3239 }
3240
3241 void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
3242 StructorType Type) {
3243 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
3244 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
3245
3246 StructorCodegen CGType = getCodegenToUse(CGM, MD);
3247
3248 if (Type == StructorType::Complete) {
3249 GlobalDecl CompleteDecl;
3250 GlobalDecl BaseDecl;
3251 if (CD) {
3252 CompleteDecl = GlobalDecl(CD, Ctor_Complete);
3253 BaseDecl = GlobalDecl(CD, Ctor_Base);
3254 } else {
3255 CompleteDecl = GlobalDecl(DD, Dtor_Complete);
3256 BaseDecl = GlobalDecl(DD, Dtor_Base);
3257 }
3258
3259 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
3260 emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
3261 return;
3262 }
3263
3264 if (CGType == StructorCodegen::RAUW) {
3265 StringRef MangledName = CGM.getMangledName(CompleteDecl);
3266 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(BaseDecl));
3267 CGM.addReplacement(MangledName, Aliasee);
3268 return;
3269 }
3270 }
3271
3272 // The base destructor is equivalent to the base destructor of its
3273 // base class if there is exactly one non-virtual base class with a
3274 // non-trivial destructor, there are no fields with a non-trivial
3275 // destructor, and the body of the destructor is trivial.
3276 if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
3277 !CGM.TryEmitBaseDestructorAsAlias(DD))
3278 return;
3279
3280 llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);
3281
3282 if (CGType == StructorCodegen::COMDAT) {
3283 SmallString<256> Buffer;
3284 llvm::raw_svector_ostream Out(Buffer);
3285 if (DD)
3286 getMangleContext().mangleCXXDtorComdat(DD, Out);
3287 else
3288 getMangleContext().mangleCXXCtorComdat(CD, Out);
3289 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
3290 Fn->setComdat(C);
3291 } else {
3292 CGM.maybeSetTrivialComdat(*MD, *Fn);
3293 }
3294 }
3295
3296 static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) {
3297 // void *__cxa_begin_catch(void*);
3298 llvm::FunctionType *FTy = llvm::FunctionType::get(
3299 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3300
3301 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
3302 }
3303
3304 static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
3305 // void __cxa_end_catch();
3306 llvm::FunctionType *FTy =
3307 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
3308
3309 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
3310 }
3311
3312 static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
3313 // void *__cxa_get_exception_ptr(void*);
3314 llvm::FunctionType *FTy = llvm::FunctionType::get(
3315 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3316
3317 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
3318 }
3319
3320 namespace {
3321 /// A cleanup to call __cxa_end_catch. In many cases, the caught
3322 /// exception type lets us state definitively that the thrown exception
3323 /// type does not have a destructor. In particular:
3324 /// - Catch-alls tell us nothing, so we have to conservatively
3325 /// assume that the thrown exception might have a destructor.
3326 /// - Catches by reference behave according to their base types.
3327 /// - Catches of non-record types will only trigger for exceptions
3328 /// of non-record types, which never have destructors.
3329 /// - Catches of record types can trigger for arbitrary subclasses
3330 /// of the caught type, so we have to assume the actual thrown
3331 /// exception type might have a throwing destructor, even if the
3332 /// caught type's destructor is trivial or nothrow.
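  ///   For example (illustrative): `catch (int)` lets __cxa_end_catch be
  ///   called as a nounwind runtime call, while `catch (const Base&)` does
  ///   not, since the thrown object may be of a derived class with a
  ///   potentially-throwing destructor.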
3333 struct CallEndCatch : EHScopeStack::Cleanup {
3334     CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
3335 bool MightThrow;
3336
3337     void Emit(CodeGenFunction &CGF, Flags flags) override {
3338 if (!MightThrow) {
3339 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
3340 return;
3341 }
3342
3343 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
3344 }
3345 };
3346 }
3347
3348 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
3349 /// __cxa_end_catch.
3350 ///
3351 /// \param EndMightThrow - true if __cxa_end_catch might throw
3352 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
3353 llvm::Value *Exn,
3354 bool EndMightThrow) {
3355 llvm::CallInst *call =
3356 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
3357
3358 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
3359
3360 return call;
3361 }
3362
3363 /// A "special initializer" callback for initializing a catch
3364 /// parameter during catch initialization.
3365 static void InitCatchParam(CodeGenFunction &CGF,
3366 const VarDecl &CatchParam,
3367 llvm::Value *ParamAddr,
3368 SourceLocation Loc) {
3369 // Load the exception from where the landing pad saved it.
3370 llvm::Value *Exn = CGF.getExceptionFromSlot();
3371
3372 CanQualType CatchType =
3373 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
3374 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
3375
3376 // If we're catching by reference, we can just cast the object
3377 // pointer to the appropriate pointer.
3378 if (isa<ReferenceType>(CatchType)) {
3379 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
3380 bool EndCatchMightThrow = CaughtType->isRecordType();
3381
3382 // __cxa_begin_catch returns the adjusted object pointer.
3383 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
3384
3385 // We have no way to tell the personality function that we're
3386 // catching by reference, so if we're catching a pointer,
3387 // __cxa_begin_catch will actually return that pointer by value.
3388 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
3389 QualType PointeeType = PT->getPointeeType();
3390
3391 // When catching by reference, generally we should just ignore
3392 // this by-value pointer and use the exception object instead.
3393 if (!PointeeType->isRecordType()) {
3394
3395 // Exn points to the struct _Unwind_Exception header, which
3396 // we have to skip past in order to reach the exception data.
3397 unsigned HeaderSize =
3398 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
3399 AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
3400
3401         // However, if we're catching a pointer-to-record type, that won't
3402 // work, because the personality function might have adjusted
3403 // the pointer. There's actually no way for us to fully satisfy
3404 // the language/ABI contract here: we can't use Exn because it
3405 // might have the wrong adjustment, but we can't use the by-value
3406 // pointer because it's off by a level of abstraction.
3407 //
3408 // The current solution is to dump the adjusted pointer into an
3409 // alloca, which breaks language semantics (because changing the
3410 // pointer doesn't change the exception) but at least works.
3411 // The better solution would be to filter out non-exact matches
3412 // and rethrow them, but this is tricky because the rethrow
3413 // really needs to be catchable by other sites at this landing
3414 // pad. The best solution is to fix the personality function.
3415 } else {
3416 // Pull the pointer for the reference type off.
3417 llvm::Type *PtrTy =
3418 cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
3419
3420 // Create the temporary and write the adjusted pointer into it.
3421 llvm::Value *ExnPtrTmp = CGF.CreateTempAlloca(PtrTy, "exn.byref.tmp");
3422 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3423 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
3424
3425 // Bind the reference to the temporary.
3426 AdjustedExn = ExnPtrTmp;
3427 }
3428 }
3429
3430 llvm::Value *ExnCast =
3431 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
3432 CGF.Builder.CreateStore(ExnCast, ParamAddr);
3433 return;
3434 }
3435
3436 // Scalars and complexes.
3437 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
3438 if (TEK != TEK_Aggregate) {
3439 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
3440
3441 // If the catch type is a pointer type, __cxa_begin_catch returns
3442 // the pointer by value.
3443 if (CatchType->hasPointerRepresentation()) {
3444 llvm::Value *CastExn =
3445 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
3446
3447 switch (CatchType.getQualifiers().getObjCLifetime()) {
3448 case Qualifiers::OCL_Strong:
3449 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
3450 // fallthrough
3451
3452 case Qualifiers::OCL_None:
3453 case Qualifiers::OCL_ExplicitNone:
3454 case Qualifiers::OCL_Autoreleasing:
3455 CGF.Builder.CreateStore(CastExn, ParamAddr);
3456 return;
3457
3458 case Qualifiers::OCL_Weak:
3459 CGF.EmitARCInitWeak(ParamAddr, CastExn);
3460 return;
3461 }
3462 llvm_unreachable("bad ownership qualifier!");
3463 }
3464
3465 // Otherwise, it returns a pointer into the exception object.
3466
3467 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3468 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3469
3470 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
3471 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType,
3472 CGF.getContext().getDeclAlign(&CatchParam));
3473 switch (TEK) {
3474 case TEK_Complex:
3475 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
3476 /*init*/ true);
3477 return;
3478 case TEK_Scalar: {
3479 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
3480 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
3481 return;
3482 }
3483 case TEK_Aggregate:
3484 llvm_unreachable("evaluation kind filtered out!");
3485 }
3486 llvm_unreachable("bad evaluation kind");
3487 }
3488
3489 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
3490
3491 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3492
3493 // Check for a copy expression. If we don't have a copy expression,
3494 // that means a trivial copy is okay.
3495 const Expr *copyExpr = CatchParam.getInit();
3496 if (!copyExpr) {
3497 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
3498 llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
3499 CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
3500 return;
3501 }
3502
3503 // We have to call __cxa_get_exception_ptr to get the adjusted
3504 // pointer before copying.
3505 llvm::CallInst *rawAdjustedExn =
3506 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
3507
3508 // Cast that to the appropriate type.
3509 llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
3510
3511 // The copy expression is defined in terms of an OpaqueValueExpr.
3512 // Find it and map it to the adjusted expression.
3513 CodeGenFunction::OpaqueValueMapping
3514 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
3515 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
3516
3517 // Call the copy ctor in a terminate scope.
3518 CGF.EHStack.pushTerminate();
3519
3520 // Perform the copy construction.
3521 CharUnits Alignment = CGF.getContext().getDeclAlign(&CatchParam);
3522 CGF.EmitAggExpr(copyExpr,
3523 AggValueSlot::forAddr(ParamAddr, Alignment, Qualifiers(),
3524 AggValueSlot::IsNotDestructed,
3525 AggValueSlot::DoesNotNeedGCBarriers,
3526 AggValueSlot::IsNotAliased));
3527
3528 // Leave the terminate scope.
3529 CGF.EHStack.popTerminate();
3530
3531 // Undo the opaque value mapping.
3532 opaque.pop();
3533
3534 // Finally we can call __cxa_begin_catch.
3535 CallBeginCatch(CGF, Exn, true);
3536 }
3537
3538 /// Begins a catch statement by initializing the catch variable and
3539 /// calling __cxa_begin_catch.
3540 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
3541 const CXXCatchStmt *S) {
3542 // We have to be very careful with the ordering of cleanups here:
3543 // C++ [except.throw]p4:
3544 // The destruction [of the exception temporary] occurs
3545 // immediately after the destruction of the object declared in
3546 // the exception-declaration in the handler.
3547 //
3548 // So the precise ordering is:
3549 // 1. Construct catch variable.
3550 // 2. __cxa_begin_catch
3551 // 3. Enter __cxa_end_catch cleanup
3552 // 4. Enter dtor cleanup
3553 //
3554 // We do this by using a slightly abnormal initialization process.
3555 // Delegation sequence:
3556 // - ExitCXXTryStmt opens a RunCleanupsScope
3557 // - EmitAutoVarAlloca creates the variable and debug info
3558 // - InitCatchParam initializes the variable from the exception
3559 // - CallBeginCatch calls __cxa_begin_catch
3560 // - CallBeginCatch enters the __cxa_end_catch cleanup
3561 // - EmitAutoVarCleanups enters the variable destructor cleanup
3562 // - EmitCXXTryStmt emits the code for the catch body
3563   //   - EmitCXXTryStmt closes the RunCleanupsScope
3564
3565 VarDecl *CatchParam = S->getExceptionDecl();
3566 if (!CatchParam) {
3567 llvm::Value *Exn = CGF.getExceptionFromSlot();
3568 CallBeginCatch(CGF, Exn, true);
3569 return;
3570 }
3571
3572 // Emit the local.
3573 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
3574 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
3575 CGF.EmitAutoVarCleanups(var);
3576 }
3577
3578 /// Get or define the following function:
3579 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
3580 /// This code is used only in C++.
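/// Conceptually (an illustrative sketch, not the emitted IR verbatim), the
/// helper behaves like:
///   void __clang_call_terminate(void *exn) {
///     __cxa_begin_catch(exn);
///     std::terminate();
///   }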
3581 static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
3582 llvm::FunctionType *fnTy =
3583 llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3584 llvm::Constant *fnRef =
3585 CGM.CreateRuntimeFunction(fnTy, "__clang_call_terminate");
3586
3587 llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
3588 if (fn && fn->empty()) {
3589 fn->setDoesNotThrow();
3590 fn->setDoesNotReturn();
3591
3592 // What we really want is to massively penalize inlining without
3593 // forbidding it completely. The difference between that and
3594 // 'noinline' is negligible.
3595 fn->addFnAttr(llvm::Attribute::NoInline);
3596
3597 // Allow this function to be shared across translation units, but
3598 // we don't want it to turn into an exported symbol.
3599 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
3600 fn->setVisibility(llvm::Function::HiddenVisibility);
3601 if (CGM.supportsCOMDAT())
3602 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
3603
3604 // Set up the function.
3605 llvm::BasicBlock *entry =
3606 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
3607 CGBuilderTy builder(entry);
3608
3609 // Pull the exception pointer out of the parameter list.
3610 llvm::Value *exn = &*fn->arg_begin();
3611
3612 // Call __cxa_begin_catch(exn).
3613 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
3614 catchCall->setDoesNotThrow();
3615 catchCall->setCallingConv(CGM.getRuntimeCC());
3616
3617 // Call std::terminate().
3618 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
3619 termCall->setDoesNotThrow();
3620 termCall->setDoesNotReturn();
3621 termCall->setCallingConv(CGM.getRuntimeCC());
3622
3623 // std::terminate cannot return.
3624 builder.CreateUnreachable();
3625 }
3626
3627 return fnRef;
3628 }
3629
3630 llvm::CallInst *
3631 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
3632 llvm::Value *Exn) {
3633 // In C++, we want to call __cxa_begin_catch() before terminating.
3634 if (Exn) {
3635 assert(CGF.CGM.getLangOpts().CPlusPlus);
3636 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
3637 }
3638 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
3639 }
3640