1 //===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code dealing with C++ code generation of classes
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGBlocks.h"
15 #include "CGCXXABI.h"
16 #include "CGDebugInfo.h"
17 #include "CGRecordLayout.h"
18 #include "CodeGenFunction.h"
19 #include "clang/AST/CXXInheritance.h"
20 #include "clang/AST/DeclTemplate.h"
21 #include "clang/AST/EvaluatedExprVisitor.h"
22 #include "clang/AST/RecordLayout.h"
23 #include "clang/AST/StmtCXX.h"
24 #include "clang/Basic/TargetBuiltins.h"
25 #include "clang/CodeGen/CGFunctionInfo.h"
26 #include "clang/Frontend/CodeGenOptions.h"
27 #include "llvm/IR/Intrinsics.h"
28 
29 using namespace clang;
30 using namespace CodeGen;
31 
32 static CharUnits
33 ComputeNonVirtualBaseClassOffset(ASTContext &Context,
34                                  const CXXRecordDecl *DerivedClass,
35                                  CastExpr::path_const_iterator Start,
36                                  CastExpr::path_const_iterator End) {
37   CharUnits Offset = CharUnits::Zero();
38 
39   const CXXRecordDecl *RD = DerivedClass;
40 
41   for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
42     const CXXBaseSpecifier *Base = *I;
43     assert(!Base->isVirtual() && "Should not see virtual bases here!");
44 
45     // Get the layout.
46     const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
47 
48     const CXXRecordDecl *BaseDecl =
49       cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
50 
51     // Add the offset.
52     Offset += Layout.getBaseClassOffset(BaseDecl);
53 
54     RD = BaseDecl;
55   }
56 
57   return Offset;
58 }
59 
60 llvm::Constant *
61 CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
62                                    CastExpr::path_const_iterator PathBegin,
63                                    CastExpr::path_const_iterator PathEnd) {
64   assert(PathBegin != PathEnd && "Base path should not be empty!");
65 
66   CharUnits Offset =
67     ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
68                                      PathBegin, PathEnd);
69   if (Offset.isZero())
70     return nullptr;
71 
72   llvm::Type *PtrDiffTy =
73   Types.ConvertType(getContext().getPointerDiffType());
74 
75   return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
76 }
77 
78 /// Gets the address of a direct base class within a complete object.
79 /// This should only be used for (1) non-virtual bases or (2) virtual bases
80 /// when the type is known to be complete (e.g. in complete destructors).
81 ///
82 /// The object pointed to by 'This' is assumed to be non-null.
83 llvm::Value *
84 CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
85                                                    const CXXRecordDecl *Derived,
86                                                    const CXXRecordDecl *Base,
87                                                    bool BaseIsVirtual) {
88   // 'this' must be a pointer (in some address space) to Derived.
89   assert(This->getType()->isPointerTy() &&
90          cast<llvm::PointerType>(This->getType())->getElementType()
91            == ConvertType(Derived));
92 
93   // Compute the offset of the virtual base.
94   CharUnits Offset;
95   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
96   if (BaseIsVirtual)
97     Offset = Layout.getVBaseClassOffset(Base);
98   else
99     Offset = Layout.getBaseClassOffset(Base);
100 
101   // Shift and cast down to the base type.
102   // TODO: for complete types, this should be possible with a GEP.
103   llvm::Value *V = This;
104   if (Offset.isPositive()) {
105     V = Builder.CreateBitCast(V, Int8PtrTy);
106     V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
107   }
108   V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());
109 
110   return V;
111 }
112 
113 static llvm::Value *
114 ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
115                                 CharUnits nonVirtualOffset,
116                                 llvm::Value *virtualOffset) {
117   // Assert that we have something to do.
118   assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr);
119 
120   // Compute the offset from the static and dynamic components.
121   llvm::Value *baseOffset;
122   if (!nonVirtualOffset.isZero()) {
123     baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
124                                         nonVirtualOffset.getQuantity());
125     if (virtualOffset) {
126       baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
127     }
128   } else {
129     baseOffset = virtualOffset;
130   }
131 
132   // Apply the base offset.
133   ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
134   ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
135   return ptr;
136 }
137 
138 llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
139     llvm::Value *Value, const CXXRecordDecl *Derived,
140     CastExpr::path_const_iterator PathBegin,
141     CastExpr::path_const_iterator PathEnd, bool NullCheckValue,
142     SourceLocation Loc) {
143   assert(PathBegin != PathEnd && "Base path should not be empty!");
144 
145   CastExpr::path_const_iterator Start = PathBegin;
146   const CXXRecordDecl *VBase = nullptr;
147 
148   // Sema has done some convenient canonicalization here: if the
149   // access path involved any virtual steps, the conversion path will
150   // *start* with a step down to the correct virtual base subobject,
151   // and hence will not require any further steps.
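  // For example, given 'struct A {}; struct B : virtual A {}; struct C : B {};',
  // a C* -> A* conversion carries a path that begins directly with the virtual
  // step down to A; any steps that remain after it are non-virtual.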
152   if ((*Start)->isVirtual()) {
153     VBase =
154       cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
155     ++Start;
156   }
157 
158   // Compute the static offset of the ultimate destination within its
159   // allocating subobject (the virtual base, if there is one, or else
160   // the "complete" object that we see).
161   CharUnits NonVirtualOffset =
162     ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
163                                      Start, PathEnd);
164 
165   // If there's a virtual step, we can sometimes "devirtualize" it.
166   // For now, that's limited to when the derived type is final.
167   // TODO: "devirtualize" this for accesses to known-complete objects.
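  // For example, with 'struct D final : virtual B { ... }', the offset of B
  // within D is known statically from D's complete-object layout, so it can be
  // folded into NonVirtualOffset below instead of being loaded via the vtable.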
168   if (VBase && Derived->hasAttr<FinalAttr>()) {
169     const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
170     CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
171     NonVirtualOffset += vBaseOffset;
172     VBase = nullptr; // we no longer have a virtual step
173   }
174 
175   // Get the base pointer type.
176   llvm::Type *BasePtrTy =
177     ConvertType((PathEnd[-1])->getType())->getPointerTo();
178 
179   QualType DerivedTy = getContext().getRecordType(Derived);
180   CharUnits DerivedAlign = getContext().getTypeAlignInChars(DerivedTy);
181 
182   // If the static offset is zero and we don't have a virtual step,
183   // just do a bitcast; null checks are unnecessary.
184   if (NonVirtualOffset.isZero() && !VBase) {
185     if (sanitizePerformTypeCheck()) {
186       EmitTypeCheck(TCK_Upcast, Loc, Value, DerivedTy, DerivedAlign,
187                     !NullCheckValue);
188     }
189     return Builder.CreateBitCast(Value, BasePtrTy);
190   }
191 
192   llvm::BasicBlock *origBB = nullptr;
193   llvm::BasicBlock *endBB = nullptr;
194 
195   // Skip over the offset (and the vtable load) if we're supposed to
196   // null-check the pointer.
197   if (NullCheckValue) {
198     origBB = Builder.GetInsertBlock();
199     llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
200     endBB = createBasicBlock("cast.end");
201 
202     llvm::Value *isNull = Builder.CreateIsNull(Value);
203     Builder.CreateCondBr(isNull, endBB, notNullBB);
204     EmitBlock(notNullBB);
205   }
206 
207   if (sanitizePerformTypeCheck()) {
208     EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, Value,
209                   DerivedTy, DerivedAlign, true);
210   }
211 
212   // Compute the virtual offset.
213   llvm::Value *VirtualOffset = nullptr;
214   if (VBase) {
215     VirtualOffset =
216       CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
217   }
218 
219   // Apply both offsets.
220   Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
221                                           NonVirtualOffset,
222                                           VirtualOffset);
223 
224   // Cast to the destination type.
225   Value = Builder.CreateBitCast(Value, BasePtrTy);
226 
227   // Build a phi if we needed a null check.
228   if (NullCheckValue) {
229     llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
230     Builder.CreateBr(endBB);
231     EmitBlock(endBB);
232 
233     llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
234     PHI->addIncoming(Value, notNullBB);
235     PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
236     Value = PHI;
237   }
238 
239   return Value;
240 }
241 
242 llvm::Value *
243 CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
244                                           const CXXRecordDecl *Derived,
245                                         CastExpr::path_const_iterator PathBegin,
246                                           CastExpr::path_const_iterator PathEnd,
247                                           bool NullCheckValue) {
248   assert(PathBegin != PathEnd && "Base path should not be empty!");
249 
250   QualType DerivedTy =
251     getContext().getCanonicalType(getContext().getTagDeclType(Derived));
252   llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
253 
254   llvm::Value *NonVirtualOffset =
255     CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
256 
257   if (!NonVirtualOffset) {
258     // No offset, we can just cast back.
259     return Builder.CreateBitCast(Value, DerivedPtrTy);
260   }
261 
262   llvm::BasicBlock *CastNull = nullptr;
263   llvm::BasicBlock *CastNotNull = nullptr;
264   llvm::BasicBlock *CastEnd = nullptr;
265 
266   if (NullCheckValue) {
267     CastNull = createBasicBlock("cast.null");
268     CastNotNull = createBasicBlock("cast.notnull");
269     CastEnd = createBasicBlock("cast.end");
270 
271     llvm::Value *IsNull = Builder.CreateIsNull(Value);
272     Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
273     EmitBlock(CastNotNull);
274   }
275 
276   // Apply the offset.
277   Value = Builder.CreateBitCast(Value, Int8PtrTy);
278   Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
279                             "sub.ptr");
280 
281   // Just cast.
282   Value = Builder.CreateBitCast(Value, DerivedPtrTy);
283 
284   if (NullCheckValue) {
285     Builder.CreateBr(CastEnd);
286     EmitBlock(CastNull);
287     Builder.CreateBr(CastEnd);
288     EmitBlock(CastEnd);
289 
290     llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
291     PHI->addIncoming(Value, CastNotNull);
292     PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
293                      CastNull);
294     Value = PHI;
295   }
296 
297   return Value;
298 }
299 
300 llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
301                                               bool ForVirtualBase,
302                                               bool Delegating) {
303   if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
304     // This constructor/destructor does not need a VTT parameter.
305     return nullptr;
306   }
307 
308   const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
309   const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
310 
311   llvm::Value *VTT;
312 
313   uint64_t SubVTTIndex;
314 
315   if (Delegating) {
316     // If this is a delegating constructor call, just load the VTT.
317     return LoadCXXVTT();
318   } else if (RD == Base) {
319     // If the record matches the base, this is the complete ctor/dtor
320     // variant calling the base variant in a class with virtual bases.
321     assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
322            "doing no-op VTT offset in base dtor/ctor?");
323     assert(!ForVirtualBase && "Can't have same class as virtual base!");
324     SubVTTIndex = 0;
325   } else {
326     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
327     CharUnits BaseOffset = ForVirtualBase ?
328       Layout.getVBaseClassOffset(Base) :
329       Layout.getBaseClassOffset(Base);
330 
331     SubVTTIndex =
332       CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
333     assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
334   }
335 
336   if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
337     // A VTT parameter was passed to the constructor, use it.
338     VTT = LoadCXXVTT();
339     VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
340   } else {
341     // We're the complete constructor, so get the VTT by name.
342     VTT = CGM.getVTables().GetAddrOfVTT(RD);
343     VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
344   }
345 
346   return VTT;
347 }
348 
349 namespace {
350   /// Call the destructor for a direct base class.
351   struct CallBaseDtor : EHScopeStack::Cleanup {
352     const CXXRecordDecl *BaseClass;
353     bool BaseIsVirtual;
354     CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
355       : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
356 
357     void Emit(CodeGenFunction &CGF, Flags flags) override {
358       const CXXRecordDecl *DerivedClass =
359         cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
360 
361       const CXXDestructorDecl *D = BaseClass->getDestructor();
362       llvm::Value *Addr =
363         CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
364                                                   DerivedClass, BaseClass,
365                                                   BaseIsVirtual);
366       CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
367                                 /*Delegating=*/false, Addr);
368     }
369   };
370 
371   /// A visitor which checks whether an initializer uses 'this' in a
372   /// way which requires the vtable to be properly set.
373   struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
374     typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;
375 
376     bool UsesThis;
377 
378     DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}
379 
380     // Black-list all explicit and implicit references to 'this'.
381     //
382     // Do we need to worry about external references to 'this' derived
383     // from arbitrary code?  If so, then anything which runs arbitrary
384     // external code might potentially access the vtable.
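    // For example, a base initializer written as 'B(this->seed())', or one that
    // passes 'this' to an outside function, is flagged by this checker, so
    // EmitBaseInitializer will set up the vtable pointers before emitting it.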
385     void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
386   };
387 }
388 
389 static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
390   DynamicThisUseChecker Checker(C);
391   Checker.Visit(const_cast<Expr*>(Init));
392   return Checker.UsesThis;
393 }
394 
395 static void EmitBaseInitializer(CodeGenFunction &CGF,
396                                 const CXXRecordDecl *ClassDecl,
397                                 CXXCtorInitializer *BaseInit,
398                                 CXXCtorType CtorType) {
399   assert(BaseInit->isBaseInitializer() &&
400          "Must have base initializer!");
401 
402   llvm::Value *ThisPtr = CGF.LoadCXXThis();
403 
404   const Type *BaseType = BaseInit->getBaseClass();
405   CXXRecordDecl *BaseClassDecl =
406     cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
407 
408   bool isBaseVirtual = BaseInit->isBaseVirtual();
409 
410   // The base constructor doesn't construct virtual bases.
411   if (CtorType == Ctor_Base && isBaseVirtual)
412     return;
413 
414   // If the initializer for the base (other than the constructor
415   // itself) accesses 'this' in any way, we need to initialize the
416   // vtables.
417   if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
418     CGF.InitializeVTablePointers(ClassDecl);
419 
420   // We can pretend to be a complete class because it only matters for
421   // virtual bases, and we only do virtual bases for complete ctors.
422   llvm::Value *V =
423     CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
424                                               BaseClassDecl,
425                                               isBaseVirtual);
426   CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
427   AggValueSlot AggSlot =
428     AggValueSlot::forAddr(V, Alignment, Qualifiers(),
429                           AggValueSlot::IsDestructed,
430                           AggValueSlot::DoesNotNeedGCBarriers,
431                           AggValueSlot::IsNotAliased);
432 
433   CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
434 
435   if (CGF.CGM.getLangOpts().Exceptions &&
436       !BaseClassDecl->hasTrivialDestructor())
437     CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
438                                           isBaseVirtual);
439 }
440 
441 static void EmitAggMemberInitializer(CodeGenFunction &CGF,
442                                      LValue LHS,
443                                      Expr *Init,
444                                      llvm::Value *ArrayIndexVar,
445                                      QualType T,
446                                      ArrayRef<VarDecl *> ArrayIndexes,
447                                      unsigned Index) {
448   if (Index == ArrayIndexes.size()) {
449     LValue LV = LHS;
450 
451     if (ArrayIndexVar) {
452       // If we have an array index variable, load it and use it as an offset.
453       // Then, increment the value.
454       llvm::Value *Dest = LHS.getAddress();
455       llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
456       Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
457       llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
458       Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
459       CGF.Builder.CreateStore(Next, ArrayIndexVar);
460 
461       // Update the LValue.
462       LV.setAddress(Dest);
463       CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
464       LV.setAlignment(std::min(Align, LV.getAlignment()));
465     }
466 
467     switch (CGF.getEvaluationKind(T)) {
468     case TEK_Scalar:
469       CGF.EmitScalarInit(Init, /*decl*/ nullptr, LV, false);
470       break;
471     case TEK_Complex:
472       CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
473       break;
474     case TEK_Aggregate: {
475       AggValueSlot Slot =
476         AggValueSlot::forLValue(LV,
477                                 AggValueSlot::IsDestructed,
478                                 AggValueSlot::DoesNotNeedGCBarriers,
479                                 AggValueSlot::IsNotAliased);
480 
481       CGF.EmitAggExpr(Init, Slot);
482       break;
483     }
484     }
485 
486     return;
487   }
488 
489   const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
490   assert(Array && "Array initialization without the array type?");
491   llvm::Value *IndexVar
492     = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
493   assert(IndexVar && "Array index variable not loaded");
494 
495   // Initialize this index variable to zero.
496   llvm::Value* Zero
497     = llvm::Constant::getNullValue(
498                               CGF.ConvertType(CGF.getContext().getSizeType()));
499   CGF.Builder.CreateStore(Zero, IndexVar);
500 
501   // Start the loop with a block that tests the condition.
502   llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
503   llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
504 
505   CGF.EmitBlock(CondBlock);
506 
507   llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
508   // Generate: if (loop-index < number-of-elements) fall to the loop body,
509   // otherwise, go to the block after the for-loop.
510   uint64_t NumElements = Array->getSize().getZExtValue();
511   llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
512   llvm::Value *NumElementsPtr =
513     llvm::ConstantInt::get(Counter->getType(), NumElements);
514   llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
515                                                   "isless");
516 
517   // If the condition is true, execute the body.
518   CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);
519 
520   CGF.EmitBlock(ForBody);
521   llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
522 
523   // Inside the loop body recurse to emit the inner loop or, eventually, the
524   // constructor call.
525   EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
526                            Array->getElementType(), ArrayIndexes, Index + 1);
527 
528   CGF.EmitBlock(ContinueBlock);
529 
530   // Emit the increment of the loop counter.
531   llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
532   Counter = CGF.Builder.CreateLoad(IndexVar);
533   NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
534   CGF.Builder.CreateStore(NextVal, IndexVar);
535 
536   // Finally, branch back up to the condition for the next iteration.
537   CGF.EmitBranch(CondBlock);
538 
539   // Emit the fall-through block.
540   CGF.EmitBlock(AfterFor, true);
541 }
542 
543 static void EmitMemberInitializer(CodeGenFunction &CGF,
544                                   const CXXRecordDecl *ClassDecl,
545                                   CXXCtorInitializer *MemberInit,
546                                   const CXXConstructorDecl *Constructor,
547                                   FunctionArgList &Args) {
548   ApplyDebugLocation Loc(CGF, MemberInit->getSourceLocation());
549   assert(MemberInit->isAnyMemberInitializer() &&
550          "Must have member initializer!");
551   assert(MemberInit->getInit() && "Must have initializer!");
552 
553   // non-static data member initializers.
554   FieldDecl *Field = MemberInit->getAnyMember();
555   QualType FieldType = Field->getType();
556 
557   llvm::Value *ThisPtr = CGF.LoadCXXThis();
558   QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
559   LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
560 
561   if (MemberInit->isIndirectMemberInitializer()) {
562     // If we are initializing an anonymous union field, drill down to
563     // the field.
564     IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
565     for (const auto *I : IndirectField->chain())
566       LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(I));
567     FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
568   } else {
569     LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
570   }
571 
572   // Special case: if we are in a copy or move constructor, and we are copying
573   // an array of PODs or classes with trivial copy constructors, ignore the
574   // AST and perform the copy we know is equivalent.
575   // FIXME: This is hacky at best... if we had a bit more explicit information
576   // in the AST, we could generalize it more easily.
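  // For example, for 'struct S { int buf[64]; };' the implicitly-defined copy
  // constructor can copy 'buf' with a single aggregate copy (effectively a
  // memcpy) rather than a per-element construction loop.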
577   const ConstantArrayType *Array
578     = CGF.getContext().getAsConstantArrayType(FieldType);
579   if (Array && Constructor->isDefaulted() &&
580       Constructor->isCopyOrMoveConstructor()) {
581     QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
582     CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
583     if (BaseElementTy.isPODType(CGF.getContext()) ||
584         (CE && CE->getConstructor()->isTrivial())) {
585       unsigned SrcArgIndex =
586           CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args);
587       llvm::Value *SrcPtr
588         = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
589       LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
590       LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);
591 
592       // Copy the aggregate.
593       CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
594                             LHS.isVolatileQualified());
595       return;
596     }
597   }
598 
599   ArrayRef<VarDecl *> ArrayIndexes;
600   if (MemberInit->getNumArrayIndices())
601     ArrayIndexes = MemberInit->getArrayIndexes();
602   CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
603 }
604 
605 void CodeGenFunction::EmitInitializerForField(
606     FieldDecl *Field, LValue LHS, Expr *Init,
607     ArrayRef<VarDecl *> ArrayIndexes) {
608   QualType FieldType = Field->getType();
609   switch (getEvaluationKind(FieldType)) {
610   case TEK_Scalar:
611     if (LHS.isSimple()) {
612       EmitExprAsInit(Init, Field, LHS, false);
613     } else {
614       RValue RHS = RValue::get(EmitScalarExpr(Init));
615       EmitStoreThroughLValue(RHS, LHS);
616     }
617     break;
618   case TEK_Complex:
619     EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
620     break;
621   case TEK_Aggregate: {
622     llvm::Value *ArrayIndexVar = nullptr;
623     if (ArrayIndexes.size()) {
624       llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
625 
626       // The LHS is a pointer to the first object we'll be constructing, as
627       // a flat array.
628       QualType BaseElementTy = getContext().getBaseElementType(FieldType);
629       llvm::Type *BasePtr = ConvertType(BaseElementTy);
630       BasePtr = llvm::PointerType::getUnqual(BasePtr);
631       llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
632                                                        BasePtr);
633       LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
634 
635       // Create an array index that will be used to walk over all of the
636       // objects we're constructing.
637       ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
638       llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
639       Builder.CreateStore(Zero, ArrayIndexVar);
640 
641 
642       // Emit the block variables for the array indices, if any.
643       for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
644         EmitAutoVarDecl(*ArrayIndexes[I]);
645     }
646 
647     EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
648                              ArrayIndexes, 0);
649   }
650   }
651 
652   // Ensure that we destroy this object if an exception is thrown
653   // later in the constructor.
654   QualType::DestructionKind dtorKind = FieldType.isDestructedType();
655   if (needsEHCleanup(dtorKind))
656     pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
657 }
658 
659 /// Checks whether the given constructor is a valid subject for the
660 /// complete-to-base constructor delegation optimization, i.e.
661 /// emitting the complete constructor as a simple call to the base
662 /// constructor.
663 static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
664 
665   // Currently we disable the optimization for classes with virtual
666   // bases because (1) the addresses of parameter variables need to be
667   // consistent across all initializers but (2) the delegate function
668   // call necessarily creates a second copy of the parameter variable.
669   //
670   // The limiting example (purely theoretical AFAIK):
671   //   struct A { A(int &c) { c++; } };
672   //   struct B : virtual A {
673   //     B(int count) : A(count) { printf("%d\n", count); }
674   //   };
675   // ...although even this example could in principle be emitted as a
676   // delegation since the address of the parameter doesn't escape.
677   if (Ctor->getParent()->getNumVBases()) {
678     // TODO: white-list trivial vbase initializers.  This case wouldn't
679     // be subject to the restrictions below.
680 
681     // TODO: white-list cases where:
682     //  - there are no non-reference parameters to the constructor
683     //  - the initializers don't access any non-reference parameters
684     //  - the initializers don't take the address of non-reference
685     //    parameters
686     //  - etc.
687     // If we ever add any of the above cases, remember that:
688     //  - function-try-blocks will always blacklist this optimization
689     //  - we need to perform the constructor prologue and cleanup in
690     //    EmitConstructorBody.
691 
692     return false;
693   }
694 
695   // We also disable the optimization for variadic functions because
696   // it's impossible to "re-pass" varargs.
697   if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
698     return false;
699 
700   // FIXME: Decide if we can do a delegation of a delegating constructor.
701   if (Ctor->isDelegatingConstructor())
702     return false;
703 
704   return true;
705 }
706 
707 // Emit code in ctor (Prologue==true) or dtor (Prologue==false)
708 // to poison the extra field paddings inserted under
709 // -fsanitize-address-field-padding=1|2.
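// For example, when ClassDecl->mayInsertExtraPadding() is true the record
// layout contains extra paddings between fields; the constructor poisons those
// paddings via __asan_poison_intra_object_redzone and the destructor unpoisons
// them via __asan_unpoison_intra_object_redzone.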
710 void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) {
711   ASTContext &Context = getContext();
712   const CXXRecordDecl *ClassDecl =
713       Prologue ? cast<CXXConstructorDecl>(CurGD.getDecl())->getParent()
714                : cast<CXXDestructorDecl>(CurGD.getDecl())->getParent();
715   if (!ClassDecl->mayInsertExtraPadding()) return;
716 
717   struct SizeAndOffset {
718     uint64_t Size;
719     uint64_t Offset;
720   };
721 
722   unsigned PtrSize = CGM.getDataLayout().getPointerSizeInBits();
723   const ASTRecordLayout &Info = Context.getASTRecordLayout(ClassDecl);
724 
725   // Populate sizes and offsets of fields.
726   SmallVector<SizeAndOffset, 16> SSV(Info.getFieldCount());
727   for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i)
728     SSV[i].Offset =
729         Context.toCharUnitsFromBits(Info.getFieldOffset(i)).getQuantity();
730 
731   size_t NumFields = 0;
732   for (const auto *Field : ClassDecl->fields()) {
733     const FieldDecl *D = Field;
734     std::pair<CharUnits, CharUnits> FieldInfo =
735         Context.getTypeInfoInChars(D->getType());
736     CharUnits FieldSize = FieldInfo.first;
737     assert(NumFields < SSV.size());
738     SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity();
739     NumFields++;
740   }
741   assert(NumFields == SSV.size());
742   if (SSV.size() <= 1) return;
743 
744   // We will insert calls to __asan_* run-time functions.
745   // LLVM AddressSanitizer pass may decide to inline them later.
746   llvm::Type *Args[2] = {IntPtrTy, IntPtrTy};
747   llvm::FunctionType *FTy =
748       llvm::FunctionType::get(CGM.VoidTy, Args, false);
749   llvm::Constant *F = CGM.CreateRuntimeFunction(
750       FTy, Prologue ? "__asan_poison_intra_object_redzone"
751                     : "__asan_unpoison_intra_object_redzone");
752 
753   llvm::Value *ThisPtr = LoadCXXThis();
754   ThisPtr = Builder.CreatePtrToInt(ThisPtr, IntPtrTy);
755   uint64_t TypeSize = Info.getNonVirtualSize().getQuantity();
756   // For each field check if it has sufficient padding,
757   // if so (un)poison it with a call.
758   for (size_t i = 0; i < SSV.size(); i++) {
759     uint64_t AsanAlignment = 8;
760     uint64_t NextField = i == SSV.size() - 1 ? TypeSize : SSV[i + 1].Offset;
761     uint64_t PoisonSize = NextField - SSV[i].Offset - SSV[i].Size;
762     uint64_t EndOffset = SSV[i].Offset + SSV[i].Size;
763     if (PoisonSize < AsanAlignment || !SSV[i].Size ||
764         (NextField % AsanAlignment) != 0)
765       continue;
766     Builder.CreateCall2(
767         F, Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)),
768         Builder.getIntN(PtrSize, PoisonSize));
769   }
770 }
771 
772 /// EmitConstructorBody - Emits the body of the current constructor.
773 void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
774   EmitAsanPrologueOrEpilogue(true);
775   const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
776   CXXCtorType CtorType = CurGD.getCtorType();
777 
778   assert((CGM.getTarget().getCXXABI().hasConstructorVariants() ||
779           CtorType == Ctor_Complete) &&
780          "can only generate complete ctor for this ABI");
781 
782   // Before we go any further, try the complete->base constructor
783   // delegation optimization.
784   if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
785       CGM.getTarget().getCXXABI().hasConstructorVariants()) {
786     EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
787     return;
788   }
789 
790   const FunctionDecl *Definition = nullptr;
791   Stmt *Body = Ctor->getBody(Definition);
792   assert(Definition == Ctor && "emitting wrong constructor body");
793 
794   // Enter the function-try-block before the constructor prologue if
795   // applicable.
796   bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
797   if (IsTryBody)
798     EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
799 
800   RegionCounter Cnt = getPGORegionCounter(Body);
801   Cnt.beginRegion(Builder);
802 
803   RunCleanupsScope RunCleanups(*this);
804 
805   // TODO: in restricted cases, we can emit the vbase initializers of
806   // a complete ctor and then delegate to the base ctor.
807 
808   // Emit the constructor prologue, i.e. the base and member
809   // initializers.
810   EmitCtorPrologue(Ctor, CtorType, Args);
811 
812   // Emit the body of the statement.
813   if (IsTryBody)
814     EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
815   else if (Body)
816     EmitStmt(Body);
817 
818   // Emit any cleanup blocks associated with the member or base
819   // initializers, which includes (along the exceptional path) the
820   // destructors for those members and bases that were fully
821   // constructed.
822   RunCleanups.ForceCleanup();
823 
824   if (IsTryBody)
825     ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
826 }
827 
828 namespace {
829   /// RAII object to indicate that codegen is copying the value representation
830   /// instead of the object representation. Useful when copying a struct or
831   /// class which has uninitialized members and we're only performing
832   /// lvalue-to-rvalue conversion on the object but not its members.
833   class CopyingValueRepresentation {
834   public:
835     explicit CopyingValueRepresentation(CodeGenFunction &CGF)
836         : CGF(CGF), OldSanOpts(CGF.SanOpts) {
837       CGF.SanOpts.set(SanitizerKind::Bool, false);
838       CGF.SanOpts.set(SanitizerKind::Enum, false);
839     }
840     ~CopyingValueRepresentation() {
841       CGF.SanOpts = OldSanOpts;
842     }
843   private:
844     CodeGenFunction &CGF;
845     SanitizerSet OldSanOpts;
846   };
847 }
848 
849 namespace {
850   class FieldMemcpyizer {
851   public:
852     FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
853                     const VarDecl *SrcRec)
854       : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
855         RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
856         FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0),
857         LastFieldOffset(0), LastAddedFieldIndex(0) {}
858 
859     bool isMemcpyableField(FieldDecl *F) const {
860       // Never memcpy fields when we are adding poisoned paddings.
861       if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding)
862         return false;
863       Qualifiers Qual = F->getType().getQualifiers();
864       if (Qual.hasVolatile() || Qual.hasObjCLifetime())
865         return false;
866       return true;
867     }
868 
869     void addMemcpyableField(FieldDecl *F) {
870       if (!FirstField)
871         addInitialField(F);
872       else
873         addNextField(F);
874     }
875 
876     CharUnits getMemcpySize(uint64_t FirstByteOffset) const {
877       unsigned LastFieldSize =
878         LastField->isBitField() ?
879           LastField->getBitWidthValue(CGF.getContext()) :
880           CGF.getContext().getTypeSize(LastField->getType());
881       uint64_t MemcpySizeBits =
882         LastFieldOffset + LastFieldSize - FirstByteOffset +
883         CGF.getContext().getCharWidth() - 1;
884       CharUnits MemcpySize =
885         CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
886       return MemcpySize;
887     }
888 
889     void emitMemcpy() {
890       // Give the subclass a chance to bail out if it feels the memcpy isn't
891       // worth it (e.g. it hasn't aggregated enough data).
892       if (!FirstField) {
893         return;
894       }
895 
896       CharUnits Alignment;
897 
898       uint64_t FirstByteOffset;
899       if (FirstField->isBitField()) {
900         const CGRecordLayout &RL =
901           CGF.getTypes().getCGRecordLayout(FirstField->getParent());
902         const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
903         Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
904         // FirstFieldOffset is not appropriate for bitfields,
905         // it won't tell us what the storage offset should be and thus might not
906         // be properly aligned.
907         //
908         // Instead calculate the storage offset using the offset of the field in
909         // the struct type.
910         const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
911         FirstByteOffset =
912             DL.getStructLayout(RL.getLLVMType())
913                 ->getElementOffsetInBits(RL.getLLVMFieldNo(FirstField));
914       } else {
915         Alignment = CGF.getContext().getDeclAlign(FirstField);
916         FirstByteOffset = FirstFieldOffset;
917       }
918 
919       assert((CGF.getContext().toCharUnitsFromBits(FirstByteOffset) %
920               Alignment) == 0 && "Bad field alignment.");
921 
922       CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
923       QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
924       llvm::Value *ThisPtr = CGF.LoadCXXThis();
925       LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
926       LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
927       llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
928       LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
929       LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);
930 
931       emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
932                    Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
933                    MemcpySize, Alignment);
934       reset();
935     }
936 
937     void reset() {
938       FirstField = nullptr;
939     }
940 
941   protected:
942     CodeGenFunction &CGF;
943     const CXXRecordDecl *ClassDecl;
944 
945   private:
946 
947     void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
948                       CharUnits Size, CharUnits Alignment) {
949       llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
950       llvm::Type *DBP =
951         llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
952       DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);
953 
954       llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
955       llvm::Type *SBP =
956         llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
957       SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);
958 
959       CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
960                                Alignment.getQuantity());
961     }
962 
963     void addInitialField(FieldDecl *F) {
964         FirstField = F;
965         LastField = F;
966         FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
967         LastFieldOffset = FirstFieldOffset;
968         LastAddedFieldIndex = F->getFieldIndex();
969         return;
970       }
971 
972     void addNextField(FieldDecl *F) {
973       // For the most part, the following invariant will hold:
974       //   F->getFieldIndex() == LastAddedFieldIndex + 1
975       // The one exception is that Sema won't add a copy-initializer for an
976       // unnamed bitfield, which will show up here as a gap in the sequence.
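      // For example, in 'struct S { int a; int : 16; int b; };' the implicit
      // copy constructor has no initializer for the unnamed bit-field, so 'b'
      // arrives here with a field index two greater than that of 'a'.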
977       assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
978              "Cannot aggregate fields out of order.");
979       LastAddedFieldIndex = F->getFieldIndex();
980 
981       // The 'first' and 'last' fields are chosen by offset, rather than field
982       // index. This allows the code to support bitfields, as well as regular
983       // fields.
984       uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
985       if (FOffset < FirstFieldOffset) {
986         FirstField = F;
987         FirstFieldOffset = FOffset;
988       } else if (FOffset > LastFieldOffset) {
989         LastField = F;
990         LastFieldOffset = FOffset;
991       }
992     }
993 
994     const VarDecl *SrcRec;
995     const ASTRecordLayout &RecLayout;
996     FieldDecl *FirstField;
997     FieldDecl *LastField;
998     uint64_t FirstFieldOffset, LastFieldOffset;
999     unsigned LastAddedFieldIndex;
1000   };
1001 
1002   class ConstructorMemcpyizer : public FieldMemcpyizer {
1003   private:
1004 
1005     /// Get source argument for copy constructor. Returns null if not a copy
1006     /// constructor.
1007     static const VarDecl *getTrivialCopySource(CodeGenFunction &CGF,
1008                                                const CXXConstructorDecl *CD,
1009                                                FunctionArgList &Args) {
1010       if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
1011         return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)];
1012       return nullptr;
1013     }
1014 
1015     // Returns true if a CXXCtorInitializer represents a member initialization
1016     // that can be rolled into a memcpy.
1017     bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
1018       if (!MemcpyableCtor)
1019         return false;
1020       FieldDecl *Field = MemberInit->getMember();
1021       assert(Field && "No field for member init.");
1022       QualType FieldType = Field->getType();
1023       CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
1024 
1025       // Bail out on non-POD, not-trivially-constructible members.
1026       if (!(CE && CE->getConstructor()->isTrivial()) &&
1027           !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
1028             FieldType->isReferenceType()))
1029         return false;
1030 
1031       // Bail out on volatile fields.
1032       if (!isMemcpyableField(Field))
1033         return false;
1034 
1035       // Otherwise we're good.
1036       return true;
1037     }
1038 
1039   public:
1040     ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
1041                           FunctionArgList &Args)
1042       : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CGF, CD, Args)),
1043         ConstructorDecl(CD),
1044         MemcpyableCtor(CD->isDefaulted() &&
1045                        CD->isCopyOrMoveConstructor() &&
1046                        CGF.getLangOpts().getGC() == LangOptions::NonGC),
1047         Args(Args) { }
1048 
1049     void addMemberInitializer(CXXCtorInitializer *MemberInit) {
1050       if (isMemberInitMemcpyable(MemberInit)) {
1051         AggregatedInits.push_back(MemberInit);
1052         addMemcpyableField(MemberInit->getMember());
1053       } else {
1054         emitAggregatedInits();
1055         EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
1056                               ConstructorDecl, Args);
1057       }
1058     }
1059 
1060     void emitAggregatedInits() {
1061       if (AggregatedInits.size() <= 1) {
1062         // This memcpy is too small to be worthwhile. Fall back on default
1063         // codegen.
1064         if (!AggregatedInits.empty()) {
1065           CopyingValueRepresentation CVR(CGF);
1066           EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
1067                                 AggregatedInits[0], ConstructorDecl, Args);
1068         }
1069         reset();
1070         return;
1071       }
1072 
1073       pushEHDestructors();
1074       emitMemcpy();
1075       AggregatedInits.clear();
1076     }
1077 
1078     void pushEHDestructors() {
1079       llvm::Value *ThisPtr = CGF.LoadCXXThis();
1080       QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
1081       LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
1082 
1083       for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
1084         QualType FieldType = AggregatedInits[i]->getMember()->getType();
1085         QualType::DestructionKind dtorKind = FieldType.isDestructedType();
1086         if (CGF.needsEHCleanup(dtorKind))
1087           CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
1088       }
1089     }
1090 
1091     void finish() {
1092       emitAggregatedInits();
1093     }
1094 
1095   private:
1096     const CXXConstructorDecl *ConstructorDecl;
1097     bool MemcpyableCtor;
1098     FunctionArgList &Args;
1099     SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
1100   };
1101 
1102   class AssignmentMemcpyizer : public FieldMemcpyizer {
1103   private:
1104 
1105     // Returns the memcpyable field copied by the given statement, if one
1106     // exists. Otherwise returns null.
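    // Three forms are recognized below: a trivial assignment to a field such
    // as 'x = other.x', a call to a trivial copy/move assignment operator on a
    // member, and a '__builtin_memcpy(&x, &other.x, ...)' between corresponding
    // members.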
1107     FieldDecl *getMemcpyableField(Stmt *S) {
1108       if (!AssignmentsMemcpyable)
1109         return nullptr;
1110       if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
1111         // Recognise trivial assignments.
1112         if (BO->getOpcode() != BO_Assign)
1113           return nullptr;
1114         MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
1115         if (!ME)
1116           return nullptr;
1117         FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
1118         if (!Field || !isMemcpyableField(Field))
1119           return nullptr;
1120         Stmt *RHS = BO->getRHS();
1121         if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
1122           RHS = EC->getSubExpr();
1123         if (!RHS)
1124           return nullptr;
1125         MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
1126         if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
1127           return nullptr;
1128         return Field;
1129       } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
1130         CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
1131         if (!(MD && (MD->isCopyAssignmentOperator() ||
1132                        MD->isMoveAssignmentOperator()) &&
1133               MD->isTrivial()))
1134           return nullptr;
1135         MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
1136         if (!IOA)
1137           return nullptr;
1138         FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
1139         if (!Field || !isMemcpyableField(Field))
1140           return nullptr;
1141         MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
1142         if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
1143           return nullptr;
1144         return Field;
1145       } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
1146         FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
1147         if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
1148           return nullptr;
1149         Expr *DstPtr = CE->getArg(0);
1150         if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
1151           DstPtr = DC->getSubExpr();
1152         UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
1153         if (!DUO || DUO->getOpcode() != UO_AddrOf)
1154           return nullptr;
1155         MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
1156         if (!ME)
1157           return nullptr;
1158         FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
1159         if (!Field || !isMemcpyableField(Field))
1160           return nullptr;
1161         Expr *SrcPtr = CE->getArg(1);
1162         if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
1163           SrcPtr = SC->getSubExpr();
1164         UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
1165         if (!SUO || SUO->getOpcode() != UO_AddrOf)
1166           return nullptr;
1167         MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
1168         if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
1169           return nullptr;
1170         return Field;
1171       }
1172 
1173       return nullptr;
1174     }
1175 
1176     bool AssignmentsMemcpyable;
1177     SmallVector<Stmt*, 16> AggregatedStmts;
1178 
1179   public:
1180 
1181     AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
1182                          FunctionArgList &Args)
1183       : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
1184         AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
1185       assert(Args.size() == 2);
1186     }
1187 
1188     void emitAssignment(Stmt *S) {
1189       FieldDecl *F = getMemcpyableField(S);
1190       if (F) {
1191         addMemcpyableField(F);
1192         AggregatedStmts.push_back(S);
1193       } else {
1194         emitAggregatedStmts();
1195         CGF.EmitStmt(S);
1196       }
1197     }
1198 
1199     void emitAggregatedStmts() {
1200       if (AggregatedStmts.size() <= 1) {
1201         if (!AggregatedStmts.empty()) {
1202           CopyingValueRepresentation CVR(CGF);
1203           CGF.EmitStmt(AggregatedStmts[0]);
1204         }
1205         reset();
1206       }
1207 
1208       emitMemcpy();
1209       AggregatedStmts.clear();
1210     }
1211 
1212     void finish() {
1213       emitAggregatedStmts();
1214     }
1215   };
1216 
1217 }
1218 
1219 /// EmitCtorPrologue - This routine generates necessary code to initialize
1220 /// base classes and non-static data members belonging to this constructor.
1221 void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
1222                                        CXXCtorType CtorType,
1223                                        FunctionArgList &Args) {
1224   if (CD->isDelegatingConstructor())
1225     return EmitDelegatingCXXConstructorCall(CD, Args);
1226 
1227   const CXXRecordDecl *ClassDecl = CD->getParent();
1228 
1229   CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
1230                                           E = CD->init_end();
1231 
1232   llvm::BasicBlock *BaseCtorContinueBB = nullptr;
1233   if (ClassDecl->getNumVBases() &&
1234       !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
1235     // The ABIs that don't have constructor variants need to put a branch
1236     // before the virtual base initialization code.
1237     BaseCtorContinueBB =
1238       CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
1239     assert(BaseCtorContinueBB);
1240   }
1241 
1242   // Virtual base initializers first.
1243   for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
1244     EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
1245   }
1246 
1247   if (BaseCtorContinueBB) {
1248     // Complete object handler should continue to the remaining initializers.
1249     Builder.CreateBr(BaseCtorContinueBB);
1250     EmitBlock(BaseCtorContinueBB);
1251   }
1252 
1253   // Then, non-virtual base initializers.
1254   for (; B != E && (*B)->isBaseInitializer(); B++) {
1255     assert(!(*B)->isBaseVirtual());
1256     EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
1257   }
1258 
1259   InitializeVTablePointers(ClassDecl);
1260 
1261   // And finally, initialize class members.
1262   FieldConstructionScope FCS(*this, CXXThisValue);
1263   ConstructorMemcpyizer CM(*this, CD, Args);
1264   for (; B != E; B++) {
1265     CXXCtorInitializer *Member = (*B);
1266     assert(!Member->isBaseInitializer());
1267     assert(Member->isAnyMemberInitializer() &&
1268            "Delegating initializer on non-delegating constructor");
1269     CM.addMemberInitializer(Member);
1270   }
1271   CM.finish();
1272 }
1273 
1274 static bool
1275 FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);
1276 
1277 static bool
1278 HasTrivialDestructorBody(ASTContext &Context,
1279                          const CXXRecordDecl *BaseClassDecl,
1280                          const CXXRecordDecl *MostDerivedClassDecl)
1281 {
1282   // If the destructor is trivial we don't have to check anything else.
1283   if (BaseClassDecl->hasTrivialDestructor())
1284     return true;
1285 
1286   if (!BaseClassDecl->getDestructor()->hasTrivialBody())
1287     return false;
1288 
1289   // Check fields.
1290   for (const auto *Field : BaseClassDecl->fields())
1291     if (!FieldHasTrivialDestructorBody(Context, Field))
1292       return false;
1293 
1294   // Check non-virtual bases.
1295   for (const auto &I : BaseClassDecl->bases()) {
1296     if (I.isVirtual())
1297       continue;
1298 
1299     const CXXRecordDecl *NonVirtualBase =
1300       cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
1301     if (!HasTrivialDestructorBody(Context, NonVirtualBase,
1302                                   MostDerivedClassDecl))
1303       return false;
1304   }
1305 
1306   if (BaseClassDecl == MostDerivedClassDecl) {
1307     // Check virtual bases.
1308     for (const auto &I : BaseClassDecl->vbases()) {
1309       const CXXRecordDecl *VirtualBase =
1310         cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
1311       if (!HasTrivialDestructorBody(Context, VirtualBase,
1312                                     MostDerivedClassDecl))
1313         return false;
1314     }
1315   }
1316 
1317   return true;
1318 }
1319 
1320 static bool
1321 FieldHasTrivialDestructorBody(ASTContext &Context,
1322                               const FieldDecl *Field)
1323 {
1324   QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());
1325 
1326   const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
1327   if (!RT)
1328     return true;
1329 
1330   CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
1331   return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
1332 }
1333 
1334 /// CanSkipVTablePointerInitialization - Check whether we need to initialize
1335 /// any vtable pointers before calling this destructor.
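///
/// Illustrative note (an editorial addition): this returns true only when the
/// destructor has a trivial body and every field, recursively, has a
/// destructor with a trivial body. For example, a class such as
///
///   struct S { int A; std::pair<int, int> B; virtual ~S() {} };
///
/// qualifies, whereas a std::string field would typically make this return
/// false, since destroying it after the (empty) body is still non-trivial.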
1336 static bool CanSkipVTablePointerInitialization(ASTContext &Context,
1337                                                const CXXDestructorDecl *Dtor) {
1338   if (!Dtor->hasTrivialBody())
1339     return false;
1340 
1341   // Check the fields.
1342   const CXXRecordDecl *ClassDecl = Dtor->getParent();
1343   for (const auto *Field : ClassDecl->fields())
1344     if (!FieldHasTrivialDestructorBody(Context, Field))
1345       return false;
1346 
1347   return true;
1348 }
1349 
1350 /// EmitDestructorBody - Emits the body of the current destructor.
1351 void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
1352   const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
1353   CXXDtorType DtorType = CurGD.getDtorType();
1354 
1355   // The call to operator delete in a deleting destructor happens
1356   // outside of the function-try-block, which means it's always
1357   // possible to delegate the destructor body to the complete
1358   // destructor.  Do so.
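  // As a rough sketch (editorial addition, not original source), the deleting
  // variant emitted here behaves like:
  //
  //   void D::~D() /* deleting */ {
  //     this->~D();              // delegate to the complete destructor
  //     operator delete(this);   // pushed as a cleanup by EnterDtorCleanups
  //   }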
1359   if (DtorType == Dtor_Deleting) {
1360     EnterDtorCleanups(Dtor, Dtor_Deleting);
1361     EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
1362                           /*Delegating=*/false, LoadCXXThis());
1363     PopCleanupBlock();
1364     return;
1365   }
1366 
1367   Stmt *Body = Dtor->getBody();
1368 
1369   // If the body is a function-try-block, enter the try before
1370   // anything else.
1371   bool isTryBody = (Body && isa<CXXTryStmt>(Body));
1372   if (isTryBody)
1373     EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
1374   EmitAsanPrologueOrEpilogue(false);
1375 
1376   // Enter the epilogue cleanups.
1377   RunCleanupsScope DtorEpilogue(*this);
1378 
1379   // If this is the complete variant, just invoke the base variant;
1380   // the epilogue will destruct the virtual bases.  But we can't do
1381   // this optimization if the body is a function-try-block, because
1382   // we'd introduce *two* handler blocks.  In the Microsoft ABI, we
1383   // always delegate because we might not have a definition in this TU.
1384   switch (DtorType) {
1385   case Dtor_Comdat:
1386     llvm_unreachable("not expecting a COMDAT");
1387 
1388   case Dtor_Deleting: llvm_unreachable("already handled deleting case");
1389 
1390   case Dtor_Complete:
1391     assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
1392            "can't emit a dtor without a body for non-Microsoft ABIs");
1393 
1394     // Enter the cleanup scopes for virtual bases.
1395     EnterDtorCleanups(Dtor, Dtor_Complete);
1396 
1397     if (!isTryBody) {
1398       EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
1399                             /*Delegating=*/false, LoadCXXThis());
1400       break;
1401     }
1402     // Fallthrough: act like we're in the base variant.
1403 
1404   case Dtor_Base:
1405     assert(Body);
1406 
1407     RegionCounter Cnt = getPGORegionCounter(Body);
1408     Cnt.beginRegion(Builder);
1409 
1410     // Enter the cleanup scopes for fields and non-virtual bases.
1411     EnterDtorCleanups(Dtor, Dtor_Base);
1412 
1413     // Initialize the vtable pointers before entering the body.
1414     if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
1415         InitializeVTablePointers(Dtor->getParent());
1416 
1417     if (isTryBody)
1418       EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
1419     else if (Body)
1420       EmitStmt(Body);
1421     else {
1422       assert(Dtor->isImplicit() && "bodyless dtor not implicit");
1423       // nothing to do besides what's in the epilogue
1424     }
1425     // -fapple-kext must inline any call to this dtor into
1426     // the caller's body.
1427     if (getLangOpts().AppleKext)
1428       CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
1429     break;
1430   }
1431 
1432   // Jump out through the epilogue cleanups.
1433   DtorEpilogue.ForceCleanup();
1434 
1435   // Exit the try if applicable.
1436   if (isTryBody)
1437     ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
1438 }
1439 
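// Illustrative note (editorial addition): for an implicitly-defined copy
// assignment of a class whose members are all trivially copyable, e.g.
//
//   struct P { int X; int Y; double Z; };
//
// the per-member assignments produced by Sema are fed one at a time into the
// AssignmentMemcpyizer below, which tries to coalesce runs of adjacent trivial
// assignments into a single memcpy over the contiguous byte range.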
1440 void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
1441   const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
1442   const Stmt *RootS = AssignOp->getBody();
1443   assert(isa<CompoundStmt>(RootS) &&
1444          "Body of an implicit assignment operator should be compound stmt.");
1445   const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);
1446 
1447   LexicalScope Scope(*this, RootCS->getSourceRange());
1448 
1449   AssignmentMemcpyizer AM(*this, AssignOp, Args);
1450   for (auto *I : RootCS->body())
1451     AM.emitAssignment(I);
1452   AM.finish();
1453 }
1454 
1455 namespace {
1456   /// Call the operator delete associated with the current destructor.
1457   struct CallDtorDelete : EHScopeStack::Cleanup {
1458     CallDtorDelete() {}
1459 
1460     void Emit(CodeGenFunction &CGF, Flags flags) override {
1461       const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
1462       const CXXRecordDecl *ClassDecl = Dtor->getParent();
1463       CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
1464                          CGF.getContext().getTagDeclType(ClassDecl));
1465     }
1466   };
1467 
1468   struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
1469     llvm::Value *ShouldDeleteCondition;
1470   public:
1471     CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
1472       : ShouldDeleteCondition(ShouldDeleteCondition) {
1473       assert(ShouldDeleteCondition != nullptr);
1474     }
1475 
1476     void Emit(CodeGenFunction &CGF, Flags flags) override {
1477       llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
1478       llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
1479       llvm::Value *ShouldCallDelete
1480         = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
1481       CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);
1482 
1483       CGF.EmitBlock(callDeleteBB);
1484       const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
1485       const CXXRecordDecl *ClassDecl = Dtor->getParent();
1486       CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
1487                          CGF.getContext().getTagDeclType(ClassDecl));
1488       CGF.Builder.CreateBr(continueBB);
1489 
1490       CGF.EmitBlock(continueBB);
1491     }
1492   };
1493 
1494   class DestroyField  : public EHScopeStack::Cleanup {
1495     const FieldDecl *field;
1496     CodeGenFunction::Destroyer *destroyer;
1497     bool useEHCleanupForArray;
1498 
1499   public:
1500     DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
1501                  bool useEHCleanupForArray)
1502       : field(field), destroyer(destroyer),
1503         useEHCleanupForArray(useEHCleanupForArray) {}
1504 
1505     void Emit(CodeGenFunction &CGF, Flags flags) override {
1506       // Find the address of the field.
1507       llvm::Value *thisValue = CGF.LoadCXXThis();
1508       QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
1509       LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
1510       LValue LV = CGF.EmitLValueForField(ThisLV, field);
1511       assert(LV.isSimple());
1512 
1513       CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
1514                       flags.isForNormalCleanup() && useEHCleanupForArray);
1515     }
1516   };
1517 }
1518 
1519 /// \brief Emit all of the code that runs at the end of the class's
1520 /// destructor. This calls the destructors of members and base classes in
1521 /// the reverse order of their construction.
1522 void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
1523                                         CXXDtorType DtorType) {
1524   assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) &&
1525          "Should not emit dtor epilogue for non-exported trivial dtor!");
1526 
1527   // The deleting-destructor phase just needs to call the appropriate
1528   // operator delete that Sema picked up.
1529   if (DtorType == Dtor_Deleting) {
1530     assert(DD->getOperatorDelete() &&
1531            "operator delete missing - EnterDtorCleanups");
1532     if (CXXStructorImplicitParamValue) {
1533       // If there is an implicit param to the deleting dtor, it's a boolean
1534       // telling whether we should call delete at the end of the dtor.
1535       EHStack.pushCleanup<CallDtorDeleteConditional>(
1536           NormalAndEHCleanup, CXXStructorImplicitParamValue);
1537     } else {
1538       EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
1539     }
1540     return;
1541   }
1542 
1543   const CXXRecordDecl *ClassDecl = DD->getParent();
1544 
1545   // Unions have no bases and do not call field destructors.
1546   if (ClassDecl->isUnion())
1547     return;
1548 
1549   // The complete-destructor phase just destructs all the virtual bases.
1550   if (DtorType == Dtor_Complete) {
1551 
1552     // We push them in the forward order so that they'll be popped in
1553     // the reverse order.
1554     for (const auto &Base : ClassDecl->vbases()) {
1555       CXXRecordDecl *BaseClassDecl
1556         = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
1557 
1558       // Ignore trivial destructors.
1559       if (BaseClassDecl->hasTrivialDestructor())
1560         continue;
1561 
1562       EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
1563                                         BaseClassDecl,
1564                                         /*BaseIsVirtual*/ true);
1565     }
1566 
1567     return;
1568   }
1569 
1570   assert(DtorType == Dtor_Base);
1571 
1572   // Destroy non-virtual bases.
1573   for (const auto &Base : ClassDecl->bases()) {
1574     // Ignore virtual bases.
1575     if (Base.isVirtual())
1576       continue;
1577 
1578     CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
1579 
1580     // Ignore trivial destructors.
1581     if (BaseClassDecl->hasTrivialDestructor())
1582       continue;
1583 
1584     EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
1585                                       BaseClassDecl,
1586                                       /*BaseIsVirtual*/ false);
1587   }
1588 
1589   // Destroy direct fields.
1590   for (const auto *Field : ClassDecl->fields()) {
1591     QualType type = Field->getType();
1592     QualType::DestructionKind dtorKind = type.isDestructedType();
1593     if (!dtorKind) continue;
1594 
1595     // Anonymous union members do not have their destructors called.
1596     const RecordType *RT = type->getAsUnionType();
1597     if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
1598 
1599     CleanupKind cleanupKind = getCleanupKind(dtorKind);
1600     EHStack.pushCleanup<DestroyField>(cleanupKind, Field,
1601                                       getDestroyer(dtorKind),
1602                                       cleanupKind & EHCleanup);
1603   }
1604 }
1605 
1606 /// EmitCXXAggrConstructorCall - Emit a loop to call a particular
1607 /// constructor for each of several members of an array.
1608 ///
1609 /// \param ctor the constructor to call for each element
1610 /// \param arrayType the type of the array to initialize
1611 /// \param arrayBegin an arrayType*
1612 /// \param zeroInitialize true if each element should be
1613 ///   zero-initialized before it is constructed
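///
/// For example (editorial addition): constructing a member or local declared
/// as 'A Elems[4]' reaches this overload; it derives the element count (4)
/// and the element type from the ConstantArrayType and then forwards to the
/// variable-count overload below.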
1614 void CodeGenFunction::EmitCXXAggrConstructorCall(
1615     const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType,
1616     llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) {
1617   QualType elementType;
1618   llvm::Value *numElements =
1619     emitArrayLength(arrayType, elementType, arrayBegin);
1620 
1621   EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, zeroInitialize);
1622 }
1623 
1624 /// EmitCXXAggrConstructorCall - Emit a loop to call a particular
1625 /// constructor for each of several members of an array.
1626 ///
1627 /// \param ctor the constructor to call for each element
1628 /// \param numElements the number of elements in the array;
1629 ///   may be zero
1630 /// \param arrayBegin a T*, where T is the type constructed by ctor
1631 /// \param zeroInitialize true if each element should be
1632 ///   zero-initialized before it is constructed
1633 void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
1634                                                  llvm::Value *numElements,
1635                                                  llvm::Value *arrayBegin,
1636                                                  const CXXConstructExpr *E,
1637                                                  bool zeroInitialize) {
1638 
1639   // It's legal for numElements to be zero.  This can happen both
1640   // dynamically, because x can be zero in 'new A[x]', and statically,
1641   // because of GCC extensions that permit zero-length arrays.  There
1642   // are probably legitimate places where we could assume that this
1643   // doesn't happen, but it's not clear that it's worth it.
1644   llvm::BranchInst *zeroCheckBranch = nullptr;
1645 
1646   // Optimize for a constant count.
1647   llvm::ConstantInt *constantCount
1648     = dyn_cast<llvm::ConstantInt>(numElements);
1649   if (constantCount) {
1650     // Just skip out if the constant count is zero.
1651     if (constantCount->isZero()) return;
1652 
1653   // Otherwise, emit the check.
1654   } else {
1655     llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
1656     llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
1657     zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
1658     EmitBlock(loopBB);
1659   }
1660 
1661   // Find the end of the array.
1662   llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
1663                                                     "arrayctor.end");
1664 
1665   // Enter the loop, setting up a phi for the current location to initialize.
1666   llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1667   llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
1668   EmitBlock(loopBB);
1669   llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
1670                                          "arrayctor.cur");
1671   cur->addIncoming(arrayBegin, entryBB);
1672 
1673   // Inside the loop body, emit the constructor call on the array element.
1674 
1675   QualType type = getContext().getTypeDeclType(ctor->getParent());
1676 
1677   // Zero initialize the storage, if requested.
1678   if (zeroInitialize)
1679     EmitNullInitialization(cur, type);
1680 
1681   // C++ [class.temporary]p4:
1682   // There are two contexts in which temporaries are destroyed at a different
1683   // point than the end of the full-expression. The first context is when a
1684   // default constructor is called to initialize an element of an array.
1685   // If the constructor has one or more default arguments, the destruction of
1686   // every temporary created in a default argument expression is sequenced
1687   // before the construction of the next array element, if any.
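  //
  // For example (editorial addition, not original source text): with
  //
  //   struct T { T(const std::string &S = "default"); };
  //   T Elems[3];
  //
  // the std::string temporary created for the default argument must be
  // destroyed after each element is constructed, which is why the constructor
  // call below runs inside its own RunCleanupsScope.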
1688 
1689   {
1690     RunCleanupsScope Scope(*this);
1691 
1692     // Evaluate the constructor and its arguments in a regular
1693     // partial-destroy cleanup.
1694     if (getLangOpts().Exceptions &&
1695         !ctor->getParent()->hasTrivialDestructor()) {
1696       Destroyer *destroyer = destroyCXXObject;
1697       pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
1698     }
1699 
1700     EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
1701                            /*Delegating=*/false, cur, E);
1702   }
1703 
1704   // Go to the next element.
1705   llvm::Value *next =
1706     Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
1707                               "arrayctor.next");
1708   cur->addIncoming(next, Builder.GetInsertBlock());
1709 
1710   // Check whether that's the end of the loop.
1711   llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
1712   llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
1713   Builder.CreateCondBr(done, contBB, loopBB);
1714 
1715   // Patch the earlier check to skip over the loop.
1716   if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);
1717 
1718   EmitBlock(contBB);
1719 }
1720 
1721 void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
1722                                        llvm::Value *addr,
1723                                        QualType type) {
1724   const RecordType *rtype = type->castAs<RecordType>();
1725   const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
1726   const CXXDestructorDecl *dtor = record->getDestructor();
1727   assert(!dtor->isTrivial());
1728   CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
1729                             /*Delegating=*/false, addr);
1730 }
1731 
1732 void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
1733                                              CXXCtorType Type,
1734                                              bool ForVirtualBase,
1735                                              bool Delegating, llvm::Value *This,
1736                                              const CXXConstructExpr *E) {
1737   // If this is a trivial constructor, just emit what's needed.
1738   if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding()) {
1739     if (E->getNumArgs() == 0) {
1740       // Trivial default constructor, no codegen required.
1741       assert(D->isDefaultConstructor() &&
1742              "trivial 0-arg ctor not a default ctor");
1743       return;
1744     }
1745 
1746     assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
1747     assert(D->isCopyOrMoveConstructor() &&
1748            "trivial 1-arg ctor not a copy/move ctor");
1749 
1750     const Expr *Arg = E->getArg(0);
1751     QualType SrcTy = Arg->getType();
1752     llvm::Value *Src = EmitLValue(Arg).getAddress();
1753     QualType DestTy = getContext().getTypeDeclType(D->getParent());
1754     EmitAggregateCopyCtor(This, Src, DestTy, SrcTy);
1755     return;
1756   }
1757 
1758   // C++11 [class.mfct.non-static]p2:
1759   //   If a non-static member function of a class X is called for an object that
1760   //   is not of type X, or of a type derived from X, the behavior is undefined.
1761   // FIXME: Provide a source location here.
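  // For example (editorial addition): constructing an object through a
  // pointer that is null, misaligned, or refers to storage that is too small,
  // as in
  //
  //   char Buf[1];
  //   new (Buf) X();   // Buf cannot hold an X
  //
  // is the kind of misuse the following check can diagnose when the
  // corresponding sanitizers are enabled.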
1762   EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This,
1763                 getContext().getRecordType(D->getParent()));
1764 
1765   CallArgList Args;
1766 
1767   // Push the this ptr.
1768   Args.add(RValue::get(This), D->getThisType(getContext()));
1769 
1770   // Add the rest of the user-supplied arguments.
1771   const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
1772   EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getConstructor());
1773 
1774   // Insert any ABI-specific implicit constructor arguments.
1775   unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
1776       *this, D, Type, ForVirtualBase, Delegating, Args);
1777 
1778   // Emit the call.
1779   llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
1780   const CGFunctionInfo &Info =
1781       CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
1782   EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
1783 }
1784 
1785 void
1786 CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
1787                                         llvm::Value *This, llvm::Value *Src,
1788                                         const CXXConstructExpr *E) {
1789   if (D->isTrivial() &&
1790       !D->getParent()->mayInsertExtraPadding()) {
1791     assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
1792     assert(D->isCopyOrMoveConstructor() &&
1793            "trivial 1-arg ctor not a copy/move ctor");
1794     EmitAggregateCopyCtor(This, Src,
1795                           getContext().getTypeDeclType(D->getParent()),
1796                           E->arg_begin()->getType());
1797     return;
1798   }
1799   llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete);
1800   assert(D->isInstance() &&
1801          "Trying to emit a member call expr on a static method!");
1802 
1803   const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
1804 
1805   CallArgList Args;
1806 
1807   // Push the this ptr.
1808   Args.add(RValue::get(This), D->getThisType(getContext()));
1809 
1810   // Push the src ptr.
1811   QualType QT = *(FPT->param_type_begin());
1812   llvm::Type *t = CGM.getTypes().ConvertType(QT);
1813   Src = Builder.CreateBitCast(Src, t);
1814   Args.add(RValue::get(Src), QT);
1815 
1816   // Skip over first argument (Src).
1817   EmitCallArgs(Args, FPT, E->arg_begin() + 1, E->arg_end(), E->getConstructor(),
1818                /*ParamsToSkip*/ 1);
1819 
1820   EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
1821            Callee, ReturnValueSlot(), Args, D);
1822 }
1823 
1824 void
1825 CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
1826                                                 CXXCtorType CtorType,
1827                                                 const FunctionArgList &Args,
1828                                                 SourceLocation Loc) {
1829   CallArgList DelegateArgs;
1830 
1831   FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
1832   assert(I != E && "no parameters to constructor");
1833 
1834   // this
1835   DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
1836   ++I;
1837 
1838   // vtt
1839   if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
1840                                          /*ForVirtualBase=*/false,
1841                                          /*Delegating=*/true)) {
1842     QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
1843     DelegateArgs.add(RValue::get(VTT), VoidPP);
1844 
1845     if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
1846       assert(I != E && "cannot skip vtt parameter, already done with args");
1847       assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
1848       ++I;
1849     }
1850   }
1851 
1852   // Explicit arguments.
1853   for (; I != E; ++I) {
1854     const VarDecl *param = *I;
1855     // FIXME: per-argument source location
1856     EmitDelegateCallArg(DelegateArgs, param, Loc);
1857   }
1858 
1859   llvm::Value *Callee =
1860       CGM.getAddrOfCXXStructor(Ctor, getFromCtorType(CtorType));
1861   EmitCall(CGM.getTypes()
1862                .arrangeCXXStructorDeclaration(Ctor, getFromCtorType(CtorType)),
1863            Callee, ReturnValueSlot(), DelegateArgs, Ctor);
1864 }
1865 
1866 namespace {
1867   struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
1868     const CXXDestructorDecl *Dtor;
1869     llvm::Value *Addr;
1870     CXXDtorType Type;
1871 
1872     CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
1873                            CXXDtorType Type)
1874       : Dtor(D), Addr(Addr), Type(Type) {}
1875 
1876     void Emit(CodeGenFunction &CGF, Flags flags) override {
1877       CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
1878                                 /*Delegating=*/true, Addr);
1879     }
1880   };
1881 }
1882 
1883 void
1884 CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
1885                                                   const FunctionArgList &Args) {
1886   assert(Ctor->isDelegatingConstructor());
1887 
1888   llvm::Value *ThisPtr = LoadCXXThis();
1889 
1890   QualType Ty = getContext().getTagDeclType(Ctor->getParent());
1891   CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
1892   AggValueSlot AggSlot =
1893     AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
1894                           AggValueSlot::IsDestructed,
1895                           AggValueSlot::DoesNotNeedGCBarriers,
1896                           AggValueSlot::IsNotAliased);
1897 
1898   EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
1899 
1900   const CXXRecordDecl *ClassDecl = Ctor->getParent();
1901   if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
1902     CXXDtorType Type =
1903       CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;
1904 
1905     EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
1906                                                 ClassDecl->getDestructor(),
1907                                                 ThisPtr, Type);
1908   }
1909 }
1910 
1911 void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
1912                                             CXXDtorType Type,
1913                                             bool ForVirtualBase,
1914                                             bool Delegating,
1915                                             llvm::Value *This) {
1916   CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
1917                                      Delegating, This);
1918 }
1919 
1920 namespace {
1921   struct CallLocalDtor : EHScopeStack::Cleanup {
1922     const CXXDestructorDecl *Dtor;
1923     llvm::Value *Addr;
1924 
1925     CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
1926       : Dtor(D), Addr(Addr) {}
1927 
1928     void Emit(CodeGenFunction &CGF, Flags flags) override {
1929       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1930                                 /*ForVirtualBase=*/false,
1931                                 /*Delegating=*/false, Addr);
1932     }
1933   };
1934 }
1935 
1936 void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
1937                                             llvm::Value *Addr) {
1938   EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
1939 }
1940 
1941 void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
1942   CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
1943   if (!ClassDecl) return;
1944   if (ClassDecl->hasTrivialDestructor()) return;
1945 
1946   const CXXDestructorDecl *D = ClassDecl->getDestructor();
1947   assert(D && D->isUsed() && "destructor not marked as used!");
1948   PushDestructorCleanup(D, Addr);
1949 }
1950 
1951 void
1952 CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
1953                                          const CXXRecordDecl *NearestVBase,
1954                                          CharUnits OffsetFromNearestVBase,
1955                                          const CXXRecordDecl *VTableClass) {
1956   const CXXRecordDecl *RD = Base.getBase();
1957 
1958   // Don't initialize the vtable pointer if the class is marked with the
1959   // 'novtable' attribute.
1960   if ((RD == VTableClass || RD == NearestVBase) &&
1961       VTableClass->hasAttr<MSNoVTableAttr>())
1962     return;
1963 
1964   // Compute the address point.
1965   bool NeedsVirtualOffset;
1966   llvm::Value *VTableAddressPoint =
1967       CGM.getCXXABI().getVTableAddressPointInStructor(
1968           *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset);
1969   if (!VTableAddressPoint)
1970     return;
1971 
1972   // Compute where to store the address point.
1973   llvm::Value *VirtualOffset = nullptr;
1974   CharUnits NonVirtualOffset = CharUnits::Zero();
1975 
1976   if (NeedsVirtualOffset) {
1977     // We need to use the virtual base offset offset because the virtual base
1978     // might have a different offset in the most derived class.
1979     VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
1980                                                               LoadCXXThis(),
1981                                                               VTableClass,
1982                                                               NearestVBase);
1983     NonVirtualOffset = OffsetFromNearestVBase;
1984   } else {
1985     // We can just use the base offset in the complete class.
1986     NonVirtualOffset = Base.getBaseOffset();
1987   }
1988 
1989   // Apply the offsets.
1990   llvm::Value *VTableField = LoadCXXThis();
1991 
1992   if (!NonVirtualOffset.isZero() || VirtualOffset)
1993     VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
1994                                                   NonVirtualOffset,
1995                                                   VirtualOffset);
1996 
1997   // Finally, store the address point. Use the same LLVM types as the field to
1998   // support optimization.
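  // Roughly, the emitted IR looks like this (editorial sketch; the exact
  // pointer and class types vary by target and by class):
  //
  //   %vtable = bitcast ... to i32 (...)**          ; the address point
  //   %field  = bitcast %class.X* %this to i32 (...)***
  //   store i32 (...)** %vtable, i32 (...)*** %field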
1999   llvm::Type *VTablePtrTy =
2000       llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true)
2001           ->getPointerTo()
2002           ->getPointerTo();
2003   VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo());
2004   VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
2005   llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
2006   CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
2007 }
2008 
2009 void
2010 CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
2011                                           const CXXRecordDecl *NearestVBase,
2012                                           CharUnits OffsetFromNearestVBase,
2013                                           bool BaseIsNonVirtualPrimaryBase,
2014                                           const CXXRecordDecl *VTableClass,
2015                                           VisitedVirtualBasesSetTy& VBases) {
2016   // If this base is a non-virtual primary base the address point has already
2017   // been set.
2018   if (!BaseIsNonVirtualPrimaryBase) {
2019     // Initialize the vtable pointer for this base.
2020     InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
2021                             VTableClass);
2022   }
2023 
2024   const CXXRecordDecl *RD = Base.getBase();
2025 
2026   // Traverse bases.
2027   for (const auto &I : RD->bases()) {
2028     CXXRecordDecl *BaseDecl
2029       = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2030 
2031     // Ignore classes without a vtable.
2032     if (!BaseDecl->isDynamicClass())
2033       continue;
2034 
2035     CharUnits BaseOffset;
2036     CharUnits BaseOffsetFromNearestVBase;
2037     bool BaseDeclIsNonVirtualPrimaryBase;
2038 
2039     if (I.isVirtual()) {
2040       // Check if we've visited this virtual base before.
2041       if (!VBases.insert(BaseDecl).second)
2042         continue;
2043 
2044       const ASTRecordLayout &Layout =
2045         getContext().getASTRecordLayout(VTableClass);
2046 
2047       BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
2048       BaseOffsetFromNearestVBase = CharUnits::Zero();
2049       BaseDeclIsNonVirtualPrimaryBase = false;
2050     } else {
2051       const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2052 
2053       BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
2054       BaseOffsetFromNearestVBase =
2055         OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
2056       BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
2057     }
2058 
2059     InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
2060                              I.isVirtual() ? BaseDecl : NearestVBase,
2061                              BaseOffsetFromNearestVBase,
2062                              BaseDeclIsNonVirtualPrimaryBase,
2063                              VTableClass, VBases);
2064   }
2065 }
2066 
2067 void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
2068   // Ignore classes without a vtable.
2069   if (!RD->isDynamicClass())
2070     return;
2071 
2072   // Initialize the vtable pointers for this class and all of its bases.
2073   VisitedVirtualBasesSetTy VBases;
2074   InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
2075                            /*NearestVBase=*/nullptr,
2076                            /*OffsetFromNearestVBase=*/CharUnits::Zero(),
2077                            /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);
2078 
2079   if (RD->getNumVBases())
2080     CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
2081 }
2082 
2083 llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
2084                                            llvm::Type *Ty) {
2085   llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
2086   llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
2087   CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
2088   return VTable;
2089 }
2090 
2091 // If a class has a single non-virtual base, adds no fields, and neither
2092 // introduces nor overrides any virtual member functions, it will have the
2093 // same layout as its base. This function returns the least derived such class.
2094 //
2095 // Casting an instance of a base class to such a derived class is technically
2096 // undefined behavior, but it is a relatively common hack for introducing member
2097 // functions on class instances with specific properties (e.g. llvm::Operator)
2098 // that works under most compilers and should not have security implications, so
2099 // we allow it by default. It can be disabled with -fsanitize=cfi-cast-strict.
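//
// For example (editorial addition):
//
//   struct Base    { virtual ~Base(); int X; };
//   struct Wrapper : Base { int getX() const { return X; } };
//
// Wrapper adds no fields, no virtual bases, and no non-implicit virtual
// functions, so it shares Base's layout; given Wrapper, the function below
// returns Base, and the vtable check is performed against Base instead.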
2100 static const CXXRecordDecl *
2101 LeastDerivedClassWithSameLayout(const CXXRecordDecl *RD) {
2102   if (!RD->field_empty())
2103     return RD;
2104 
2105   if (RD->getNumVBases() != 0)
2106     return RD;
2107 
2108   if (RD->getNumBases() != 1)
2109     return RD;
2110 
2111   for (const CXXMethodDecl *MD : RD->methods()) {
2112     if (MD->isVirtual()) {
2113       // Virtual member functions are only ok if they are implicit destructors
2114       // because the implicit destructor will have the same semantics as the
2115       // base class's destructor if no fields are added.
2116       if (isa<CXXDestructorDecl>(MD) && MD->isImplicit())
2117         continue;
2118       return RD;
2119     }
2120   }
2121 
2122   return LeastDerivedClassWithSameLayout(
2123       RD->bases_begin()->getType()->getAsCXXRecordDecl());
2124 }
2125 
2126 void CodeGenFunction::EmitVTablePtrCheckForCall(const CXXMethodDecl *MD,
2127                                                 llvm::Value *VTable) {
2128   const CXXRecordDecl *ClassDecl = MD->getParent();
2129   if (!SanOpts.has(SanitizerKind::CFICastStrict))
2130     ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl);
2131 
2132   EmitVTablePtrCheck(ClassDecl, VTable);
2133 }
2134 
2135 void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
2136                                                 llvm::Value *Derived,
2137                                                 bool MayBeNull) {
2138   if (!getLangOpts().CPlusPlus)
2139     return;
2140 
2141   auto *ClassTy = T->getAs<RecordType>();
2142   if (!ClassTy)
2143     return;
2144 
2145   const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(ClassTy->getDecl());
2146 
2147   if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass())
2148     return;
2149 
2150   SmallString<64> MangledName;
2151   llvm::raw_svector_ostream Out(MangledName);
2152   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T.getUnqualifiedType(),
2153                                                    Out);
2154 
2155   // Blacklist based on the mangled type.
2156   if (CGM.getContext().getSanitizerBlacklist().isBlacklistedType(Out.str()))
2157     return;
2158 
2159   if (!SanOpts.has(SanitizerKind::CFICastStrict))
2160     ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl);
2161 
2162   llvm::BasicBlock *ContBlock = nullptr;
2163 
2164   if (MayBeNull) {
2165     llvm::Value *DerivedNotNull =
2166         Builder.CreateIsNotNull(Derived, "cast.nonnull");
2167 
2168     llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check");
2169     ContBlock = createBasicBlock("cast.cont");
2170 
2171     Builder.CreateCondBr(DerivedNotNull, CheckBlock, ContBlock);
2172 
2173     EmitBlock(CheckBlock);
2174   }
2175 
2176   llvm::Value *VTable = GetVTablePtr(Derived, Int8PtrTy);
2177   EmitVTablePtrCheck(ClassDecl, VTable);
2178 
2179   if (MayBeNull) {
2180     Builder.CreateBr(ContBlock);
2181     EmitBlock(ContBlock);
2182   }
2183 }
2184 
2185 void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
2186                                          llvm::Value *VTable) {
2187   // FIXME: Add blacklisting scheme.
2188   if (RD->isInStdNamespace())
2189     return;
2190 
2191   std::string OutName;
2192   llvm::raw_string_ostream Out(OutName);
2193   CGM.getCXXABI().getMangleContext().mangleCXXVTableBitSet(RD, Out);
2194 
2195   llvm::Value *BitSetName = llvm::MetadataAsValue::get(
2196       getLLVMContext(), llvm::MDString::get(getLLVMContext(), Out.str()));
2197 
2198   llvm::Value *BitSetTest = Builder.CreateCall2(
2199       CGM.getIntrinsic(llvm::Intrinsic::bitset_test),
2200       Builder.CreateBitCast(VTable, CGM.Int8PtrTy), BitSetName);
2201 
2202   llvm::BasicBlock *ContBlock = createBasicBlock("vtable.check.cont");
2203   llvm::BasicBlock *TrapBlock = createBasicBlock("vtable.check.trap");
2204 
2205   Builder.CreateCondBr(BitSetTest, ContBlock, TrapBlock);
2206 
2207   EmitBlock(TrapBlock);
2208   Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap));
2209   Builder.CreateUnreachable();
2210 
2211   EmitBlock(ContBlock);
2212 }
2213 
2214 // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
2215 // quite what we want.
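// For example (editorial addition): given an expression written as
// '((__extension__ (x)))', the loop below strips the ParenExprs, the
// __extension__ unary operator, and any CK_NoOp casts, returning the
// underlying expression for 'x'.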
2216 static const Expr *skipNoOpCastsAndParens(const Expr *E) {
2217   while (true) {
2218     if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
2219       E = PE->getSubExpr();
2220       continue;
2221     }
2222 
2223     if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
2224       if (CE->getCastKind() == CK_NoOp) {
2225         E = CE->getSubExpr();
2226         continue;
2227       }
2228     }
2229     if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
2230       if (UO->getOpcode() == UO_Extension) {
2231         E = UO->getSubExpr();
2232         continue;
2233       }
2234     }
2235     return E;
2236   }
2237 }
2238 
2239 bool
2240 CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
2241                                                    const CXXMethodDecl *MD) {
2242   // When building with -fapple-kext, all calls must go through the vtable since
2243   // the kernel linker can do runtime patching of vtables.
2244   if (getLangOpts().AppleKext)
2245     return false;
2246 
2247   // If the most derived class is marked final, we know that no subclass can
2248   // override this member function and so we can devirtualize it. For example:
2249   //
2250   // struct A { virtual void f(); }
2251   // struct B final : A { };
2252   //
2253   // void f(B *b) {
2254   //   b->f();
2255   // }
2256   //
2257   const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
2258   if (MostDerivedClassDecl->hasAttr<FinalAttr>())
2259     return true;
2260 
2261   // If the member function is marked 'final', we know that it can't be
2262   // overridden and can therefore devirtualize it.
2263   if (MD->hasAttr<FinalAttr>())
2264     return true;
2265 
2266   // Similarly, if the class itself is marked 'final' it can't be overridden
2267   // and we can therefore devirtualize the member function call.
2268   if (MD->getParent()->hasAttr<FinalAttr>())
2269     return true;
2270 
2271   Base = skipNoOpCastsAndParens(Base);
2272   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
2273     if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
2274       // This is a record decl. We know the type and can devirtualize it.
2275       return VD->getType()->isRecordType();
2276     }
2277 
2278     return false;
2279   }
2280 
2281   // We can devirtualize calls on an object accessed by a class member access
2282   // expression, since by C++11 [basic.life]p6 we know that it can't refer to
2283   // a derived class object constructed in the same location.
2284   if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
2285     if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
2286       return VD->getType()->isRecordType();
2287 
2288   // We can always devirtualize calls on temporary object expressions.
2289   if (isa<CXXConstructExpr>(Base))
2290     return true;
2291 
2292   // And calls on bound temporaries.
2293   if (isa<CXXBindTemporaryExpr>(Base))
2294     return true;
2295 
2296   // Check if this is a call expr that returns a record type.
2297   if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
2298     return CE->getCallReturnType(getContext())->isRecordType();
2299 
2300   // We can't devirtualize the call.
2301   return false;
2302 }
2303 
2304 void CodeGenFunction::EmitForwardingCallToLambda(
2305                                       const CXXMethodDecl *callOperator,
2306                                       CallArgList &callArgs) {
2307   // Get the address of the call operator.
2308   const CGFunctionInfo &calleeFnInfo =
2309     CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
2310   llvm::Value *callee =
2311     CGM.GetAddrOfFunction(GlobalDecl(callOperator),
2312                           CGM.getTypes().GetFunctionType(calleeFnInfo));
2313 
2314   // Prepare the return slot.
2315   const FunctionProtoType *FPT =
2316     callOperator->getType()->castAs<FunctionProtoType>();
2317   QualType resultType = FPT->getReturnType();
2318   ReturnValueSlot returnSlot;
2319   if (!resultType->isVoidType() &&
2320       calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
2321       !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
2322     returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());
2323 
2324   // We don't need to separately arrange the call arguments because
2325   // the call can't be variadic anyway --- it's impossible to forward
2326   // variadic arguments.
2327 
2328   // Now emit our call.
2329   RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
2330                        callArgs, callOperator);
2331 
2332   // If necessary, copy the returned value into the slot.
2333   if (!resultType->isVoidType() && returnSlot.isNull())
2334     EmitReturnOfRValue(RV, resultType);
2335   else
2336     EmitBranchThroughCleanup(ReturnBlock);
2337 }
2338 
2339 void CodeGenFunction::EmitLambdaBlockInvokeBody() {
2340   const BlockDecl *BD = BlockInfo->getBlockDecl();
2341   const VarDecl *variable = BD->capture_begin()->getVariable();
2342   const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();
2343 
2344   // Start building arguments for forwarding call
2345   CallArgList CallArgs;
2346 
2347   QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
2348   llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
2349   CallArgs.add(RValue::get(ThisPtr), ThisType);
2350 
2351   // Add the rest of the parameters.
2352   for (auto param : BD->params())
2353     EmitDelegateCallArg(CallArgs, param, param->getLocStart());
2354 
2355   assert(!Lambda->isGenericLambda() &&
2356             "generic lambda interconversion to block not implemented");
2357   EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
2358 }
2359 
2360 void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
2361   if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
2362     // FIXME: Making this work correctly is nasty because it requires either
2363     // cloning the body of the call operator or making the call operator forward.
2364     CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
2365     return;
2366   }
2367 
2368   EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody());
2369 }
2370 
2371 void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
2372   const CXXRecordDecl *Lambda = MD->getParent();
2373 
2374   // Start building arguments for forwarding call
2375   CallArgList CallArgs;
2376 
2377   QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
2378   llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
2379   CallArgs.add(RValue::get(ThisPtr), ThisType);
2380 
2381   // Add the rest of the parameters.
2382   for (auto Param : MD->params())
2383     EmitDelegateCallArg(CallArgs, Param, Param->getLocStart());
2384 
2385   const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
2386   // For a generic lambda, find the corresponding call operator specialization
2387   // to which the call to the static-invoker shall be forwarded.
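  // Illustrative example (editorial addition):
  //
  //   auto L = [](auto X) { return X; };
  //   int (*FP)(int) = L;
  //
  // The static invoker instantiated for 'int' must forward to the call
  // operator specialization 'operator()<int>', which is what
  // findSpecialization locates below.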
2388   if (Lambda->isGenericLambda()) {
2389     assert(MD->isFunctionTemplateSpecialization());
2390     const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
2391     FunctionTemplateDecl *CallOpTemplate = CallOp->getDescribedFunctionTemplate();
2392     void *InsertPos = nullptr;
2393     FunctionDecl *CorrespondingCallOpSpecialization =
2394         CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
2395     assert(CorrespondingCallOpSpecialization);
2396     CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
2397   }
2398   EmitForwardingCallToLambda(CallOp, CallArgs);
2399 }
2400 
2401 void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
2402   if (MD->isVariadic()) {
2403     // FIXME: Making this work correctly is nasty because it requires either
2404     // cloning the body of the call operator or making the call operator forward.
2405     CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
2406     return;
2407   }
2408 
2409   EmitLambdaDelegatingInvokeBody(MD);
2410 }
2411