1 //===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Aggregate Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CGObjCRuntime.h"
16 #include "CodeGenModule.h"
17 #include "clang/AST/ASTContext.h"
18 #include "clang/AST/DeclCXX.h"
19 #include "clang/AST/DeclTemplate.h"
20 #include "clang/AST/StmtVisitor.h"
21 #include "llvm/IR/Constants.h"
22 #include "llvm/IR/Function.h"
23 #include "llvm/IR/GlobalVariable.h"
24 #include "llvm/IR/Intrinsics.h"
25 using namespace clang;
26 using namespace CodeGen;
27 
28 //===----------------------------------------------------------------------===//
29 //                        Aggregate Expression Emitter
30 //===----------------------------------------------------------------------===//
31 
32 namespace  {
33 class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
34   CodeGenFunction &CGF;
35   CGBuilderTy &Builder;
36   AggValueSlot Dest;
37 
38   /// We want to use 'dest' as the return slot except under two
39   /// conditions:
40   ///   - The destination slot requires garbage collection, so we
41   ///     need to use the GC API.
42   ///   - The destination slot is potentially aliased.
  bool shouldUseDestForReturnSlot() const {
44     return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
45   }
46 
  ReturnValueSlot getReturnValueSlot() const {
48     if (!shouldUseDestForReturnSlot())
49       return ReturnValueSlot();
50 
51     return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
52   }
53 
  AggValueSlot EnsureSlot(QualType T) {
55     if (!Dest.isIgnored()) return Dest;
56     return CGF.CreateAggTemp(T, "agg.tmp.ensured");
57   }
  void EnsureDest(QualType T) {
59     if (!Dest.isIgnored()) return;
60     Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
61   }
62 
63 public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest)
65     : CGF(cgf), Builder(CGF.Builder), Dest(Dest) {
66   }
67 
68   //===--------------------------------------------------------------------===//
69   //                               Utilities
70   //===--------------------------------------------------------------------===//
71 
72   /// EmitAggLoadOfLValue - Given an expression with aggregate type that
73   /// represents a value lvalue, this method emits the address of the lvalue,
74   /// then loads the result into DestPtr.
75   void EmitAggLoadOfLValue(const Expr *E);
76 
77   /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
78   void EmitFinalDestCopy(QualType type, const LValue &src);
79   void EmitFinalDestCopy(QualType type, RValue src,
80                          CharUnits srcAlignment = CharUnits::Zero());
81   void EmitCopy(QualType type, const AggValueSlot &dest,
82                 const AggValueSlot &src);
83 
84   void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
85 
86   void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
87                      QualType elementType, InitListExpr *E);
88 
  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
90     if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
91       return AggValueSlot::NeedsGCBarriers;
92     return AggValueSlot::DoesNotNeedGCBarriers;
93   }
94 
95   bool TypeRequiresGCollection(QualType T);
96 
97   //===--------------------------------------------------------------------===//
98   //                            Visitor Methods
99   //===--------------------------------------------------------------------===//
100 
  void Visit(Expr *E) {
102     ApplyDebugLocation DL(CGF, E);
103     StmtVisitor<AggExprEmitter>::Visit(E);
104   }
105 
  void VisitStmt(Stmt *S) {
107     CGF.ErrorUnsupported(S, "aggregate expression");
108   }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
111     Visit(GE->getResultExpr());
112   }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
115     return Visit(E->getReplacement());
116   }
117 
118   // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) {
120     // For aggregates, we should always be able to emit the variable
121     // as an l-value unless it's a reference.  This is due to the fact
122     // that we can't actually ever see a normal l2r conversion on an
123     // aggregate in C++, and in C there's no language standard
124     // actively preventing us from listing variables in the captures
125     // list of a block.
126     if (E->getDecl()->getType()->isReferenceType()) {
127       if (CodeGenFunction::ConstantEmission result
128             = CGF.tryEmitAsConstant(E)) {
129         EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
130         return;
131       }
132     }
133 
134     EmitAggLoadOfLValue(E);
135   }
136 
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
140   void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
142     EmitAggLoadOfLValue(E);
143   }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
145     EmitAggLoadOfLValue(E);
146   }
147 
148   // Operators.
149   void VisitCastExpr(CastExpr *E);
150   void VisitCallExpr(const CallExpr *E);
151   void VisitStmtExpr(const StmtExpr *E);
152   void VisitBinaryOperator(const BinaryOperator *BO);
153   void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
154   void VisitBinAssign(const BinaryOperator *E);
155   void VisitBinComma(const BinaryOperator *E);
156 
157   void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
159     EmitAggLoadOfLValue(E);
160   }
161 
162   void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
163   void VisitChooseExpr(const ChooseExpr *CE);
164   void VisitInitListExpr(InitListExpr *E);
165   void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
167     Visit(DAE->getExpr());
168   }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
170     CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
171     Visit(DIE->getExpr());
172   }
173   void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
174   void VisitCXXConstructExpr(const CXXConstructExpr *E);
175   void VisitLambdaExpr(LambdaExpr *E);
176   void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
177   void VisitExprWithCleanups(ExprWithCleanups *E);
178   void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
180   void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
181   void VisitOpaqueValueExpr(OpaqueValueExpr *E);
182 
  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
184     if (E->isGLValue()) {
185       LValue LV = CGF.EmitPseudoObjectLValue(E);
186       return EmitFinalDestCopy(E->getType(), LV);
187     }
188 
189     CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
190   }
191 
192   void VisitVAArgExpr(VAArgExpr *E);
193 
194   void EmitInitializationToLValue(Expr *E, LValue Address);
195   void EmitNullInitializationToLValue(LValue Address);
196   //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
199     CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
200   }
201 };
202 }  // end anonymous namespace.
203 
204 //===----------------------------------------------------------------------===//
205 //                                Utilities
206 //===----------------------------------------------------------------------===//
207 
208 /// EmitAggLoadOfLValue - Given an expression with aggregate type that
209 /// represents a value lvalue, this method emits the address of the lvalue,
210 /// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
212   LValue LV = CGF.EmitLValue(E);
213 
214   // If the type of the l-value is atomic, then do an atomic load.
215   if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
216     CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
217     return;
218   }
219 
220   EmitFinalDestCopy(E->getType(), LV);
221 }
222 
223 /// \brief True if the given aggregate type requires special GC API calls.
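///
/// For illustration (not from the original source): under Objective-C GC, a
/// struct such as
///   struct Pair { id first; id second; };
/// has object members, so copying an aggregate of this type must go through
/// the collector's barrier API rather than a plain memcpy.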
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
225   // Only record types have members that might require garbage collection.
226   const RecordType *RecordTy = T->getAs<RecordType>();
227   if (!RecordTy) return false;
228 
229   // Don't mess with non-trivial C++ types.
230   RecordDecl *Record = RecordTy->getDecl();
231   if (isa<CXXRecordDecl>(Record) &&
232       (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
233        !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
234     return false;
235 
236   // Check whether the type has an object member.
237   return Record->hasObjectMember();
238 }
239 
240 /// \brief Perform the final move to DestPtr if for some reason
241 /// getReturnValueSlot() didn't use it directly.
242 ///
243 /// The idea is that you do something like this:
244 ///   RValue Result = EmitSomething(..., getReturnValueSlot());
245 ///   EmitMoveFromReturnSlot(E, Result);
246 ///
247 /// If nothing interferes, this will cause the result to be emitted
248 /// directly into the return value slot.  Otherwise, a final move
249 /// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
251   if (shouldUseDestForReturnSlot()) {
252     // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
253     // The possibility of undef rvalues complicates that a lot,
254     // though, so we can't really assert.
255     return;
256   }
257 
258   // Otherwise, copy from there to the destination.
259   assert(Dest.getAddr() != src.getAggregateAddr());
260   std::pair<CharUnits, CharUnits> typeInfo =
261     CGF.getContext().getTypeInfoInChars(E->getType());
262   EmitFinalDestCopy(E->getType(), src, typeInfo.second);
263 }
264 
265 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
267                                        CharUnits srcAlign) {
268   assert(src.isAggregate() && "value must be aggregate value!");
269   LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
270   EmitFinalDestCopy(type, srcLV);
271 }
272 
273 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
275   // If Dest is ignored, then we're evaluating an aggregate expression
276   // in a context that doesn't care about the result.  Note that loads
277   // from volatile l-values force the existence of a non-ignored
278   // destination.
279   if (Dest.isIgnored())
280     return;
281 
282   AggValueSlot srcAgg =
283     AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
284                             needsGC(type), AggValueSlot::IsAliased);
285   EmitCopy(type, Dest, srcAgg);
286 }
287 
288 /// Perform a copy from the source into the destination.
289 ///
290 /// \param type - the type of the aggregate being copied; qualifiers are
291 ///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
293                               const AggValueSlot &src) {
294   if (dest.requiresGCollection()) {
295     CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
296     llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
297     CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
298                                                       dest.getAddr(),
299                                                       src.getAddr(),
300                                                       size);
301     return;
302   }
303 
304   // If the result of the assignment is used, copy the LHS there also.
305   // It's volatile if either side is.  Use the minimum alignment of
306   // the two sides.
307   CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
308                         dest.isVolatile() || src.isVolatile(),
309                         std::min(dest.getAlignment(), src.getAlignment()));
310 }
311 
312 /// \brief Emit the initializer for a std::initializer_list initialized with a
313 /// real initializer list.
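///
/// For illustration (hypothetical source): given
///   std::initializer_list<int> il = {1, 2, 3};
/// the braced elements are emitted into a backing array, and the
/// initializer_list object is then filled in with a pointer to the first
/// element plus either an end pointer or a length, depending on how the
/// library declares its two fields.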
314 void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
316   // Emit an array containing the elements.  The array is externally destructed
317   // if the std::initializer_list object is.
318   ASTContext &Ctx = CGF.getContext();
319   LValue Array = CGF.EmitLValue(E->getSubExpr());
320   assert(Array.isSimple() && "initializer_list array not a simple lvalue");
321   llvm::Value *ArrayPtr = Array.getAddress();
322 
323   const ConstantArrayType *ArrayType =
324       Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
325   assert(ArrayType && "std::initializer_list constructed from non-array");
326 
327   // FIXME: Perform the checks on the field types in SemaInit.
328   RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
329   RecordDecl::field_iterator Field = Record->field_begin();
330   if (Field == Record->field_end()) {
331     CGF.ErrorUnsupported(E, "weird std::initializer_list");
332     return;
333   }
334 
335   // Start pointer.
336   if (!Field->getType()->isPointerType() ||
337       !Ctx.hasSameType(Field->getType()->getPointeeType(),
338                        ArrayType->getElementType())) {
339     CGF.ErrorUnsupported(E, "weird std::initializer_list");
340     return;
341   }
342 
343   AggValueSlot Dest = EnsureSlot(E->getType());
344   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
345                                      Dest.getAlignment());
346   LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
347   llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
348   llvm::Value *IdxStart[] = { Zero, Zero };
349   llvm::Value *ArrayStart =
350       Builder.CreateInBoundsGEP(ArrayPtr, IdxStart, "arraystart");
351   CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
352   ++Field;
353 
354   if (Field == Record->field_end()) {
355     CGF.ErrorUnsupported(E, "weird std::initializer_list");
356     return;
357   }
358 
359   llvm::Value *Size = Builder.getInt(ArrayType->getSize());
360   LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
361   if (Field->getType()->isPointerType() &&
362       Ctx.hasSameType(Field->getType()->getPointeeType(),
363                       ArrayType->getElementType())) {
364     // End pointer.
365     llvm::Value *IdxEnd[] = { Zero, Size };
366     llvm::Value *ArrayEnd =
367         Builder.CreateInBoundsGEP(ArrayPtr, IdxEnd, "arrayend");
368     CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
369   } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
370     // Length.
371     CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
372   } else {
373     CGF.ErrorUnsupported(E, "weird std::initializer_list");
374     return;
375   }
376 }
377 
378 /// \brief Determine if E is a trivial array filler, that is, one that is
379 /// equivalent to zero-initialization.
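///
/// For example (illustrative): given int a[8] = {1, 2}; the six remaining
/// elements are covered by an implicit filler that amounts to
/// zero-initialization, so that filler is "trivial" in the sense used here.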
static bool isTrivialFiller(Expr *E) {
381   if (!E)
382     return true;
383 
384   if (isa<ImplicitValueInitExpr>(E))
385     return true;
386 
387   if (auto *ILE = dyn_cast<InitListExpr>(E)) {
388     if (ILE->getNumInits())
389       return false;
390     return isTrivialFiller(ILE->getArrayFiller());
391   }
392 
393   if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
394     return Cons->getConstructor()->isDefaultConstructor() &&
395            Cons->getConstructor()->isTrivial();
396 
397   // FIXME: Are there other cases where we can avoid emitting an initializer?
398   return false;
399 }
400 
401 /// \brief Emit initialization of an array from an initializer list.
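///
/// A sketch of what this produces (illustrative, not literal IR): for
///   int a[8] = {1, 2, 3};
/// the three explicit elements are stored one by one, and the remaining
/// elements are filled by a loop of the form
///   do { *cur++ = <filler>; } while (cur != end);
/// unless the destination is already known to be zeroed and the filler is
/// trivial.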
void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
403                                    QualType elementType, InitListExpr *E) {
404   uint64_t NumInitElements = E->getNumInits();
405 
406   uint64_t NumArrayElements = AType->getNumElements();
407   assert(NumInitElements <= NumArrayElements);
408 
409   // DestPtr is an array*.  Construct an elementType* by drilling
410   // down a level.
411   llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
412   llvm::Value *indices[] = { zero, zero };
413   llvm::Value *begin =
414     Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
415 
416   // Exception safety requires us to destroy all the
417   // already-constructed members if an initializer throws.
418   // For that, we'll need an EH cleanup.
419   QualType::DestructionKind dtorKind = elementType.isDestructedType();
420   llvm::AllocaInst *endOfInit = nullptr;
421   EHScopeStack::stable_iterator cleanup;
422   llvm::Instruction *cleanupDominator = nullptr;
423   if (CGF.needsEHCleanup(dtorKind)) {
424     // In principle we could tell the cleanup where we are more
425     // directly, but the control flow can get so varied here that it
426     // would actually be quite complex.  Therefore we go through an
427     // alloca.
428     endOfInit = CGF.CreateTempAlloca(begin->getType(),
429                                      "arrayinit.endOfInit");
430     cleanupDominator = Builder.CreateStore(begin, endOfInit);
431     CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
432                                          CGF.getDestroyer(dtorKind));
433     cleanup = CGF.EHStack.stable_begin();
434 
435   // Otherwise, remember that we didn't need a cleanup.
436   } else {
437     dtorKind = QualType::DK_none;
438   }
439 
440   llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
441 
442   // The 'current element to initialize'.  The invariants on this
443   // variable are complicated.  Essentially, after each iteration of
444   // the loop, it points to the last initialized element, except
445   // that it points to the beginning of the array before any
446   // elements have been initialized.
447   llvm::Value *element = begin;
448 
449   // Emit the explicit initializers.
450   for (uint64_t i = 0; i != NumInitElements; ++i) {
451     // Advance to the next element.
452     if (i > 0) {
453       element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
454 
455       // Tell the cleanup that it needs to destroy up to this
456       // element.  TODO: some of these stores can be trivially
457       // observed to be unnecessary.
458       if (endOfInit) Builder.CreateStore(element, endOfInit);
459     }
460 
461     LValue elementLV = CGF.MakeAddrLValue(element, elementType);
462     EmitInitializationToLValue(E->getInit(i), elementLV);
463   }
464 
465   // Check whether there's a non-trivial array-fill expression.
466   Expr *filler = E->getArrayFiller();
467   bool hasTrivialFiller = isTrivialFiller(filler);
468 
469   // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
471   // emitting to zeroed memory.
472   if (NumInitElements != NumArrayElements &&
473       !(Dest.isZeroed() && hasTrivialFiller &&
474         CGF.getTypes().isZeroInitializable(elementType))) {
475 
476     // Use an actual loop.  This is basically
477     //   do { *array++ = filler; } while (array != end);
478 
479     // Advance to the start of the rest of the array.
480     if (NumInitElements) {
481       element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
482       if (endOfInit) Builder.CreateStore(element, endOfInit);
483     }
484 
485     // Compute the end of the array.
486     llvm::Value *end = Builder.CreateInBoundsGEP(begin,
487                       llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
488                                                  "arrayinit.end");
489 
490     llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
491     llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
492 
493     // Jump into the body.
494     CGF.EmitBlock(bodyBB);
495     llvm::PHINode *currentElement =
496       Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
497     currentElement->addIncoming(element, entryBB);
498 
499     // Emit the actual filler expression.
500     LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
501     if (filler)
502       EmitInitializationToLValue(filler, elementLV);
503     else
504       EmitNullInitializationToLValue(elementLV);
505 
506     // Move on to the next element.
507     llvm::Value *nextElement =
508       Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
509 
510     // Tell the EH cleanup that we finished with the last element.
511     if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
512 
513     // Leave the loop if we're done.
514     llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
515                                              "arrayinit.done");
516     llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
517     Builder.CreateCondBr(done, endBB, bodyBB);
518     currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
519 
520     CGF.EmitBlock(endBB);
521   }
522 
523   // Leave the partial-array cleanup if we entered one.
524   if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
525 }
526 
527 //===----------------------------------------------------------------------===//
528 //                            Visitor Methods
529 //===----------------------------------------------------------------------===//
530 
void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
532   Visit(E->GetTemporaryExpr());
533 }
534 
void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
536   EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
537 }
538 
539 void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
541   if (Dest.isPotentiallyAliased() &&
542       E->getType().isPODType(CGF.getContext())) {
543     // For a POD type, just emit a load of the lvalue + a copy, because our
544     // compound literal might alias the destination.
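    // For example (illustrative): p = (struct Point){ p.y, p.x }; reads the
    // destination while the literal is being built, so we must not emit the
    // literal directly in place.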
545     EmitAggLoadOfLValue(E);
546     return;
547   }
548 
549   AggValueSlot Slot = EnsureSlot(E->getType());
550   CGF.EmitAggExpr(E->getInitializer(), Slot);
551 }
552 
553 /// Attempt to look through various unimportant expressions to find a
554 /// cast of the given kind.
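///
/// For example (illustrative): this keeps stripping parens and CK_NoOp casts
/// from the operand until it either finds a cast of the requested kind
/// (returning that cast's operand) or runs out of things to look through
/// (returning null).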
static Expr *findPeephole(Expr *op, CastKind kind) {
556   while (true) {
557     op = op->IgnoreParens();
558     if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
559       if (castE->getCastKind() == kind)
560         return castE->getSubExpr();
561       if (castE->getCastKind() == CK_NoOp)
562         continue;
563     }
564     return nullptr;
565   }
566 }
567 
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
569   switch (E->getCastKind()) {
570   case CK_Dynamic: {
571     // FIXME: Can this actually happen? We have no test coverage for it.
572     assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
573     LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
574                                       CodeGenFunction::TCK_Load);
575     // FIXME: Do we also need to handle property references here?
576     if (LV.isSimple())
577       CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
578     else
579       CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
580 
581     if (!Dest.isIgnored())
582       CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
583     break;
584   }
585 
586   case CK_ToUnion: {
587     if (Dest.isIgnored()) break;
588 
589     // GCC union extension
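    // For example (illustrative): (union U)x initializes the member of U whose
    // type matches x, so we reinterpret the destination as a pointer to the
    // operand's type and initialize through that.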
590     QualType Ty = E->getSubExpr()->getType();
591     QualType PtrTy = CGF.getContext().getPointerType(Ty);
592     llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
593                                                  CGF.ConvertType(PtrTy));
594     EmitInitializationToLValue(E->getSubExpr(),
595                                CGF.MakeAddrLValue(CastPtr, Ty));
596     break;
597   }
598 
599   case CK_DerivedToBase:
600   case CK_BaseToDerived:
601   case CK_UncheckedDerivedToBase: {
602     llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
603                 "should have been unpacked before we got here");
604   }
605 
606   case CK_NonAtomicToAtomic:
607   case CK_AtomicToNonAtomic: {
608     bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
609 
610     // Determine the atomic and value types.
611     QualType atomicType = E->getSubExpr()->getType();
612     QualType valueType = E->getType();
613     if (isToAtomic) std::swap(atomicType, valueType);
614 
615     assert(atomicType->isAtomicType());
616     assert(CGF.getContext().hasSameUnqualifiedType(valueType,
617                           atomicType->castAs<AtomicType>()->getValueType()));
618 
619     // Just recurse normally if we're ignoring the result or the
620     // atomic type doesn't change representation.
621     if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
622       return Visit(E->getSubExpr());
623     }
624 
625     CastKind peepholeTarget =
626       (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
627 
628     // These two cases are reverses of each other; try to peephole them.
629     if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
630       assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
631                                                      E->getType()) &&
632            "peephole significantly changed types?");
633       return Visit(op);
634     }
635 
636     // If we're converting an r-value of non-atomic type to an r-value
637     // of atomic type, just emit directly into the relevant sub-object.
638     if (isToAtomic) {
639       AggValueSlot valueDest = Dest;
640       if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
642         // the padding at the end, but this is simpler.)
643         if (!Dest.isZeroed())
644           CGF.EmitNullInitialization(Dest.getAddr(), atomicType);
645 
646         // Build a GEP to refer to the subobject.
647         llvm::Value *valueAddr =
648             CGF.Builder.CreateStructGEP(nullptr, valueDest.getAddr(), 0);
649         valueDest = AggValueSlot::forAddr(valueAddr,
650                                           valueDest.getAlignment(),
651                                           valueDest.getQualifiers(),
652                                           valueDest.isExternallyDestructed(),
653                                           valueDest.requiresGCollection(),
654                                           valueDest.isPotentiallyAliased(),
655                                           AggValueSlot::IsZeroed);
656       }
657 
658       CGF.EmitAggExpr(E->getSubExpr(), valueDest);
659       return;
660     }
661 
662     // Otherwise, we're converting an atomic type to a non-atomic type.
663     // Make an atomic temporary, emit into that, and then copy the value out.
664     AggValueSlot atomicSlot =
665       CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
666     CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
667 
668     llvm::Value *valueAddr =
669         Builder.CreateStructGEP(nullptr, atomicSlot.getAddr(), 0);
670     RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
671     return EmitFinalDestCopy(valueType, rvalue);
672   }
673 
674   case CK_LValueToRValue:
675     // If we're loading from a volatile type, force the destination
676     // into existence.
677     if (E->getSubExpr()->getType().isVolatileQualified()) {
678       EnsureDest(E->getType());
679       return Visit(E->getSubExpr());
680     }
681 
682     // fallthrough
683 
684   case CK_NoOp:
685   case CK_UserDefinedConversion:
686   case CK_ConstructorConversion:
687     assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
688                                                    E->getType()) &&
689            "Implicit cast types must be compatible");
690     Visit(E->getSubExpr());
691     break;
692 
693   case CK_LValueBitCast:
694     llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
695 
696   case CK_Dependent:
697   case CK_BitCast:
698   case CK_ArrayToPointerDecay:
699   case CK_FunctionToPointerDecay:
700   case CK_NullToPointer:
701   case CK_NullToMemberPointer:
702   case CK_BaseToDerivedMemberPointer:
703   case CK_DerivedToBaseMemberPointer:
704   case CK_MemberPointerToBoolean:
705   case CK_ReinterpretMemberPointer:
706   case CK_IntegralToPointer:
707   case CK_PointerToIntegral:
708   case CK_PointerToBoolean:
709   case CK_ToVoid:
710   case CK_VectorSplat:
711   case CK_IntegralCast:
712   case CK_IntegralToBoolean:
713   case CK_IntegralToFloating:
714   case CK_FloatingToIntegral:
715   case CK_FloatingToBoolean:
716   case CK_FloatingCast:
717   case CK_CPointerToObjCPointerCast:
718   case CK_BlockPointerToObjCPointerCast:
719   case CK_AnyPointerToBlockPointerCast:
720   case CK_ObjCObjectLValueCast:
721   case CK_FloatingRealToComplex:
722   case CK_FloatingComplexToReal:
723   case CK_FloatingComplexToBoolean:
724   case CK_FloatingComplexCast:
725   case CK_FloatingComplexToIntegralComplex:
726   case CK_IntegralRealToComplex:
727   case CK_IntegralComplexToReal:
728   case CK_IntegralComplexToBoolean:
729   case CK_IntegralComplexCast:
730   case CK_IntegralComplexToFloatingComplex:
731   case CK_ARCProduceObject:
732   case CK_ARCConsumeObject:
733   case CK_ARCReclaimReturnedObject:
734   case CK_ARCExtendBlockObject:
735   case CK_CopyAndAutoreleaseBlockObject:
736   case CK_BuiltinFnToFnPtr:
737   case CK_ZeroToOCLEvent:
738   case CK_AddressSpaceConversion:
739     llvm_unreachable("cast kind invalid for aggregate types");
740   }
741 }
742 
void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
744   if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
745     EmitAggLoadOfLValue(E);
746     return;
747   }
748 
749   RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
750   EmitMoveFromReturnSlot(E, RV);
751 }
752 
void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
754   RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
755   EmitMoveFromReturnSlot(E, RV);
756 }
757 
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
759   CGF.EmitIgnoredExpr(E->getLHS());
760   Visit(E->getRHS());
761 }
762 
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
764   CodeGenFunction::StmtExprEvaluation eval(CGF);
765   CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
766 }
767 
void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
769   if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
770     VisitPointerToDataMemberBinaryOperator(E);
771   else
772     CGF.ErrorUnsupported(E, "aggregate binary expression");
773 }
774 
void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
776                                                     const BinaryOperator *E) {
777   LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
778   EmitFinalDestCopy(E->getType(), LV);
779 }
780 
781 /// Is the value of the given expression possibly a reference to or
782 /// into a __block variable?
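///
/// For example (illustrative; 'Agg' and 'makeAgg' are hypothetical):
///   __block Agg a;
///   a = makeAgg();
/// Evaluating the RHS might entail a block copy that moves 'a', so callers
/// want to know whether the assignment target may live in a __block variable.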
static bool isBlockVarRef(const Expr *E) {
784   // Make sure we look through parens.
785   E = E->IgnoreParens();
786 
787   // Check for a direct reference to a __block variable.
788   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
789     const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
790     return (var && var->hasAttr<BlocksAttr>());
791   }
792 
793   // More complicated stuff.
794 
795   // Binary operators.
796   if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
797     // For an assignment or pointer-to-member operation, just care
798     // about the LHS.
799     if (op->isAssignmentOp() || op->isPtrMemOp())
800       return isBlockVarRef(op->getLHS());
801 
802     // For a comma, just care about the RHS.
803     if (op->getOpcode() == BO_Comma)
804       return isBlockVarRef(op->getRHS());
805 
806     // FIXME: pointer arithmetic?
807     return false;
808 
809   // Check both sides of a conditional operator.
810   } else if (const AbstractConditionalOperator *op
811                = dyn_cast<AbstractConditionalOperator>(E)) {
812     return isBlockVarRef(op->getTrueExpr())
813         || isBlockVarRef(op->getFalseExpr());
814 
815   // OVEs are required to support BinaryConditionalOperators.
816   } else if (const OpaqueValueExpr *op
817                = dyn_cast<OpaqueValueExpr>(E)) {
818     if (const Expr *src = op->getSourceExpr())
819       return isBlockVarRef(src);
820 
821   // Casts are necessary to get things like (*(int*)&var) = foo().
822   // We don't really care about the kind of cast here, except
823   // we don't want to look through l2r casts, because it's okay
824   // to get the *value* in a __block variable.
825   } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
826     if (cast->getCastKind() == CK_LValueToRValue)
827       return false;
828     return isBlockVarRef(cast->getSubExpr());
829 
830   // Handle unary operators.  Again, just aggressively look through
831   // it, ignoring the operation.
832   } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
833     return isBlockVarRef(uop->getSubExpr());
834 
835   // Look into the base of a field access.
836   } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
837     return isBlockVarRef(mem->getBase());
838 
839   // Look into the base of a subscript.
840   } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
841     return isBlockVarRef(sub->getBase());
842   }
843 
844   return false;
845 }
846 
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
848   // For an assignment to work, the value on the right has
849   // to be compatible with the value on the left.
850   assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
851                                                  E->getRHS()->getType())
852          && "Invalid assignment");
853 
854   // If the LHS might be a __block variable, and the RHS can
855   // potentially cause a block copy, we need to evaluate the RHS first
856   // so that the assignment goes the right place.
857   // This is pretty semantically fragile.
858   if (isBlockVarRef(E->getLHS()) &&
859       E->getRHS()->HasSideEffects(CGF.getContext())) {
860     // Ensure that we have a destination, and evaluate the RHS into that.
861     EnsureDest(E->getRHS()->getType());
862     Visit(E->getRHS());
863 
864     // Now emit the LHS and copy into it.
865     LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
866 
867     // That copy is an atomic copy if the LHS is atomic.
868     if (LHS.getType()->isAtomicType() ||
869         CGF.LValueIsSuitableForInlineAtomic(LHS)) {
870       CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
871       return;
872     }
873 
874     EmitCopy(E->getLHS()->getType(),
875              AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
876                                      needsGC(E->getLHS()->getType()),
877                                      AggValueSlot::IsAliased),
878              Dest);
879     return;
880   }
881 
882   LValue LHS = CGF.EmitLValue(E->getLHS());
883 
884   // If we have an atomic type, evaluate into the destination and then
885   // do an atomic copy.
886   if (LHS.getType()->isAtomicType() ||
887       CGF.LValueIsSuitableForInlineAtomic(LHS)) {
888     EnsureDest(E->getRHS()->getType());
889     Visit(E->getRHS());
890     CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
891     return;
892   }
893 
894   // Codegen the RHS so that it stores directly into the LHS.
895   AggValueSlot LHSSlot =
896     AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
897                             needsGC(E->getLHS()->getType()),
898                             AggValueSlot::IsAliased);
  // A non-volatile aggregate destination might have volatile members.
900   if (!LHSSlot.isVolatile() &&
901       CGF.hasVolatileMember(E->getLHS()->getType()))
902     LHSSlot.setVolatile(true);
903 
904   CGF.EmitAggExpr(E->getRHS(), LHSSlot);
905 
906   // Copy into the destination if the assignment isn't ignored.
907   EmitFinalDestCopy(E->getType(), LHS);
908 }
909 
910 void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
912   llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
913   llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
914   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
915 
916   // Bind the common expression if necessary.
917   CodeGenFunction::OpaqueValueMapping binding(CGF, E);
918 
919   RegionCounter Cnt = CGF.getPGORegionCounter(E);
920   CodeGenFunction::ConditionalEvaluation eval(CGF);
921   CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock, Cnt.getCount());
922 
923   // Save whether the destination's lifetime is externally managed.
924   bool isExternallyDestructed = Dest.isExternallyDestructed();
925 
926   eval.begin(CGF);
927   CGF.EmitBlock(LHSBlock);
928   Cnt.beginRegion(Builder);
929   Visit(E->getTrueExpr());
930   eval.end(CGF);
931 
932   assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
933   CGF.Builder.CreateBr(ContBlock);
934 
935   // If the result of an agg expression is unused, then the emission
936   // of the LHS might need to create a destination slot.  That's fine
937   // with us, and we can safely emit the RHS into the same slot, but
938   // we shouldn't claim that it's already being destructed.
939   Dest.setExternallyDestructed(isExternallyDestructed);
940 
941   eval.begin(CGF);
942   CGF.EmitBlock(RHSBlock);
943   Visit(E->getFalseExpr());
944   eval.end(CGF);
945 
946   CGF.EmitBlock(ContBlock);
947 }
948 
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
950   Visit(CE->getChosenSubExpr());
951 }
952 
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
954   llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
955   llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
956 
957   if (!ArgPtr) {
958     // If EmitVAArg fails, we fall back to the LLVM instruction.
959     llvm::Value *Val =
960         Builder.CreateVAArg(ArgValue, CGF.ConvertType(VE->getType()));
961     if (!Dest.isIgnored())
962       Builder.CreateStore(Val, Dest.getAddr());
963     return;
964   }
965 
966   EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
967 }
968 
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
970   // Ensure that we have a slot, but if we already do, remember
971   // whether it was externally destructed.
972   bool wasExternallyDestructed = Dest.isExternallyDestructed();
973   EnsureDest(E->getType());
974 
975   // We're going to push a destructor if there isn't already one.
976   Dest.setExternallyDestructed();
977 
978   Visit(E->getSubExpr());
979 
980   // Push that destructor we promised.
981   if (!wasExternallyDestructed)
982     CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
983 }
984 
985 void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
987   AggValueSlot Slot = EnsureSlot(E->getType());
988   CGF.EmitCXXConstructExpr(E, Slot);
989 }
990 
991 void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
993   AggValueSlot Slot = EnsureSlot(E->getType());
994   CGF.EmitLambdaExpr(E, Slot);
995 }
996 
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
998   CGF.enterFullExpression(E);
999   CodeGenFunction::RunCleanupsScope cleanups(CGF);
1000   Visit(E->getSubExpr());
1001 }
1002 
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1004   QualType T = E->getType();
1005   AggValueSlot Slot = EnsureSlot(T);
1006   EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
1007 }
1008 
void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1010   QualType T = E->getType();
1011   AggValueSlot Slot = EnsureSlot(T);
1012   EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
1013 }
1014 
1015 /// isSimpleZero - If emitting this value will obviously just cause a store of
1016 /// zero to memory, return true.  This can return false if uncertain, so it just
1017 /// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1019   E = E->IgnoreParens();
1020 
1021   // 0
1022   if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1023     return IL->getValue() == 0;
1024   // +0.0
1025   if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1026     return FL->getValue().isPosZero();
1027   // int()
1028   if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
1029       CGF.getTypes().isZeroInitializable(E->getType()))
1030     return true;
1031   // (int*)0 - Null pointer expressions.
1032   if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1033     return ICE->getCastKind() == CK_NullToPointer;
1034   // '\0'
1035   if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1036     return CL->getValue() == 0;
1037 
1038   // Otherwise, hard case: conservatively return false.
1039   return false;
1040 }
1041 
1042 
1043 void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1045   QualType type = LV.getType();
1046   // FIXME: Ignore result?
1047   // FIXME: Are initializers affected by volatile?
1048   if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1049     // Storing "i32 0" to a zero'd memory location is a noop.
1050     return;
1051   } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
1052     return EmitNullInitializationToLValue(LV);
1053   } else if (type->isReferenceType()) {
1054     RValue RV = CGF.EmitReferenceBindingToExpr(E);
1055     return CGF.EmitStoreThroughLValue(RV, LV);
1056   }
1057 
1058   switch (CGF.getEvaluationKind(type)) {
1059   case TEK_Complex:
1060     CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
1061     return;
1062   case TEK_Aggregate:
1063     CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
1064                                                AggValueSlot::IsDestructed,
1065                                       AggValueSlot::DoesNotNeedGCBarriers,
1066                                                AggValueSlot::IsNotAliased,
1067                                                Dest.isZeroed()));
1068     return;
1069   case TEK_Scalar:
1070     if (LV.isSimple()) {
1071       CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
1072     } else {
1073       CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
1074     }
1075     return;
1076   }
1077   llvm_unreachable("bad evaluation kind");
1078 }
1079 
void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1081   QualType type = lv.getType();
1082 
1083   // If the destination slot is already zeroed out before the aggregate is
1084   // copied into it, we don't have to emit any zeros here.
1085   if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1086     return;
1087 
1088   if (CGF.hasScalarEvaluationKind(type)) {
1089     // For non-aggregates, we can store the appropriate null constant.
1090     llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1091     // Note that the following is not equivalent to
1092     // EmitStoreThroughBitfieldLValue for ARC types.
1093     if (lv.isBitField()) {
1094       CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
1095     } else {
1096       assert(lv.isSimple());
1097       CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1098     }
1099   } else {
1100     // There's a potential optimization opportunity in combining
1101     // memsets; that would be easy for arrays, but relatively
1102     // difficult for structures with the current code.
1103     CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
1104   }
1105 }
1106 
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1108 #if 0
1109   // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
1110   // (Length of globals? Chunks of zeroed-out space?).
1111   //
1112   // If we can, prefer a copy from a global; this is a lot less code for long
1113   // globals, and it's easier for the current optimizers to analyze.
1114   if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
1115     llvm::GlobalVariable* GV =
1116     new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1117                              llvm::GlobalValue::InternalLinkage, C, "");
1118     EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
1119     return;
1120   }
1121 #endif
1122   if (E->hadArrayRangeDesignator())
1123     CGF.ErrorUnsupported(E, "GNU array range designator extension");
1124 
1125   AggValueSlot Dest = EnsureSlot(E->getType());
1126 
1127   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
1128                                      Dest.getAlignment());
1129 
1130   // Handle initialization of an array.
1131   if (E->getType()->isArrayType()) {
1132     if (E->isStringLiteralInit())
1133       return Visit(E->getInit(0));
1134 
1135     QualType elementType =
1136         CGF.getContext().getAsArrayType(E->getType())->getElementType();
1137 
1138     llvm::PointerType *APType =
1139       cast<llvm::PointerType>(Dest.getAddr()->getType());
1140     llvm::ArrayType *AType =
1141       cast<llvm::ArrayType>(APType->getElementType());
1142 
1143     EmitArrayInit(Dest.getAddr(), AType, elementType, E);
1144     return;
1145   }
1146 
1147   if (E->getType()->isAtomicType()) {
1148     // An _Atomic(T) object can be list-initialized from an expression
1149     // of the same type.
1150     assert(E->getNumInits() == 1 &&
1151            CGF.getContext().hasSameUnqualifiedType(E->getInit(0)->getType(),
1152                                                    E->getType()) &&
1153            "unexpected list initialization for atomic object");
1154     return Visit(E->getInit(0));
1155   }
1156 
1157   assert(E->getType()->isRecordType() && "Only support structs/unions here!");
1158 
1159   // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
1161   // the disadvantage is that the generated code is more difficult for
1162   // the optimizer, especially with bitfields.
1163   unsigned NumInitElements = E->getNumInits();
1164   RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
1165 
1166   // Prepare a 'this' for CXXDefaultInitExprs.
1167   CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddr());
1168 
1169   if (record->isUnion()) {
1170     // Only initialize one field of a union. The field itself is
1171     // specified by the initializer list.
1172     if (!E->getInitializedFieldInUnion()) {
1173       // Empty union; we have nothing to do.
1174 
1175 #ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
1177       // semantic analysis.
1178       for (const auto *Field : record->fields())
1179         assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
1180 #endif
1181       return;
1182     }
1183 
1184     // FIXME: volatility
1185     FieldDecl *Field = E->getInitializedFieldInUnion();
1186 
1187     LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1188     if (NumInitElements) {
1189       // Store the initializer into the field
1190       EmitInitializationToLValue(E->getInit(0), FieldLoc);
1191     } else {
1192       // Default-initialize to null.
1193       EmitNullInitializationToLValue(FieldLoc);
1194     }
1195 
1196     return;
1197   }
1198 
1199   // We'll need to enter cleanup scopes in case any of the member
1200   // initializers throw an exception.
1201   SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
1202   llvm::Instruction *cleanupDominator = nullptr;
1203 
1204   // Here we iterate over the fields; this makes it simpler to both
1205   // default-initialize fields and skip over unnamed fields.
1206   unsigned curInitIndex = 0;
1207   for (const auto *field : record->fields()) {
1208     // We're done once we hit the flexible array member.
1209     if (field->getType()->isIncompleteArrayType())
1210       break;
1211 
1212     // Always skip anonymous bitfields.
1213     if (field->isUnnamedBitfield())
1214       continue;
1215 
1216     // We're done if we reach the end of the explicit initializers, we
1217     // have a zeroed object, and the rest of the fields are
1218     // zero-initializable.
1219     if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1220         CGF.getTypes().isZeroInitializable(E->getType()))
1221       break;
1222 
1223 
1224     LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
1226     LV.setNonGC(true);
1227 
1228     if (curInitIndex < NumInitElements) {
1229       // Store the initializer into the field.
1230       EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
1231     } else {
      // We're out of initializers; default-initialize to null
1233       EmitNullInitializationToLValue(LV);
1234     }
1235 
1236     // Push a destructor if necessary.
1237     // FIXME: if we have an array of structures, all explicitly
1238     // initialized, we can end up pushing a linear number of cleanups.
1239     bool pushedCleanup = false;
1240     if (QualType::DestructionKind dtorKind
1241           = field->getType().isDestructedType()) {
1242       assert(LV.isSimple());
1243       if (CGF.needsEHCleanup(dtorKind)) {
1244         if (!cleanupDominator)
1245           cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder
1246 
1247         CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
1248                         CGF.getDestroyer(dtorKind), false);
1249         cleanups.push_back(CGF.EHStack.stable_begin());
1250         pushedCleanup = true;
1251       }
1252     }
1253 
1254     // If the GEP didn't get used because of a dead zero init or something
1255     // else, clean it up for -O0 builds and general tidiness.
1256     if (!pushedCleanup && LV.isSimple())
1257       if (llvm::GetElementPtrInst *GEP =
1258             dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
1259         if (GEP->use_empty())
1260           GEP->eraseFromParent();
1261   }
1262 
1263   // Deactivate all the partial cleanups in reverse order, which
1264   // generally means popping them.
1265   for (unsigned i = cleanups.size(); i != 0; --i)
1266     CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
1267 
1268   // Destroy the placeholder if we made one.
1269   if (cleanupDominator)
1270     cleanupDominator->eraseFromParent();
1271 }
1272 
1273 //===----------------------------------------------------------------------===//
1274 //                        Entry Points into this File
1275 //===----------------------------------------------------------------------===//
1276 
1277 /// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1278 /// non-zero bytes that will be stored when outputting the initializer for the
1279 /// specified initializer expression.
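///
/// For example (illustrative): for struct { int a, b, c, d; } x = { 1 }; only
/// the first field needs a non-zero store, so the estimate is roughly
/// sizeof(int) rather than sizeof(x).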
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1281   E = E->IgnoreParens();
1282 
1283   // 0 and 0.0 won't require any non-zero stores!
1284   if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1285 
  // If this is an initlist expr, sum up the sizes of the (present)
1287   // elements.  If this is something weird, assume the whole thing is non-zero.
1288   const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1289   if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1290     return CGF.getContext().getTypeSizeInChars(E->getType());
1291 
1292   // InitListExprs for structs have to be handled carefully.  If there are
1293   // reference members, we need to consider the size of the reference, not the
1294   // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = RT->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (const auto *Field : SD->fields()) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
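/// Illustrative sketch (not tied to exact IR): for
///   struct S { int a, b, c[62]; } s = { 1, 2 };
/// we would rather emit one memset(&s, 0, sizeof(S)) followed by the two
/// stores of 'a' and 'b' than store every field and element individually.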
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == nullptr)
    return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }
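  // (The exemption above: e.g. for 'struct S { S(); int big[64]; };' the
  //  constructor is responsible for initialization, so pre-zeroing the memory
  //  would just be wasted work.)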

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if at least 3/4 of the initializer is known to be zero.  If
  // so, we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;

  // Okay, it seems like a good idea to use an initial memset; emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();

  Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity(), false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the given slot.  Note that if the slot
/// is ignored, the value of the aggregate expression is not needed.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != nullptr || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile,
                                        CharUnits alignment,
                                        bool isAssignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
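      // (A type with a non-trivial copy or move operation, std::string for
      //  example, must be copied by invoking that operation rather than by a
      //  raw byte copy, so such types should never reach this path.)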
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.
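  //
  // For example (roughly; the exact intrinsic signature depends on the LLVM
  // version and target), for 'struct T { int x[8]; } a, b;' the assignment
  // 'a = b;' becomes something like
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 32, i32 4, i1 false)
  // instead of eight element-wise loads and stores.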

  // Get data size and alignment info for this aggregate. If this is an
  // assignment, don't copy the tail padding. Otherwise copying it is fine.
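  // (The classic reason: under the Itanium C++ ABI a derived class may place
  //  its own fields in a non-POD base class's tail padding, e.g.
  //    struct B { int i; char c; B(); };
  //    struct D : B { char d; };   // 'd' can live at offset 5, inside B's padding
  //  so assigning to the B subobject must not overwrite those padding bytes.)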
  std::pair<CharUnits, CharUnits> TypeInfo;
  if (isAssignment)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  if (alignment.isZero())
    alignment = TypeInfo.second;

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP);

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
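  // (The !tbaa.struct metadata lets a pass that later splits the memcpy into
  //  per-field copies attach the right TBAA tag to each piece and skip over
  //  padding holes.)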
  llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       alignment.getQuantity(), isVolatile,
                       /*TBAATag=*/nullptr, TBAAStructTag);
}