1 //===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Stmt nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "CodeGenFunction.h"
15 #include "CGDebugInfo.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/StmtVisitor.h"
19 #include "clang/Basic/Builtins.h"
20 #include "clang/Basic/PrettyStackTrace.h"
21 #include "clang/Basic/TargetInfo.h"
22 #include "clang/Sema/LoopHint.h"
23 #include "clang/Sema/SemaDiagnostic.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/IR/CallSite.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/IR/InlineAsm.h"
28 #include "llvm/IR/Intrinsics.h"
29 #include "llvm/IR/MDBuilder.h"
30
31 using namespace clang;
32 using namespace CodeGen;
33
34 //===----------------------------------------------------------------------===//
35 // Statement Emission
36 //===----------------------------------------------------------------------===//
37
38 void CodeGenFunction::EmitStopPoint(const Stmt *S) {
39 if (CGDebugInfo *DI = getDebugInfo()) {
40 SourceLocation Loc;
41 Loc = S->getLocStart();
42 DI->EmitLocation(Builder, Loc);
43
44 LastStopPoint = Loc;
45 }
46 }
47
48 void CodeGenFunction::EmitStmt(const Stmt *S) {
49 assert(S && "Null statement?");
50 PGO.setCurrentStmt(S);
51
52 // These statements have their own debug info handling.
53 if (EmitSimpleStmt(S))
54 return;
55
56 // Check if we are generating unreachable code.
57 if (!HaveInsertPoint()) {
58 // If so, and the statement doesn't contain a label, then we do not need to
59 // generate actual code. This is safe because (1) the current point is
60 // unreachable, so we don't need to execute the code, and (2) we've already
61 // handled the statements which update internal data structures (like the
62 // local variable map) which could be used by subsequent statements.
63 if (!ContainsLabel(S)) {
64 // Verify that any decl statements were handled as simple; they may be in
65 // the scope of subsequent reachable statements.
66 assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
67 return;
68 }
69
70 // Otherwise, make a new block to hold the code.
71 EnsureInsertPoint();
72 }
73
74 // Generate a stoppoint if we are emitting debug info.
75 EmitStopPoint(S);
76
77 switch (S->getStmtClass()) {
78 case Stmt::NoStmtClass:
79 case Stmt::CXXCatchStmtClass:
80 case Stmt::SEHExceptStmtClass:
81 case Stmt::SEHFinallyStmtClass:
82 case Stmt::MSDependentExistsStmtClass:
83 llvm_unreachable("invalid statement class to emit generically");
84 case Stmt::NullStmtClass:
85 case Stmt::CompoundStmtClass:
86 case Stmt::DeclStmtClass:
87 case Stmt::LabelStmtClass:
88 case Stmt::AttributedStmtClass:
89 case Stmt::GotoStmtClass:
90 case Stmt::BreakStmtClass:
91 case Stmt::ContinueStmtClass:
92 case Stmt::DefaultStmtClass:
93 case Stmt::CaseStmtClass:
94 case Stmt::SEHLeaveStmtClass:
95 llvm_unreachable("should have emitted these statements as simple");
96
97 #define STMT(Type, Base)
98 #define ABSTRACT_STMT(Op)
99 #define EXPR(Type, Base) \
100 case Stmt::Type##Class:
101 #include "clang/AST/StmtNodes.inc"
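// For example, the generated .inc file's EXPR entry for BinaryOperator
// expands (via the macros above) to "case Stmt::BinaryOperatorClass:", so
// every concrete expression class funnels into the block below, while plain
// statement nodes (STMT) and abstract nodes expand to nothing here.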
102 {
103 // Remember the block we came in on.
104 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
105 assert(incoming && "expression emission must have an insertion point");
106
107 EmitIgnoredExpr(cast<Expr>(S));
108
109 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
110 assert(outgoing && "expression emission cleared block!");
111
112 // The expression emitters assume (reasonably!) that the insertion
113 // point is always set. To maintain that, the call-emission code
114 // for noreturn functions has to enter a new block with no
115 // predecessors. We want to kill that block and mark the current
116 // insertion point unreachable in the common case of a call like
117 // "exit();". Since expression emission doesn't otherwise create
118 // blocks with no predecessors, we can just test for that.
119 // However, we must be careful not to do this to our incoming
120 // block, because *statement* emission does sometimes create
121 // reachable blocks which will have no predecessors until later in
122 // the function. This occurs with, e.g., labels that are not
123 // reachable by fallthrough.
124 if (incoming != outgoing && outgoing->use_empty()) {
125 outgoing->eraseFromParent();
126 Builder.ClearInsertionPoint();
127 }
128 break;
129 }
130
131 case Stmt::IndirectGotoStmtClass:
132 EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
133
134 case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
135 case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break;
136 case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break;
137 case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break;
138
139 case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
140
141 case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
142 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
143 case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
144 case Stmt::CoroutineBodyStmtClass:
145 case Stmt::CoreturnStmtClass:
146 CGM.ErrorUnsupported(S, "coroutine");
147 break;
148 case Stmt::CapturedStmtClass: {
149 const CapturedStmt *CS = cast<CapturedStmt>(S);
150 EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
151 }
152 break;
153 case Stmt::ObjCAtTryStmtClass:
154 EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
155 break;
156 case Stmt::ObjCAtCatchStmtClass:
157 llvm_unreachable(
158 "@catch statements should be handled by EmitObjCAtTryStmt");
159 case Stmt::ObjCAtFinallyStmtClass:
160 llvm_unreachable(
161 "@finally statements should be handled by EmitObjCAtTryStmt");
162 case Stmt::ObjCAtThrowStmtClass:
163 EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
164 break;
165 case Stmt::ObjCAtSynchronizedStmtClass:
166 EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
167 break;
168 case Stmt::ObjCForCollectionStmtClass:
169 EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
170 break;
171 case Stmt::ObjCAutoreleasePoolStmtClass:
172 EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
173 break;
174
175 case Stmt::CXXTryStmtClass:
176 EmitCXXTryStmt(cast<CXXTryStmt>(*S));
177 break;
178 case Stmt::CXXForRangeStmtClass:
179 EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
180 break;
181 case Stmt::SEHTryStmtClass:
182 EmitSEHTryStmt(cast<SEHTryStmt>(*S));
183 break;
184 case Stmt::OMPParallelDirectiveClass:
185 EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
186 break;
187 case Stmt::OMPSimdDirectiveClass:
188 EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
189 break;
190 case Stmt::OMPForDirectiveClass:
191 EmitOMPForDirective(cast<OMPForDirective>(*S));
192 break;
193 case Stmt::OMPForSimdDirectiveClass:
194 EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
195 break;
196 case Stmt::OMPSectionsDirectiveClass:
197 EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
198 break;
199 case Stmt::OMPSectionDirectiveClass:
200 EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
201 break;
202 case Stmt::OMPSingleDirectiveClass:
203 EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
204 break;
205 case Stmt::OMPMasterDirectiveClass:
206 EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
207 break;
208 case Stmt::OMPCriticalDirectiveClass:
209 EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
210 break;
211 case Stmt::OMPParallelForDirectiveClass:
212 EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
213 break;
214 case Stmt::OMPParallelForSimdDirectiveClass:
215 EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
216 break;
217 case Stmt::OMPParallelSectionsDirectiveClass:
218 EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
219 break;
220 case Stmt::OMPTaskDirectiveClass:
221 EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
222 break;
223 case Stmt::OMPTaskyieldDirectiveClass:
224 EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
225 break;
226 case Stmt::OMPBarrierDirectiveClass:
227 EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
228 break;
229 case Stmt::OMPTaskwaitDirectiveClass:
230 EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
231 break;
232 case Stmt::OMPTaskgroupDirectiveClass:
233 EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
234 break;
235 case Stmt::OMPFlushDirectiveClass:
236 EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
237 break;
238 case Stmt::OMPOrderedDirectiveClass:
239 EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
240 break;
241 case Stmt::OMPAtomicDirectiveClass:
242 EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
243 break;
244 case Stmt::OMPTargetDirectiveClass:
245 EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
246 break;
247 case Stmt::OMPTeamsDirectiveClass:
248 EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
249 break;
250 case Stmt::OMPCancellationPointDirectiveClass:
251 EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
252 break;
253 case Stmt::OMPCancelDirectiveClass:
254 EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
255 break;
256 case Stmt::OMPTargetDataDirectiveClass:
257 EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
258 break;
259 case Stmt::OMPTaskLoopDirectiveClass:
260 EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
261 break;
262 case Stmt::OMPTaskLoopSimdDirectiveClass:
263 EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
264 break;
265 case Stmt::OMPDistributeDirectiveClass:
266 EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
267 break;
268 }
269 }
270
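/// EmitSimpleStmt - Emit the given statement if it is one of the "simple"
/// kinds that manage their own debug locations (see the check at the top of
/// EmitStmt). Returns true if the statement was handled here, false if the
/// caller should emit it through the general path.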
271 bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
272 switch (S->getStmtClass()) {
273 default: return false;
274 case Stmt::NullStmtClass: break;
275 case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
276 case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
277 case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
278 case Stmt::AttributedStmtClass:
279 EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
280 case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
281 case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
282 case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
283 case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
284 case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
285 case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
286 }
287
288 return true;
289 }
290
291 /// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
292 /// this captures the expression result of the last sub-statement and returns it
293 /// (for use by the statement expression extension).
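/// For example, in the GNU statement expression "int x = ({ f(); 42; });"
/// the trailing expression (42) is the value of the whole construct, so
/// GetLast is true and that value is captured rather than discarded.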
294 Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
295 AggValueSlot AggSlot) {
296 PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
297 "LLVM IR generation of compound statement ('{}')");
298
299 // Keep track of the current cleanup stack depth, including debug scopes.
300 LexicalScope Scope(*this, S.getSourceRange());
301
302 return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
303 }
304
305 Address
306 CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
307 bool GetLast,
308 AggValueSlot AggSlot) {
309
310 for (CompoundStmt::const_body_iterator I = S.body_begin(),
311 E = S.body_end()-GetLast; I != E; ++I)
312 EmitStmt(*I);
313
314 Address RetAlloca = Address::invalid();
315 if (GetLast) {
316 // We have to special case labels here. They are statements, but when put
317 // at the end of a statement expression, they yield the value of their
318 // subexpression. Handle this by walking through all labels we encounter,
319 // emitting them before we evaluate the subexpr.
320 const Stmt *LastStmt = S.body_back();
321 while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
322 EmitLabel(LS->getDecl());
323 LastStmt = LS->getSubStmt();
324 }
325
326 EnsureInsertPoint();
327
328 QualType ExprTy = cast<Expr>(LastStmt)->getType();
329 if (hasAggregateEvaluationKind(ExprTy)) {
330 EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
331 } else {
332 // We can't return an RValue here because there might be cleanups at
333 // the end of the StmtExpr. Because of that, we have to emit the result
334 // here into a temporary alloca.
335 RetAlloca = CreateMemTemp(ExprTy);
336 EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
337 /*IsInit*/false);
338 }
339
340 }
341
342 return RetAlloca;
343 }
344
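/// SimplifyForwardingBlocks - If the given block contains nothing but an
/// unconditional branch, forward its uses to the branch target and delete
/// it. This cleans up loop-header/condition blocks that turned out to be
/// trivial (e.g. for "while (1)" or "do {} while (0)").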
345 void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
346 llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
347
348 // If there is a cleanup stack, then it isn't worth trying to
349 // simplify this block (we would need to remove it from the scope map
350 // and cleanup entry).
351 if (!EHStack.empty())
352 return;
353
354 // Can only simplify direct branches.
355 if (!BI || !BI->isUnconditional())
356 return;
357
358 // Can only simplify empty blocks.
359 if (BI->getIterator() != BB->begin())
360 return;
361
362 BB->replaceAllUsesWith(BI->getSuccessor(0));
363 BI->eraseFromParent();
364 BB->eraseFromParent();
365 }
366
367 void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
368 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
369
370 // Fall out of the current block (if necessary).
371 EmitBranch(BB);
372
373 if (IsFinished && BB->use_empty()) {
374 delete BB;
375 return;
376 }
377
378 // Place the block after the current block, if possible, or else at
379 // the end of the function.
380 if (CurBB && CurBB->getParent())
381 CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
382 else
383 CurFn->getBasicBlockList().push_back(BB);
384 Builder.SetInsertPoint(BB);
385 }
386
387 void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
388 // Emit a branch from the current block to the target one if this
389 // was a real block. If this was just a fall-through block after a
390 // terminator, don't emit it.
391 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
392
393 if (!CurBB || CurBB->getTerminator()) {
394 // If there is no insert point or the previous block is already
395 // terminated, don't touch it.
396 } else {
397 // Otherwise, create a fall-through branch.
398 Builder.CreateBr(Target);
399 }
400
401 Builder.ClearInsertionPoint();
402 }
403
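/// EmitBlockAfterUses - Insert the given (unparented) block into the current
/// function right after the block containing its first instruction user, or
/// at the end of the function if no such user exists yet, then make it the
/// insertion point.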
404 void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
405 bool inserted = false;
406 for (llvm::User *u : block->users()) {
407 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
408 CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
409 block);
410 inserted = true;
411 break;
412 }
413 }
414
415 if (!inserted)
416 CurFn->getBasicBlockList().push_back(block);
417
418 Builder.SetInsertPoint(block);
419 }
420
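/// getJumpDestForLabel - Return the JumpDest for the given label, lazily
/// creating its (not yet inserted) basic block. The scope depth is left
/// invalid here; EmitLabel fills it in when the label is actually emitted.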
421 CodeGenFunction::JumpDest
422 CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
423 JumpDest &Dest = LabelMap[D];
424 if (Dest.isValid()) return Dest;
425
426 // Create, but don't insert, the new block.
427 Dest = JumpDest(createBasicBlock(D->getName()),
428 EHScopeStack::stable_iterator::invalid(),
429 NextCleanupDestIndex++);
430 return Dest;
431 }
432
433 void CodeGenFunction::EmitLabel(const LabelDecl *D) {
434 // Add this label to the current lexical scope if we're within any
435 // normal cleanups. Jumps "in" to this label --- when permitted by
436 // the language --- may need to be routed around such cleanups.
437 if (EHStack.hasNormalCleanups() && CurLexicalScope)
438 CurLexicalScope->addLabel(D);
439
440 JumpDest &Dest = LabelMap[D];
441
442 // If we didn't need a forward reference to this label, just go
443 // ahead and create a destination at the current scope.
444 if (!Dest.isValid()) {
445 Dest = getJumpDestInCurrentScope(D->getName());
446
447 // Otherwise, we need to give this label a target depth and remove
448 // it from the branch-fixups list.
449 } else {
450 assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
451 Dest.setScopeDepth(EHStack.stable_begin());
452 ResolveBranchFixups(Dest.getBlock());
453 }
454
455 EmitBlock(Dest.getBlock());
456 incrementProfileCounter(D->getStmt());
457 }
458
459 /// Change the cleanup scope of the labels in this lexical scope to
460 /// match the scope of the enclosing context.
461 void CodeGenFunction::LexicalScope::rescopeLabels() {
462 assert(!Labels.empty());
463 EHScopeStack::stable_iterator innermostScope
464 = CGF.EHStack.getInnermostNormalCleanup();
465
466 // Change the scope depth of all the labels.
467 for (SmallVectorImpl<const LabelDecl*>::const_iterator
468 i = Labels.begin(), e = Labels.end(); i != e; ++i) {
469 assert(CGF.LabelMap.count(*i));
470 JumpDest &dest = CGF.LabelMap.find(*i)->second;
471 assert(dest.getScopeDepth().isValid());
472 assert(innermostScope.encloses(dest.getScopeDepth()));
473 dest.setScopeDepth(innermostScope);
474 }
475
476 // Reparent the labels if the new scope also has cleanups.
477 if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
478 ParentScope->Labels.append(Labels.begin(), Labels.end());
479 }
480 }
481
482
483 void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
484 EmitLabel(S.getDecl());
485 EmitStmt(S.getSubStmt());
486 }
487
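// For IR generation, statement attributes currently only matter on the loop
// forms handled below (e.g. loop hints from "#pragma clang loop"); for any
// other sub-statement the attribute is dropped and the statement is emitted
// normally.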
488 void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
489 const Stmt *SubStmt = S.getSubStmt();
490 switch (SubStmt->getStmtClass()) {
491 case Stmt::DoStmtClass:
492 EmitDoStmt(cast<DoStmt>(*SubStmt), S.getAttrs());
493 break;
494 case Stmt::ForStmtClass:
495 EmitForStmt(cast<ForStmt>(*SubStmt), S.getAttrs());
496 break;
497 case Stmt::WhileStmtClass:
498 EmitWhileStmt(cast<WhileStmt>(*SubStmt), S.getAttrs());
499 break;
500 case Stmt::CXXForRangeStmtClass:
501 EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*SubStmt), S.getAttrs());
502 break;
503 default:
504 EmitStmt(SubStmt);
505 }
506 }
507
508 void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
509 // If this code is reachable then emit a stop point (if generating
510 // debug info). We have to do this ourselves because we are on the
511 // "simple" statement path.
512 if (HaveInsertPoint())
513 EmitStopPoint(&S);
514
515 EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
516 }
517
518
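// Indirect gotos come from the GNU "labels as values" extension, e.g.
//   void *addr = &&some_label;  ...  goto *addr;
// All such jumps are funneled through one shared indirect-goto block whose
// leading PHI node collects the possible target addresses (see below).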
519 void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
520 if (const LabelDecl *Target = S.getConstantTarget()) {
521 EmitBranchThroughCleanup(getJumpDestForLabel(Target));
522 return;
523 }
524
525 // Ensure that we have an i8* for our PHI node.
526 llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
527 Int8PtrTy, "addr");
528 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
529
530 // Get the basic block for the indirect goto.
531 llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
532
533 // The first instruction in the block has to be the PHI for the switch dest,
534 // add an entry for this branch.
535 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
536
537 EmitBranch(IndGotoBB);
538 }
539
540 void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
541 // C99 6.8.4.1: The first substatement is executed if the expression compares
542 // unequal to 0. The condition must be a scalar type.
543 LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
544
545 if (S.getConditionVariable())
546 EmitAutoVarDecl(*S.getConditionVariable());
547
548 // If the condition constant folds and can be elided, try to avoid emitting
549 // the condition and the dead arm of the if/else.
550 bool CondConstant;
551 if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
552 // Figure out which block (then or else) is executed.
553 const Stmt *Executed = S.getThen();
554 const Stmt *Skipped = S.getElse();
555 if (!CondConstant) // Condition false?
556 std::swap(Executed, Skipped);
557
558 // If the skipped block has no labels in it, just emit the executed block.
559 // This avoids emitting dead code and simplifies the CFG substantially.
560 if (!ContainsLabel(Skipped)) {
561 if (CondConstant)
562 incrementProfileCounter(&S);
563 if (Executed) {
564 RunCleanupsScope ExecutedScope(*this);
565 EmitStmt(Executed);
566 }
567 return;
568 }
569 }
570
571 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
572 // the conditional branch.
573 llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
574 llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
575 llvm::BasicBlock *ElseBlock = ContBlock;
576 if (S.getElse())
577 ElseBlock = createBasicBlock("if.else");
578
579 EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
580 getProfileCount(S.getThen()));
581
582 // Emit the 'then' code.
583 EmitBlock(ThenBlock);
584 incrementProfileCounter(&S);
585 {
586 RunCleanupsScope ThenScope(*this);
587 EmitStmt(S.getThen());
588 }
589 EmitBranch(ContBlock);
590
591 // Emit the 'else' code if present.
592 if (const Stmt *Else = S.getElse()) {
593 {
594 // There is no need to emit a line number for an unconditional branch.
595 auto NL = ApplyDebugLocation::CreateEmpty(*this);
596 EmitBlock(ElseBlock);
597 }
598 {
599 RunCleanupsScope ElseScope(*this);
600 EmitStmt(Else);
601 }
602 {
603 // There is no need to emit a line number for an unconditional branch.
604 auto NL = ApplyDebugLocation::CreateEmpty(*this);
605 EmitBranch(ContBlock);
606 }
607 }
608
609 // Emit the continuation block for code after the if.
610 EmitBlock(ContBlock, true);
611 }
612
613 void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
614 ArrayRef<const Attr *> WhileAttrs) {
615 // Emit the header for the loop, which will also become
616 // the continue target.
617 JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
618 EmitBlock(LoopHeader.getBlock());
619
620 LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs);
621
622 // Create an exit block for when the condition fails, which will
623 // also become the break target.
624 JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
625
626 // Store the blocks to use for break and continue.
627 BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
628
629 // C++ [stmt.while]p2:
630 // When the condition of a while statement is a declaration, the
631 // scope of the variable that is declared extends from its point
632 // of declaration (3.3.2) to the end of the while statement.
633 // [...]
634 // The object created in a condition is destroyed and created
635 // with each iteration of the loop.
636 RunCleanupsScope ConditionScope(*this);
637
638 if (S.getConditionVariable())
639 EmitAutoVarDecl(*S.getConditionVariable());
640
641 // Evaluate the conditional in the while header. C99 6.8.5.1: The
642 // evaluation of the controlling expression takes place before each
643 // execution of the loop body.
644 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
645
646 // while(1) is common, avoid extra exit blocks. Be sure
647 // to correctly handle break/continue though.
648 bool EmitBoolCondBranch = true;
649 if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
650 if (C->isOne())
651 EmitBoolCondBranch = false;
652
653 // As long as the condition is true, go to the loop body.
654 llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
655 if (EmitBoolCondBranch) {
656 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
657 if (ConditionScope.requiresCleanups())
658 ExitBlock = createBasicBlock("while.exit");
659 Builder.CreateCondBr(
660 BoolCondVal, LoopBody, ExitBlock,
661 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
662
663 if (ExitBlock != LoopExit.getBlock()) {
664 EmitBlock(ExitBlock);
665 EmitBranchThroughCleanup(LoopExit);
666 }
667 }
668
669 // Emit the loop body. We have to emit this in a cleanup scope
670 // because it might be a singleton DeclStmt.
671 {
672 RunCleanupsScope BodyScope(*this);
673 EmitBlock(LoopBody);
674 incrementProfileCounter(&S);
675 EmitStmt(S.getBody());
676 }
677
678 BreakContinueStack.pop_back();
679
680 // Immediately force cleanup.
681 ConditionScope.ForceCleanup();
682
683 EmitStopPoint(&S);
684 // Branch to the loop header again.
685 EmitBranch(LoopHeader.getBlock());
686
687 LoopStack.pop();
688
689 // Emit the exit block.
690 EmitBlock(LoopExit.getBlock(), true);
691
692 // If we skipped emitting the condition branch, the LoopHeader is
693 // typically just a forwarding branch; try to erase it.
694 if (!EmitBoolCondBranch)
695 SimplifyForwardingBlocks(LoopHeader.getBlock());
696 }
697
698 void CodeGenFunction::EmitDoStmt(const DoStmt &S,
699 ArrayRef<const Attr *> DoAttrs) {
700 JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
701 JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
702
703 uint64_t ParentCount = getCurrentProfileCount();
704
705 // Store the blocks to use for break and continue.
706 BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
707
708 // Emit the body of the loop.
709 llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
710
711 LoopStack.push(LoopBody, CGM.getContext(), DoAttrs);
712
713 EmitBlockWithFallThrough(LoopBody, &S);
714 {
715 RunCleanupsScope BodyScope(*this);
716 EmitStmt(S.getBody());
717 }
718
719 EmitBlock(LoopCond.getBlock());
720
721 // C99 6.8.5.2: "The evaluation of the controlling expression takes place
722 // after each execution of the loop body."
723
724 // Evaluate the conditional in the while header.
725 // C99 6.8.5p2/p4: The first substatement is executed if the expression
726 // compares unequal to 0. The condition must be a scalar type.
727 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
728
729 BreakContinueStack.pop_back();
730
731 // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
732 // to correctly handle break/continue though.
733 bool EmitBoolCondBranch = true;
734 if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
735 if (C->isZero())
736 EmitBoolCondBranch = false;
737
738 // As long as the condition is true, iterate the loop.
739 if (EmitBoolCondBranch) {
740 uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
741 Builder.CreateCondBr(
742 BoolCondVal, LoopBody, LoopExit.getBlock(),
743 createProfileWeightsForLoop(S.getCond(), BackedgeCount));
744 }
745
746 LoopStack.pop();
747
748 // Emit the exit block.
749 EmitBlock(LoopExit.getBlock());
750
751 // If we skipped emitting the condition branch, the DoCond block is
752 // typically just a forwarding branch; try to erase it.
753 if (!EmitBoolCondBranch)
754 SimplifyForwardingBlocks(LoopCond.getBlock());
755 }
756
757 void CodeGenFunction::EmitForStmt(const ForStmt &S,
758 ArrayRef<const Attr *> ForAttrs) {
759 JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
760
761 LexicalScope ForScope(*this, S.getSourceRange());
762
763 // Evaluate the first part before the loop.
764 if (S.getInit())
765 EmitStmt(S.getInit());
766
767 // Start the loop with a block that tests the condition.
768 // If there's an increment, the continue scope will be overwritten
769 // later.
770 JumpDest Continue = getJumpDestInCurrentScope("for.cond");
771 llvm::BasicBlock *CondBlock = Continue.getBlock();
772 EmitBlock(CondBlock);
773
774 LoopStack.push(CondBlock, CGM.getContext(), ForAttrs);
775
776 // If the for loop doesn't have an increment we can just use the
777 // condition as the continue block. Otherwise we'll need to create
778 // a block for it (in the current scope, i.e. in the scope of the
779 // condition), and that will become our continue block.
780 if (S.getInc())
781 Continue = getJumpDestInCurrentScope("for.inc");
782
783 // Store the blocks to use for break and continue.
784 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
785
786 // Create a cleanup scope for the condition variable cleanups.
787 LexicalScope ConditionScope(*this, S.getSourceRange());
788
789 if (S.getCond()) {
790 // If the for statement has a condition scope, emit the local variable
791 // declaration.
792 if (S.getConditionVariable()) {
793 EmitAutoVarDecl(*S.getConditionVariable());
794 }
795
796 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
797 // If there are any cleanups between here and the loop-exit scope,
798 // create a block to stage a loop exit along.
799 if (ForScope.requiresCleanups())
800 ExitBlock = createBasicBlock("for.cond.cleanup");
801
802 // As long as the condition is true, iterate the loop.
803 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
804
805 // C99 6.8.5p2/p4: The first substatement is executed if the expression
806 // compares unequal to 0. The condition must be a scalar type.
807 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
808 Builder.CreateCondBr(
809 BoolCondVal, ForBody, ExitBlock,
810 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
811
812 if (ExitBlock != LoopExit.getBlock()) {
813 EmitBlock(ExitBlock);
814 EmitBranchThroughCleanup(LoopExit);
815 }
816
817 EmitBlock(ForBody);
818 } else {
819 // Treat it as a non-zero constant. Don't even create a new block for the
820 // body, just fall into it.
821 }
822 incrementProfileCounter(&S);
823
824 {
825 // Create a separate cleanup scope for the body, in case it is not
826 // a compound statement.
827 RunCleanupsScope BodyScope(*this);
828 EmitStmt(S.getBody());
829 }
830
831 // If there is an increment, emit it next.
832 if (S.getInc()) {
833 EmitBlock(Continue.getBlock());
834 EmitStmt(S.getInc());
835 }
836
837 BreakContinueStack.pop_back();
838
839 ConditionScope.ForceCleanup();
840
841 EmitStopPoint(&S);
842 EmitBranch(CondBlock);
843
844 ForScope.ForceCleanup();
845
846 LoopStack.pop();
847
848 // Emit the fall-through block.
849 EmitBlock(LoopExit.getBlock(), true);
850 }
851
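// A range-based for loop is emitted from its desugared form: first the range
// and begin/end declarations, then a loop that tests the condition (roughly
// "__begin != __end"), binds the loop variable, runs the body, and finally
// evaluates the increment (roughly "++__begin").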
852 void
853 CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
854 ArrayRef<const Attr *> ForAttrs) {
855 JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
856
857 LexicalScope ForScope(*this, S.getSourceRange());
858
859 // Evaluate the first pieces before the loop.
860 EmitStmt(S.getRangeStmt());
861 EmitStmt(S.getBeginEndStmt());
862
863 // Start the loop with a block that tests the condition.
864 // If there's an increment, the continue scope will be overwritten
865 // later.
866 llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
867 EmitBlock(CondBlock);
868
869 LoopStack.push(CondBlock, CGM.getContext(), ForAttrs);
870
871 // If there are any cleanups between here and the loop-exit scope,
872 // create a block to stage a loop exit along.
873 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
874 if (ForScope.requiresCleanups())
875 ExitBlock = createBasicBlock("for.cond.cleanup");
876
877 // The loop body, consisting of the specified body and the loop variable.
878 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
879
880 // The body is executed if the expression, contextually converted
881 // to bool, is true.
882 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
883 Builder.CreateCondBr(
884 BoolCondVal, ForBody, ExitBlock,
885 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
886
887 if (ExitBlock != LoopExit.getBlock()) {
888 EmitBlock(ExitBlock);
889 EmitBranchThroughCleanup(LoopExit);
890 }
891
892 EmitBlock(ForBody);
893 incrementProfileCounter(&S);
894
895 // Create a block for the increment. In case of a 'continue', we jump there.
896 JumpDest Continue = getJumpDestInCurrentScope("for.inc");
897
898 // Store the blocks to use for break and continue.
899 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
900
901 {
902 // Create a separate cleanup scope for the loop variable and body.
903 LexicalScope BodyScope(*this, S.getSourceRange());
904 EmitStmt(S.getLoopVarStmt());
905 EmitStmt(S.getBody());
906 }
907
908 EmitStopPoint(&S);
909 // If there is an increment, emit it next.
910 EmitBlock(Continue.getBlock());
911 EmitStmt(S.getInc());
912
913 BreakContinueStack.pop_back();
914
915 EmitBranch(CondBlock);
916
917 ForScope.ForceCleanup();
918
919 LoopStack.pop();
920
921 // Emit the fall-through block.
922 EmitBlock(LoopExit.getBlock(), true);
923 }
924
925 void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
926 if (RV.isScalar()) {
927 Builder.CreateStore(RV.getScalarVal(), ReturnValue);
928 } else if (RV.isAggregate()) {
929 EmitAggregateCopy(ReturnValue, RV.getAggregateAddress(), Ty);
930 } else {
931 EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
932 /*init*/ true);
933 }
934 EmitBranchThroughCleanup(ReturnBlock);
935 }
936
937 /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
938 /// if the function returns void, or may be missing one if the function returns
939 /// non-void. Fun stuff :).
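/// For example, "return g();" in a function returning void (where g() also
/// returns void), or a bare "return;" in a function returning int.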
940 void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
941 // Returning from an outlined SEH helper is UB, and we already warn on it.
942 if (IsOutlinedSEHHelper) {
943 Builder.CreateUnreachable();
944 Builder.ClearInsertionPoint();
945 }
946
947 // Emit the result value, even if unused, to evaluate the side effects.
948 const Expr *RV = S.getRetValue();
949
950 // Treat block literals in a return expression as if they appeared
951 // in their own scope. This permits a small, easily-implemented
952 // exception to our over-conservative rules about not jumping to
953 // statements following block literals with non-trivial cleanups.
954 RunCleanupsScope cleanupScope(*this);
955 if (const ExprWithCleanups *cleanups =
956 dyn_cast_or_null<ExprWithCleanups>(RV)) {
957 enterFullExpression(cleanups);
958 RV = cleanups->getSubExpr();
959 }
960
961 // FIXME: Clean this up by using an LValue for ReturnTemp,
962 // EmitStoreThroughLValue, and EmitAnyExpr.
963 if (getLangOpts().ElideConstructors &&
964 S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
965 // Apply the named return value optimization for this return statement,
966 // which means doing nothing: the appropriate result has already been
967 // constructed into the NRVO variable.
968
969 // If there is an NRVO flag for this variable, set it to 1 to indicate
970 // that the cleanup code should not destroy the variable.
971 if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
972 Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
973 } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
974 // Make sure not to return anything, but evaluate the expression
975 // for side effects.
976 if (RV)
977 EmitAnyExpr(RV);
978 } else if (!RV) {
979 // Do nothing (return value is left uninitialized)
980 } else if (FnRetTy->isReferenceType()) {
981 // If this function returns a reference, take the address of the expression
982 // rather than the value.
983 RValue Result = EmitReferenceBindingToExpr(RV);
984 Builder.CreateStore(Result.getScalarVal(), ReturnValue);
985 } else {
986 switch (getEvaluationKind(RV->getType())) {
987 case TEK_Scalar:
988 Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
989 break;
990 case TEK_Complex:
991 EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
992 /*isInit*/ true);
993 break;
994 case TEK_Aggregate:
995 EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue,
996 Qualifiers(),
997 AggValueSlot::IsDestructed,
998 AggValueSlot::DoesNotNeedGCBarriers,
999 AggValueSlot::IsNotAliased));
1000 break;
1001 }
1002 }
1003
1004 ++NumReturnExprs;
1005 if (!RV || RV->isEvaluatable(getContext()))
1006 ++NumSimpleReturnExprs;
1007
1008 cleanupScope.ForceCleanup();
1009 EmitBranchThroughCleanup(ReturnBlock);
1010 }
1011
1012 void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1013 // As long as debug info is modeled with instructions, we have to ensure we
1014 // have a place to insert here and write the stop point here.
1015 if (HaveInsertPoint())
1016 EmitStopPoint(&S);
1017
1018 for (const auto *I : S.decls())
1019 EmitDecl(*I);
1020 }
1021
1022 void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1023 assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1024
1025 // If this code is reachable then emit a stop point (if generating
1026 // debug info). We have to do this ourselves because we are on the
1027 // "simple" statement path.
1028 if (HaveInsertPoint())
1029 EmitStopPoint(&S);
1030
1031 EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
1032 }
1033
1034 void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1035 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1036
1037 // If this code is reachable then emit a stop point (if generating
1038 // debug info). We have to do this ourselves because we are on the
1039 // "simple" statement path.
1040 if (HaveInsertPoint())
1041 EmitStopPoint(&S);
1042
1043 EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1044 }
1045
1046 /// EmitCaseStmtRange - If case statement range is not too big then
1047 /// add multiple cases to switch instruction, one for each value within
1048 /// the range. If range is too big then emit "if" condition check.
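/// Case ranges are a GNU extension, e.g. "case 1 ... 5:". Small ranges are
/// expanded into individual switch cases; large ones become an explicit
/// bounds check chained onto the switch's default destination.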
1049 void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
1050 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1051
1052 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1053 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1054
1055 // Emit the code for this case. We do this first to make sure it is
1056 // properly chained from our predecessor before generating the
1057 // switch machinery to enter this block.
1058 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1059 EmitBlockWithFallThrough(CaseDest, &S);
1060 EmitStmt(S.getSubStmt());
1061
1062 // If range is empty, do nothing.
1063 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1064 return;
1065
1066 llvm::APInt Range = RHS - LHS;
1067 // FIXME: parameters such as this should not be hardcoded.
1068 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1069 // Range is small enough to add multiple switch instruction cases.
1070 uint64_t Total = getProfileCount(&S);
1071 unsigned NCases = Range.getZExtValue() + 1;
1072 // We only have one region counter for the entire set of cases here, so we
1073 // need to divide the weights evenly between the generated cases, ensuring
1074 // that the total weight is preserved. E.g., a weight of 5 over three cases
1075 // will be distributed as weights of 2, 2, and 1.
1076 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1077 for (unsigned I = 0; I != NCases; ++I) {
1078 if (SwitchWeights)
1079 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1080 if (Rem)
1081 Rem--;
1082 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1083 LHS++;
1084 }
1085 return;
1086 }
1087
1088 // The range is too big. Emit "if" condition into a new block,
1089 // making sure to save and restore the current insertion point.
1090 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1091
1092 // Push this test onto the chain of range checks (which terminates
1093 // in the default basic block). The switch's default will be changed
1094 // to the top of this chain after switch emission is complete.
1095 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1096 CaseRangeBlock = createBasicBlock("sw.caserange");
1097
1098 CurFn->getBasicBlockList().push_back(CaseRangeBlock);
1099 Builder.SetInsertPoint(CaseRangeBlock);
1100
1101 // Emit range check.
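// Since Range = RHS - LHS, the unsigned comparison "(Cond - LHS) <=u Range"
// is equivalent to testing LHS <= Cond <= RHS with a single branch.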
1102 llvm::Value *Diff =
1103 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1104 llvm::Value *Cond =
1105 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1106
1107 llvm::MDNode *Weights = nullptr;
1108 if (SwitchWeights) {
1109 uint64_t ThisCount = getProfileCount(&S);
1110 uint64_t DefaultCount = (*SwitchWeights)[0];
1111 Weights = createProfileWeights(ThisCount, DefaultCount);
1112
1113 // Since we're chaining the switch default through each large case range, we
1114 // need to update the weight for the default, i.e., the first case, to include
1115 // this case.
1116 (*SwitchWeights)[0] += ThisCount;
1117 }
1118 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1119
1120 // Restore the appropriate insertion point.
1121 if (RestoreBB)
1122 Builder.SetInsertPoint(RestoreBB);
1123 else
1124 Builder.ClearInsertionPoint();
1125 }
1126
1127 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
1128 // If there is no enclosing switch instance that we're aware of, then this
1129 // case statement and its block can be elided. This situation only happens
1130 // when we've constant-folded the switch, are emitting the constant case,
1131 // and part of the constant case includes another case statement. For
1132 // instance: switch (4) { case 4: do { case 5: } while (1); }
1133 if (!SwitchInsn) {
1134 EmitStmt(S.getSubStmt());
1135 return;
1136 }
1137
1138 // Handle case ranges.
1139 if (S.getRHS()) {
1140 EmitCaseStmtRange(S);
1141 return;
1142 }
1143
1144 llvm::ConstantInt *CaseVal =
1145 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1146
1147 // If the body of the case is just a 'break', try to not emit an empty block.
1148 // If we're profiling or we're not optimizing, leave the block in for better
1149 // debug and coverage analysis.
1150 if (!CGM.getCodeGenOpts().ProfileInstrGenerate &&
1151 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1152 isa<BreakStmt>(S.getSubStmt())) {
1153 JumpDest Block = BreakContinueStack.back().BreakBlock;
1154
1155 // Only do this optimization if there are no cleanups that need emitting.
1156 if (isObviouslyBranchWithoutCleanups(Block)) {
1157 if (SwitchWeights)
1158 SwitchWeights->push_back(getProfileCount(&S));
1159 SwitchInsn->addCase(CaseVal, Block.getBlock());
1160
1161 // If there was a fallthrough into this case, make sure to redirect it to
1162 // the end of the switch as well.
1163 if (Builder.GetInsertBlock()) {
1164 Builder.CreateBr(Block.getBlock());
1165 Builder.ClearInsertionPoint();
1166 }
1167 return;
1168 }
1169 }
1170
1171 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1172 EmitBlockWithFallThrough(CaseDest, &S);
1173 if (SwitchWeights)
1174 SwitchWeights->push_back(getProfileCount(&S));
1175 SwitchInsn->addCase(CaseVal, CaseDest);
1176
1177 // Recursively emitting the statement is acceptable, but is not wonderful for
1178 // code where we have many case statements nested together, i.e.:
1179 // case 1:
1180 // case 2:
1181 // case 3: etc.
1182 // Handling this recursively will create a new block for each case statement
1183 // that falls through to the next case which is IR intensive. It also causes
1184 // deep recursion which can run into stack depth limitations. Handle
1185 // sequential non-range case statements specially.
1186 const CaseStmt *CurCase = &S;
1187 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1188
1189 // Otherwise, iteratively add consecutive cases to this switch stmt.
1190 while (NextCase && NextCase->getRHS() == nullptr) {
1191 CurCase = NextCase;
1192 llvm::ConstantInt *CaseVal =
1193 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1194
1195 if (SwitchWeights)
1196 SwitchWeights->push_back(getProfileCount(NextCase));
1197 if (CGM.getCodeGenOpts().ProfileInstrGenerate) {
1198 CaseDest = createBasicBlock("sw.bb");
1199 EmitBlockWithFallThrough(CaseDest, &S);
1200 }
1201
1202 SwitchInsn->addCase(CaseVal, CaseDest);
1203 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1204 }
1205
1206 // Normal default recursion for non-cases.
1207 EmitStmt(CurCase->getSubStmt());
1208 }
1209
1210 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
1211 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1212 assert(DefaultBlock->empty() &&
1213 "EmitDefaultStmt: Default block already defined?");
1214
1215 EmitBlockWithFallThrough(DefaultBlock, &S);
1216
1217 EmitStmt(S.getSubStmt());
1218 }
1219
1220 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
1221 /// constant value that is being switched on, see if we can dead code eliminate
1222 /// the body of the switch to a simple series of statements to emit. Basically,
1223 /// on a switch (5) we want to find these statements:
1224 /// case 5:
1225 /// printf(...); <--
1226 /// ++i; <--
1227 /// break;
1228 ///
1229 /// and add them to the ResultStmts vector. If it is unsafe to do this
1230 /// transformation (for example, one of the elided statements contains a label
1231 /// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1232 /// should include statements after it (e.g. the printf() line is a substmt of
1233 /// the case) then return CSFC_FallThrough. If we handled it and found a break
1234 /// statement, then return CSFC_Success.
1235 ///
1236 /// If Case is non-null, then we are looking for the specified case, checking
1237 /// that nothing we jump over contains labels. If Case is null, then we found
1238 /// the case and are looking for the break.
1239 ///
1240 /// If the recursive walk actually finds our Case, then we set FoundCase to
1241 /// true.
1242 ///
1243 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1244 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1245 const SwitchCase *Case,
1246 bool &FoundCase,
1247 SmallVectorImpl<const Stmt*> &ResultStmts) {
1248 // If this is a null statement, just succeed.
1249 if (!S)
1250 return Case ? CSFC_Success : CSFC_FallThrough;
1251
1252 // If this is the switchcase (case 4: or default) that we're looking for, then
1253 // we're in business. Just add the substatement.
1254 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1255 if (S == Case) {
1256 FoundCase = true;
1257 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1258 ResultStmts);
1259 }
1260
1261 // Otherwise, this is some other case or default statement, just ignore it.
1262 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1263 ResultStmts);
1264 }
1265
1266 // If we are in the live part of the code and we found our break statement,
1267 // return a success!
1268 if (!Case && isa<BreakStmt>(S))
1269 return CSFC_Success;
1270
1271 // If this is a switch statement, then it might contain the SwitchCase, the
1272 // break, or neither.
1273 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1274 // Handle this as two cases: we might be looking for the SwitchCase (if so
1275 // the skipped statements must be skippable) or we might already have it.
1276 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1277 if (Case) {
1278 // Keep track of whether we see a skipped declaration. The code could be
1279 // using the declaration even if it is skipped, so we can't optimize out
1280 // the decl if the kept statements might refer to it.
1281 bool HadSkippedDecl = false;
1282
1283 // If we're looking for the case, just see if we can skip each of the
1284 // substatements.
1285 for (; Case && I != E; ++I) {
1286 HadSkippedDecl |= isa<DeclStmt>(*I);
1287
1288 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1289 case CSFC_Failure: return CSFC_Failure;
1290 case CSFC_Success:
1291 // A successful result means either 1) that the statement doesn't
1292 // have the case and is skippable, or 2) that it does contain the case
1293 // value and also contains the break to exit the switch. In the latter
1294 // case, we just verify the rest of the statements are elidable.
1295 if (FoundCase) {
1296 // If we found the case and skipped declarations, we can't do the
1297 // optimization.
1298 if (HadSkippedDecl)
1299 return CSFC_Failure;
1300
1301 for (++I; I != E; ++I)
1302 if (CodeGenFunction::ContainsLabel(*I, true))
1303 return CSFC_Failure;
1304 return CSFC_Success;
1305 }
1306 break;
1307 case CSFC_FallThrough:
1308 // If we have a fallthrough condition, then we must have found the
1309 // case and started to include statements. Consider the rest of the
1310 // statements in the compound statement as candidates for inclusion.
1311 assert(FoundCase && "Didn't find case but returned fallthrough?");
1312 // We recursively found Case, so we're not looking for it anymore.
1313 Case = nullptr;
1314
1315 // If we found the case and skipped declarations, we can't do the
1316 // optimization.
1317 if (HadSkippedDecl)
1318 return CSFC_Failure;
1319 break;
1320 }
1321 }
1322 }
1323
1324 // If we have statements in our range, then we know that the statements are
1325 // live and need to be added to the set of statements we're tracking.
1326 for (; I != E; ++I) {
1327 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1328 case CSFC_Failure: return CSFC_Failure;
1329 case CSFC_FallThrough:
1330 // A fallthrough result means that the statement was simple and just
1331 // included in ResultStmt, keep adding them afterwards.
1332 break;
1333 case CSFC_Success:
1334 // A successful result means that we found the break statement and
1335 // stopped statement inclusion. We just ensure that any leftover stmts
1336 // are skippable and return success ourselves.
1337 for (++I; I != E; ++I)
1338 if (CodeGenFunction::ContainsLabel(*I, true))
1339 return CSFC_Failure;
1340 return CSFC_Success;
1341 }
1342 }
1343
1344 return Case ? CSFC_Success : CSFC_FallThrough;
1345 }
1346
1347 // Okay, this is some other statement that we don't handle explicitly, like a
1348 // for statement or increment etc. If we are skipping over this statement,
1349 // just verify it doesn't have labels, which would make it invalid to elide.
1350 if (Case) {
1351 if (CodeGenFunction::ContainsLabel(S, true))
1352 return CSFC_Failure;
1353 return CSFC_Success;
1354 }
1355
1356 // Otherwise, we want to include this statement. Everything is cool with that
1357 // so long as it doesn't contain a break out of the switch we're in.
1358 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1359
1360 // Otherwise, everything is great. Include the statement and tell the caller
1361 // that we fall through and include the next statement as well.
1362 ResultStmts.push_back(S);
1363 return CSFC_FallThrough;
1364 }
1365
1366 /// FindCaseStatementsForValue - Find the case statement being jumped to and
1367 /// then invoke CollectStatementsForCase to find the list of statements to emit
1368 /// for a switch on constant. See the comment above CollectStatementsForCase
1369 /// for more details.
1370 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1371 const llvm::APSInt &ConstantCondValue,
1372 SmallVectorImpl<const Stmt*> &ResultStmts,
1373 ASTContext &C,
1374 const SwitchCase *&ResultCase) {
1375 // First step, find the switch case that is being branched to. We can do this
1376 // efficiently by scanning the SwitchCase list.
1377 const SwitchCase *Case = S.getSwitchCaseList();
1378 const DefaultStmt *DefaultCase = nullptr;
1379
1380 for (; Case; Case = Case->getNextSwitchCase()) {
1381 // It's either a default or case. Just remember the default statement in
1382 // case we're not jumping to any numbered cases.
1383 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1384 DefaultCase = DS;
1385 continue;
1386 }
1387
1388 // Check to see if this case is the one we're looking for.
1389 const CaseStmt *CS = cast<CaseStmt>(Case);
1390 // Don't handle case ranges yet.
1391 if (CS->getRHS()) return false;
1392
1393 // If we found our case, remember it as 'case'.
1394 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1395 break;
1396 }
1397
1398 // If we didn't find a matching case, we use a default if it exists, or we
1399 // elide the whole switch body!
1400 if (!Case) {
1401 // It is safe to elide the body of the switch if it doesn't contain labels
1402 // etc. If it is safe, return successfully with an empty ResultStmts list.
1403 if (!DefaultCase)
1404 return !CodeGenFunction::ContainsLabel(&S);
1405 Case = DefaultCase;
1406 }
1407
1408 // Ok, we know which case is being jumped to, try to collect all the
1409 // statements that follow it. This can fail for a variety of reasons. Also,
1410 // check to see that the recursive walk actually found our case statement.
1411 // Insane cases like this can fail to find it in the recursive walk since we
1412 // don't handle every stmt kind:
1413 // switch (4) {
1414 // while (1) {
1415 // case 4: ...
1416 bool FoundCase = false;
1417 ResultCase = Case;
1418 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1419 ResultStmts) != CSFC_Failure &&
1420 FoundCase;
1421 }
1422
1423 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1424 // Handle nested switch statements.
1425 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1426 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1427 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1428
1429 // See if we can constant fold the condition of the switch and therefore only
1430 // emit the live case statement (if any) of the switch.
1431 llvm::APSInt ConstantCondValue;
1432 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1433 SmallVector<const Stmt*, 4> CaseStmts;
1434 const SwitchCase *Case = nullptr;
1435 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1436 getContext(), Case)) {
1437 if (Case)
1438 incrementProfileCounter(Case);
1439 RunCleanupsScope ExecutedScope(*this);
1440
1441 // Emit the condition variable if needed inside the entire cleanup scope
1442 // used by this special case for constant folded switches.
1443 if (S.getConditionVariable())
1444 EmitAutoVarDecl(*S.getConditionVariable());
1445
1446 // At this point, we are no longer "within" a switch instance, so
1447 // we can temporarily enforce this to ensure that any embedded case
1448 // statements are not emitted.
1449 SwitchInsn = nullptr;
1450
1451 // Okay, we can dead code eliminate everything except this case. Emit the
1452 // specified series of statements and we're good.
1453 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1454 EmitStmt(CaseStmts[i]);
1455 incrementProfileCounter(&S);
1456
1457 // Now we want to restore the saved switch instance so that nested
1458 // switches continue to function properly
1459 SwitchInsn = SavedSwitchInsn;
1460
1461 return;
1462 }
1463 }
1464
1465 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1466
1467 RunCleanupsScope ConditionScope(*this);
1468 if (S.getConditionVariable())
1469 EmitAutoVarDecl(*S.getConditionVariable());
1470 llvm::Value *CondV = EmitScalarExpr(S.getCond());
1471
1472 // Create basic block to hold stuff that comes after switch
1473 // statement. We also need to create a default block now so that
1474 // explicit case ranges tests can have a place to jump to on
1475 // failure.
1476 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1477 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1478 if (PGO.haveRegionCounts()) {
1479 // Walk the SwitchCase list to find how many there are.
1480 uint64_t DefaultCount = 0;
1481 unsigned NumCases = 0;
1482 for (const SwitchCase *Case = S.getSwitchCaseList();
1483 Case;
1484 Case = Case->getNextSwitchCase()) {
1485 if (isa<DefaultStmt>(Case))
1486 DefaultCount = getProfileCount(Case);
1487 NumCases += 1;
1488 }
1489 SwitchWeights = new SmallVector<uint64_t, 16>();
1490 SwitchWeights->reserve(NumCases);
1491 // The default needs to be first. We store the edge count, so we already
1492 // know the right weight.
1493 SwitchWeights->push_back(DefaultCount);
1494 }
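// When region counts are available, the weights gathered above are later
// attached to the switch as !prof branch_weights metadata (default edge
// first, then one weight per case), roughly of the form:
//   !{!"branch_weights", i32 <default count>, i32 <case0 count>, ...}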
1495 CaseRangeBlock = DefaultBlock;
1496
1497 // Clear the insertion point to indicate we are in unreachable code.
1498 Builder.ClearInsertionPoint();
1499
1500 // All break statements jump to SwitchExit. If BreakContinueStack is non-empty
1501 // then reuse the enclosing loop's ContinueBlock so 'continue' still works.
1502 JumpDest OuterContinue;
1503 if (!BreakContinueStack.empty())
1504 OuterContinue = BreakContinueStack.back().ContinueBlock;
1505
1506 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1507
1508 // Emit switch body.
1509 EmitStmt(S.getBody());
1510
1511 BreakContinueStack.pop_back();
1512
1513 // Update the default block in case explicit case range tests have
1514 // been chained on top.
1515 SwitchInsn->setDefaultDest(CaseRangeBlock);
1516
1517 // If a default was never emitted:
1518 if (!DefaultBlock->getParent()) {
1519 // If we have cleanups, emit the default block so that there's a
1520 // place to jump through the cleanups from.
1521 if (ConditionScope.requiresCleanups()) {
1522 EmitBlock(DefaultBlock);
1523
1524 // Otherwise, just forward the default block to the switch end.
1525 } else {
1526 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1527 delete DefaultBlock;
1528 }
1529 }
1530
1531 ConditionScope.ForceCleanup();
1532
1533 // Emit continuation.
1534 EmitBlock(SwitchExit.getBlock(), true);
1535 incrementProfileCounter(&S);
1536
1537 // If the switch has a condition wrapped by __builtin_unpredictable,
1538 // create metadata that specifies that the switch is unpredictable.
1539 // Don't bother if not optimizing because that metadata would not be used.
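// For example, source along the lines of
//   switch (__builtin_unpredictable(cond)) { ... }
// reaches this path, and the emitted switch then carries !unpredictable
// metadata telling the optimizer not to assume the branch is predictable.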
1540 if (CGM.getCodeGenOpts().OptimizationLevel != 0) {
1541 if (const CallExpr *Call = dyn_cast<CallExpr>(S.getCond())) {
1542 const Decl *TargetDecl = Call->getCalleeDecl();
1543 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
1544 if (FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1545 llvm::MDBuilder MDHelper(getLLVMContext());
1546 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1547 MDHelper.createUnpredictable());
1548 }
1549 }
1550 }
1551 }
1552
1553 if (SwitchWeights) {
1554 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1555 "switch weights do not match switch cases");
1556 // If there's only one jump destination there's no sense weighting it.
1557 if (SwitchWeights->size() > 1)
1558 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1559 createProfileWeights(*SwitchWeights));
1560 delete SwitchWeights;
1561 }
1562 SwitchInsn = SavedSwitchInsn;
1563 SwitchWeights = SavedSwitchWeights;
1564 CaseRangeBlock = SavedCRBlock;
1565 }
1566
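// Maps a per-operand GCC constraint string onto the form LLVM's inline-asm
// lowering expects. For illustration, the generic constraint "g" becomes
// "imr", the multi-alternative constraint "r,m" becomes "r|m", and a symbolic
// reference such as "[result]" is rewritten to the matching operand index.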
1567 static std::string
1568 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
1569 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
1570 std::string Result;
1571
1572 while (*Constraint) {
1573 switch (*Constraint) {
1574 default:
1575 Result += Target.convertConstraint(Constraint);
1576 break;
1577 // Ignore these
1578 case '*':
1579 case '?':
1580 case '!':
1581 case '=': // Will see this and the following '+' in multi-alternative constraints.
1582 case '+':
1583 break;
1584 case '#': // Ignore the rest of the constraint alternative.
1585 while (Constraint[1] && Constraint[1] != ',')
1586 Constraint++;
1587 break;
1588 case '&':
1589 case '%':
1590 Result += *Constraint;
1591 while (Constraint[1] && Constraint[1] == *Constraint)
1592 Constraint++;
1593 break;
1594 case ',':
1595 Result += "|";
1596 break;
1597 case 'g':
1598 Result += "imr";
1599 break;
1600 case '[': {
1601 assert(OutCons &&
1602 "Must pass output names to constraints with a symbolic name");
1603 unsigned Index;
1604 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
1605 assert(result && "Could not resolve symbolic name"); (void)result;
1606 Result += llvm::utostr(Index);
1607 break;
1608 }
1609 }
1610
1611 Constraint++;
1612 }
1613
1614 return Result;
1615 }
1616
1617 /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
1618 /// as using a particular register add that as a constraint that will be used
1619 /// in this asm stmt.
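/// For illustration, given a register variable such as
///   register int idx asm("ebx");
///   __asm__("..." : : "r"(idx));
/// the "r" constraint on 'idx' is rewritten to "{ebx}" (or "&{ebx}" for an
/// early-clobber output) so the backend pins the operand to that register.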
1620 static std::string
1621 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
1622 const TargetInfo &Target, CodeGenModule &CGM,
1623 const AsmStmt &Stmt, const bool EarlyClobber) {
1624 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
1625 if (!AsmDeclRef)
1626 return Constraint;
1627 const ValueDecl &Value = *AsmDeclRef->getDecl();
1628 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
1629 if (!Variable)
1630 return Constraint;
1631 if (Variable->getStorageClass() != SC_Register)
1632 return Constraint;
1633 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
1634 if (!Attr)
1635 return Constraint;
1636 StringRef Register = Attr->getLabel();
1637 assert(Target.isValidGCCRegisterName(Register));
1638 // We're using validateOutputConstraint here because we only care if
1639 // this is a register constraint.
1640 TargetInfo::ConstraintInfo Info(Constraint, "");
1641 if (Target.validateOutputConstraint(Info) &&
1642 !Info.allowsRegister()) {
1643 CGM.ErrorUnsupported(&Stmt, "__asm__");
1644 return Constraint;
1645 }
1646 // Canonicalize the register here before returning it.
1647 Register = Target.getNormalizedGCCRegisterName(Register);
1648 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
1649 }
1650
1651 llvm::Value*
1652 CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
1653 LValue InputValue, QualType InputType,
1654 std::string &ConstraintStr,
1655 SourceLocation Loc) {
1656 llvm::Value *Arg;
1657 if (Info.allowsRegister() || !Info.allowsMemory()) {
1658 if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
1659 Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
1660 } else {
1661 llvm::Type *Ty = ConvertType(InputType);
1662 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
1663 if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
1664 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
1665 Ty = llvm::PointerType::getUnqual(Ty);
1666
1667 Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
1668 Ty));
1669 } else {
1670 Arg = InputValue.getPointer();
1671 ConstraintStr += '*';
1672 }
1673 }
1674 } else {
1675 Arg = InputValue.getPointer();
1676 ConstraintStr += '*';
1677 }
1678
1679 return Arg;
1680 }
1681
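// Lower a single asm input operand. Immediate-only constraints (no register,
// no memory) are constant-folded to an integer when possible; scalar operands
// that may live in a register are emitted by value; everything else is passed
// indirectly, with '*' appended to the constraint to mark a memory operand.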
1682 llvm::Value* CodeGenFunction::EmitAsmInput(
1683 const TargetInfo::ConstraintInfo &Info,
1684 const Expr *InputExpr,
1685 std::string &ConstraintStr) {
1686 // If this can't be a register or memory, i.e., has to be a constant
1687 // (immediate or symbolic), try to emit it as such.
1688 if (!Info.allowsRegister() && !Info.allowsMemory()) {
1689 llvm::APSInt Result;
1690 if (InputExpr->EvaluateAsInt(Result, getContext()))
1691 return llvm::ConstantInt::get(getLLVMContext(), Result);
1692 assert(!Info.requiresImmediateConstant() &&
1693 "Required-immediate inlineasm arg isn't constant?");
1694 }
1695
1696 if (Info.allowsRegister() || !Info.allowsMemory())
1697 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
1698 return EmitScalarExpr(InputExpr);
1699 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
1700 return EmitScalarExpr(InputExpr);
1701 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
1702 LValue Dest = EmitLValue(InputExpr);
1703 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
1704 InputExpr->getExprLoc());
1705 }
1706
1707 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
1708 /// asm call instruction. The !srcloc MDNode contains a list of constant
1709 /// integers which are the source locations of the start of each line in the
1710 /// asm.
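/// For illustration, a two-line asm such as "movl %eax, %ebx\n\tcpuid" yields
/// a node with two encoded SourceLocations, one per line, so the backend can
/// point diagnostics at the exact line inside the asm string.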
1711 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
1712 CodeGenFunction &CGF) {
1713 SmallVector<llvm::Metadata *, 8> Locs;
1714 // Add the location of the first line to the MDNode.
1715 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
1716 CGF.Int32Ty, Str->getLocStart().getRawEncoding())));
1717 StringRef StrVal = Str->getString();
1718 if (!StrVal.empty()) {
1719 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
1720 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
1721 unsigned StartToken = 0;
1722 unsigned ByteOffset = 0;
1723
1724 // Add the location of the start of each subsequent line of the asm to the
1725 // MDNode.
1726 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
1727 if (StrVal[i] != '\n') continue;
1728 SourceLocation LineLoc = Str->getLocationOfByte(
1729 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
1730 Locs.push_back(llvm::ConstantAsMetadata::get(
1731 llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
1732 }
1733 }
1734
1735 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
1736 }
1737
1738 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
1739 // Assemble the final asm string.
1740 std::string AsmString = S.generateAsmString(getContext());
1741
1742 // Get all the output and input constraints together.
1743 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
1744 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
1745
1746 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1747 StringRef Name;
1748 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1749 Name = GAS->getOutputName(i);
1750 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
1751 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
1752 assert(IsValid && "Failed to parse output constraint");
1753 OutputConstraintInfos.push_back(Info);
1754 }
1755
1756 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1757 StringRef Name;
1758 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1759 Name = GAS->getInputName(i);
1760 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
1761 bool IsValid =
1762 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
1763 assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
1764 InputConstraintInfos.push_back(Info);
1765 }
1766
1767 std::string Constraints;
1768
1769 std::vector<LValue> ResultRegDests;
1770 std::vector<QualType> ResultRegQualTys;
1771 std::vector<llvm::Type *> ResultRegTypes;
1772 std::vector<llvm::Type *> ResultTruncRegTypes;
1773 std::vector<llvm::Type *> ArgTypes;
1774 std::vector<llvm::Value*> Args;
1775
1776 // Keep track of inout constraints.
1777 std::string InOutConstraints;
1778 std::vector<llvm::Value*> InOutArgs;
1779 std::vector<llvm::Type*> InOutArgTypes;
1780
1781 // An inline asm can be marked readonly if it meets the following conditions:
1782 // - it doesn't have any side effects
1783 // - it doesn't clobber memory
1784 // - it doesn't return a value by-reference
1785 // It can be marked readnone if it doesn't have any input memory constraints
1786 // in addition to meeting the conditions listed above.
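// For illustration, 'asm("bswap %0" : "+r"(x))' can end up marked readnone,
// whereas 'asm volatile("" ::: "memory")' is treated as having side effects
// and gets neither attribute.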
1787 bool ReadOnly = true, ReadNone = true;
1788
1789 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1790 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
1791
1792 // Simplify the output constraint.
1793 std::string OutputConstraint(S.getOutputConstraint(i));
1794 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
1795 getTarget());
1796
1797 const Expr *OutExpr = S.getOutputExpr(i);
1798 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
1799
1800 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
1801 getTarget(), CGM, S,
1802 Info.earlyClobber());
1803
1804 LValue Dest = EmitLValue(OutExpr);
1805 if (!Constraints.empty())
1806 Constraints += ',';
1807
1808 // If this is a register output, then make the inline asm return it
1809 // by-value. If this is a memory result, return the value by-reference.
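// For illustration, "=r"(x) becomes a by-value "=r" result of the asm call,
// while "=m"(x) becomes "=*m" with the address of 'x' passed as an argument.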
1810 if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) {
1811 Constraints += "=" + OutputConstraint;
1812 ResultRegQualTys.push_back(OutExpr->getType());
1813 ResultRegDests.push_back(Dest);
1814 ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
1815 ResultTruncRegTypes.push_back(ResultRegTypes.back());
1816
1817 // If this output is tied to an input, and if the input is larger, then
1818 // we need to set the actual result type of the inline asm node to be the
1819 // same as the input type.
1820 if (Info.hasMatchingInput()) {
1821 unsigned InputNo;
1822 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
1823 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
1824 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
1825 break;
1826 }
1827 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
1828
1829 QualType InputTy = S.getInputExpr(InputNo)->getType();
1830 QualType OutputType = OutExpr->getType();
1831
1832 uint64_t InputSize = getContext().getTypeSize(InputTy);
1833 if (getContext().getTypeSize(OutputType) < InputSize) {
1834 // Form the asm to return the value as a larger integer or fp type.
1835 ResultRegTypes.back() = ConvertType(InputTy);
1836 }
1837 }
1838 if (llvm::Type* AdjTy =
1839 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
1840 ResultRegTypes.back()))
1841 ResultRegTypes.back() = AdjTy;
1842 else {
1843 CGM.getDiags().Report(S.getAsmLoc(),
1844 diag::err_asm_invalid_type_in_input)
1845 << OutExpr->getType() << OutputConstraint;
1846 }
1847 } else {
1848 ArgTypes.push_back(Dest.getAddress().getType());
1849 Args.push_back(Dest.getPointer());
1850 Constraints += "=*";
1851 Constraints += OutputConstraint;
1852 ReadOnly = ReadNone = false;
1853 }
1854
1855 if (Info.isReadWrite()) {
1856 InOutConstraints += ',';
1857
1858 const Expr *InputExpr = S.getOutputExpr(i);
1859 llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
1860 InOutConstraints,
1861 InputExpr->getExprLoc());
1862
1863 if (llvm::Type* AdjTy =
1864 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
1865 Arg->getType()))
1866 Arg = Builder.CreateBitCast(Arg, AdjTy);
1867
1868 if (Info.allowsRegister())
1869 InOutConstraints += llvm::utostr(i);
1870 else
1871 InOutConstraints += OutputConstraint;
1872
1873 InOutArgTypes.push_back(Arg->getType());
1874 InOutArgs.push_back(Arg);
1875 }
1876 }
1877
1878 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
1879 // to the return value slot. Only do this when returning in registers.
1880 if (isa<MSAsmStmt>(&S)) {
1881 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
1882 if (RetAI.isDirect() || RetAI.isExtend()) {
1883 // Make a fake lvalue for the return value slot.
1884 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
1885 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
1886 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
1887 ResultRegDests, AsmString, S.getNumOutputs());
1888 SawAsmBlock = true;
1889 }
1890 }
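// For illustration, in 'int f() { __asm mov eax, 42 }' the x86 target hook
// appends an "={eax}" output tied to the return slot, which is how the value
// left in EAX becomes the function's return value.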
1891
1892 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1893 const Expr *InputExpr = S.getInputExpr(i);
1894
1895 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
1896
1897 if (Info.allowsMemory())
1898 ReadNone = false;
1899
1900 if (!Constraints.empty())
1901 Constraints += ',';
1902
1903 // Simplify the input constraint.
1904 std::string InputConstraint(S.getInputConstraint(i));
1905 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
1906 &OutputConstraintInfos);
1907
1908 InputConstraint = AddVariableConstraints(
1909 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
1910 getTarget(), CGM, S, false /* No EarlyClobber */);
1911
1912 llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
1913
1914 // If this input argument is tied to a larger output result, extend the
1915 // input to be the same size as the output. The LLVM backend wants to see
1916 // the input and output of a matching constraint be the same size. Note
1917 // that GCC does not define what the top bits are here. We use zext because
1918 // that is usually cheaper, but LLVM IR should really get an anyext someday.
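// For illustration, with 'asm("..." : "=r"(wide) : "0"(narrow))' where 'wide'
// is 64 bits and 'narrow' is 32, the input is zero-extended to 64 bits here.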
1919 if (Info.hasTiedOperand()) {
1920 unsigned Output = Info.getTiedOperand();
1921 QualType OutputType = S.getOutputExpr(Output)->getType();
1922 QualType InputTy = InputExpr->getType();
1923
1924 if (getContext().getTypeSize(OutputType) >
1925 getContext().getTypeSize(InputTy)) {
1926 // Use ptrtoint as appropriate so that we can do our extension.
1927 if (isa<llvm::PointerType>(Arg->getType()))
1928 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
1929 llvm::Type *OutputTy = ConvertType(OutputType);
1930 if (isa<llvm::IntegerType>(OutputTy))
1931 Arg = Builder.CreateZExt(Arg, OutputTy);
1932 else if (isa<llvm::PointerType>(OutputTy))
1933 Arg = Builder.CreateZExt(Arg, IntPtrTy);
1934 else {
1935 assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
1936 Arg = Builder.CreateFPExt(Arg, OutputTy);
1937 }
1938 }
1939 }
1940 if (llvm::Type* AdjTy =
1941 getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
1942 Arg->getType()))
1943 Arg = Builder.CreateBitCast(Arg, AdjTy);
1944 else
1945 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
1946 << InputExpr->getType() << InputConstraint;
1947
1948 ArgTypes.push_back(Arg->getType());
1949 Args.push_back(Arg);
1950 Constraints += InputConstraint;
1951 }
1952
1953 // Append the "input" part of inout constraints last.
1954 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
1955 ArgTypes.push_back(InOutArgTypes[i]);
1956 Args.push_back(InOutArgs[i]);
1957 }
1958 Constraints += InOutConstraints;
1959
1960 // Clobbers
1961 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
1962 StringRef Clobber = S.getClobber(i);
1963
1964 if (Clobber == "memory")
1965 ReadOnly = ReadNone = false;
1966 else if (Clobber != "cc")
1967 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
1968
1969 if (!Constraints.empty())
1970 Constraints += ',';
1971
1972 Constraints += "~{";
1973 Constraints += Clobber;
1974 Constraints += '}';
1975 }
1976
1977 // Add machine specific clobbers
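// (e.g. on x86 targets this typically appends "~{dirflag},~{fpsr},~{flags}").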
1978 std::string MachineClobbers = getTarget().getClobbers();
1979 if (!MachineClobbers.empty()) {
1980 if (!Constraints.empty())
1981 Constraints += ',';
1982 Constraints += MachineClobbers;
1983 }
1984
1985 llvm::Type *ResultType;
1986 if (ResultRegTypes.empty())
1987 ResultType = VoidTy;
1988 else if (ResultRegTypes.size() == 1)
1989 ResultType = ResultRegTypes[0];
1990 else
1991 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
1992
1993 llvm::FunctionType *FTy =
1994 llvm::FunctionType::get(ResultType, ArgTypes, false);
1995
1996 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
1997 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
1998 llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
1999 llvm::InlineAsm *IA =
2000 llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
2001 /* IsAlignStack */ false, AsmDialect);
2002 llvm::CallInst *Result = Builder.CreateCall(IA, Args);
2003 Result->addAttribute(llvm::AttributeSet::FunctionIndex,
2004 llvm::Attribute::NoUnwind);
2005
2006 if (isa<MSAsmStmt>(&S)) {
2007 // If the assembly contains any labels, mark the call noduplicate to prevent
2008 // defining the same ASM label twice (PR23715). This is pretty hacky, but it
2009 // works.
2010 if (AsmString.find("__MSASMLABEL_") != std::string::npos)
2011 Result->addAttribute(llvm::AttributeSet::FunctionIndex,
2012 llvm::Attribute::NoDuplicate);
2013 }
2014
2015 // Attach readnone and readonly attributes.
2016 if (!HasSideEffect) {
2017 if (ReadNone)
2018 Result->addAttribute(llvm::AttributeSet::FunctionIndex,
2019 llvm::Attribute::ReadNone);
2020 else if (ReadOnly)
2021 Result->addAttribute(llvm::AttributeSet::FunctionIndex,
2022 llvm::Attribute::ReadOnly);
2023 }
2024
2025 // Slap the source location of the inline asm into a !srcloc metadata on the
2026 // call.
2027 if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) {
2028 Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
2029 *this));
2030 } else {
2031 // At least put the line number on MS inline asm blobs.
2032 auto Loc = llvm::ConstantInt::get(Int32Ty, S.getAsmLoc().getRawEncoding());
2033 Result->setMetadata("srcloc",
2034 llvm::MDNode::get(getLLVMContext(),
2035 llvm::ConstantAsMetadata::get(Loc)));
2036 }
2037
2038 // Extract all of the register value results from the asm.
2039 std::vector<llvm::Value*> RegResults;
2040 if (ResultRegTypes.size() == 1) {
2041 RegResults.push_back(Result);
2042 } else {
2043 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2044 llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
2045 RegResults.push_back(Tmp);
2046 }
2047 }
2048
2049 assert(RegResults.size() == ResultRegTypes.size());
2050 assert(RegResults.size() == ResultTruncRegTypes.size());
2051 assert(RegResults.size() == ResultRegDests.size());
2052 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2053 llvm::Value *Tmp = RegResults[i];
2054
2055 // If the result type of the LLVM IR asm doesn't match the result type of
2056 // the expression, do the conversion.
2057 if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2058 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2059
2060 // Truncate the integer result to the right size; note that TruncTy can also
2061 // be a pointer, vector, or floating-point type.
2062 if (TruncTy->isFloatingPointTy())
2063 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2064 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2065 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2066 Tmp = Builder.CreateTrunc(Tmp,
2067 llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2068 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2069 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2070 uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2071 Tmp = Builder.CreatePtrToInt(Tmp,
2072 llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2073 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2074 } else if (TruncTy->isIntegerTy()) {
2075 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2076 } else if (TruncTy->isVectorTy()) {
2077 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2078 }
2079 }
2080
2081 EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
2082 }
2083 }
2084
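// Build the capture record for a CapturedStmt: create a temporary of the
// captured RecordDecl's type and initialize each field from its capture
// initializer; fields that capture a VLA type store the corresponding size
// value from VLASizeMap instead.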
2085 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2086 const RecordDecl *RD = S.getCapturedRecordDecl();
2087 QualType RecordTy = getContext().getRecordType(RD);
2088
2089 // Initialize the captured struct.
2090 LValue SlotLV =
2091 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2092
2093 RecordDecl::field_iterator CurField = RD->field_begin();
2094 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2095 E = S.capture_init_end();
2096 I != E; ++I, ++CurField) {
2097 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2098 if (CurField->hasCapturedVLAType()) {
2099 auto VAT = CurField->getCapturedVLAType();
2100 EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2101 } else {
2102 EmitInitializerForField(*CurField, LV, *I, None);
2103 }
2104 }
2105
2106 return SlotLV;
2107 }
2108
2109 /// Generate an outlined function for the body of a CapturedStmt, store any
2110 /// captured variables into the captured struct, and call the outlined function.
2111 llvm::Function *
2112 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2113 LValue CapStruct = InitCapturedStruct(S);
2114
2115 // Emit the CapturedDecl
2116 CodeGenFunction CGF(CGM, true);
2117 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2118 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2119 delete CGF.CapturedStmtInfo;
2120
2121 // Emit call to the helper function.
2122 EmitCallOrInvoke(F, CapStruct.getPointer());
2123
2124 return F;
2125 }
2126
2127 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2128 LValue CapStruct = InitCapturedStruct(S);
2129 return CapStruct.getAddress();
2130 }
2131
2132 /// Creates the outlined function for a CapturedStmt.
2133 llvm::Function *
2134 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2135 assert(CapturedStmtInfo &&
2136 "CapturedStmtInfo should be set when generating the captured function");
2137 const CapturedDecl *CD = S.getCapturedDecl();
2138 const RecordDecl *RD = S.getCapturedRecordDecl();
2139 SourceLocation Loc = S.getLocStart();
2140 assert(CD->hasBody() && "missing CapturedDecl body");
2141
2142 // Build the argument list.
2143 ASTContext &Ctx = CGM.getContext();
2144 FunctionArgList Args;
2145 Args.append(CD->param_begin(), CD->param_end());
2146
2147 // Create the function declaration.
2148 FunctionType::ExtInfo ExtInfo;
2149 const CGFunctionInfo &FuncInfo =
2150 CGM.getTypes().arrangeFreeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
2151 /*IsVariadic=*/false);
2152 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2153
2154 llvm::Function *F =
2155 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2156 CapturedStmtInfo->getHelperName(), &CGM.getModule());
2157 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2158 if (CD->isNothrow())
2159 F->addFnAttr(llvm::Attribute::NoUnwind);
2160
2161 // Generate the function.
2162 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args,
2163 CD->getLocation(),
2164 CD->getBody()->getLocStart());
2165 // Set the context parameter in CapturedStmtInfo.
2166 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2167 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2168
2169 // Initialize variable-length arrays.
2170 LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2171 Ctx.getTagDeclType(RD));
2172 for (auto *FD : RD->fields()) {
2173 if (FD->hasCapturedVLAType()) {
2174 auto *ExprArg = EmitLoadOfLValue(EmitLValueForField(Base, FD),
2175 S.getLocStart()).getScalarVal();
2176 auto VAT = FD->getCapturedVLAType();
2177 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2178 }
2179 }
2180
2181 // If 'this' is captured, load it into CXXThisValue.
2182 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2183 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2184 LValue ThisLValue = EmitLValueForField(Base, FD);
2185 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2186 }
2187
2188 PGO.assignRegionCounters(GlobalDecl(CD), F);
2189 CapturedStmtInfo->EmitBody(*this, CD->getBody());
2190 FinishFunction(CD->getBodyRBrace());
2191
2192 return F;
2193 }
2194