1 //===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides a generalized class for OpenMP runtime code generation
10 // specialized by GPU targets NVPTX and AMDGCN.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "CGOpenMPRuntimeGPU.h"
15 #include "CGOpenMPRuntimeNVPTX.h"
16 #include "CodeGenFunction.h"
17 #include "clang/AST/Attr.h"
18 #include "clang/AST/DeclOpenMP.h"
19 #include "clang/AST/StmtOpenMP.h"
20 #include "clang/AST/StmtVisitor.h"
21 #include "clang/Basic/Cuda.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/Frontend/OpenMP/OMPGridValues.h"
24 #include "llvm/IR/IntrinsicsNVPTX.h"
25
26 using namespace clang;
27 using namespace CodeGen;
28 using namespace llvm::omp;
29
30 namespace {
31 /// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
32 class NVPTXActionTy final : public PrePostActionTy {
33 llvm::FunctionCallee EnterCallee = nullptr;
34 ArrayRef<llvm::Value *> EnterArgs;
35 llvm::FunctionCallee ExitCallee = nullptr;
36 ArrayRef<llvm::Value *> ExitArgs;
37 bool Conditional = false;
38 llvm::BasicBlock *ContBlock = nullptr;
39
40 public:
NVPTXActionTy(llvm::FunctionCallee EnterCallee,ArrayRef<llvm::Value * > EnterArgs,llvm::FunctionCallee ExitCallee,ArrayRef<llvm::Value * > ExitArgs,bool Conditional=false)41 NVPTXActionTy(llvm::FunctionCallee EnterCallee,
42 ArrayRef<llvm::Value *> EnterArgs,
43 llvm::FunctionCallee ExitCallee,
44 ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
45 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
46 ExitArgs(ExitArgs), Conditional(Conditional) {}
Enter(CodeGenFunction & CGF)47 void Enter(CodeGenFunction &CGF) override {
48 llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
49 if (Conditional) {
50 llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
51 auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
52 ContBlock = CGF.createBasicBlock("omp_if.end");
53 // Generate the branch (If-stmt)
54 CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
55 CGF.EmitBlock(ThenBlock);
56 }
57 }
Done(CodeGenFunction & CGF)58 void Done(CodeGenFunction &CGF) {
59 // Emit the rest of blocks/branches
60 CGF.EmitBranch(ContBlock);
61 CGF.EmitBlock(ContBlock, true);
62 }
Exit(CodeGenFunction & CGF)63 void Exit(CodeGenFunction &CGF) override {
64 CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
65 }
66 };
67
68 /// A class to track the execution mode when codegening directives within
69 /// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
70 /// to the target region and used by containing directives such as 'parallel'
71 /// to emit optimized code.
72 class ExecutionRuntimeModesRAII {
73 private:
74 CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
75 CGOpenMPRuntimeGPU::EM_Unknown;
76 CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
77 bool SavedRuntimeMode = false;
78 bool *RuntimeMode = nullptr;
79
80 public:
81 /// Constructor for Non-SPMD mode.
ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode & ExecMode)82 ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
83 : ExecMode(ExecMode) {
84 SavedExecMode = ExecMode;
85 ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
86 }
87 /// Constructor for SPMD mode.
ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode & ExecMode,bool & RuntimeMode,bool FullRuntimeMode)88 ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
89 bool &RuntimeMode, bool FullRuntimeMode)
90 : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
91 SavedExecMode = ExecMode;
92 SavedRuntimeMode = RuntimeMode;
93 ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
94 RuntimeMode = FullRuntimeMode;
95 }
~ExecutionRuntimeModesRAII()96 ~ExecutionRuntimeModesRAII() {
97 ExecMode = SavedExecMode;
98 if (RuntimeMode)
99 *RuntimeMode = SavedRuntimeMode;
100 }
101 };
102
103 /// GPU Configuration: This information can be derived from cuda registers,
104 /// however, providing compile time constants helps generate more efficient
105 /// code. For all practical purposes this is fine because the configuration
106 /// is the same for all known NVPTX architectures.
107 enum MachineConfiguration : unsigned {
108 /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
109 /// specific Grid Values like GV_Warp_Size, GV_Warp_Size_Log2,
110 /// and GV_Warp_Size_Log2_Mask.
111
112 /// Global memory alignment for performance.
113 GlobalMemoryAlignment = 128,
114
115 /// Maximal size of the shared memory buffer.
116 SharedMemorySize = 128,
117 };
118
getPrivateItem(const Expr * RefExpr)119 static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
120 RefExpr = RefExpr->IgnoreParens();
121 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
122 const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
123 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
124 Base = TempASE->getBase()->IgnoreParenImpCasts();
125 RefExpr = Base;
126 } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
127 const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
128 while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
129 Base = TempOASE->getBase()->IgnoreParenImpCasts();
130 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
131 Base = TempASE->getBase()->IgnoreParenImpCasts();
132 RefExpr = Base;
133 }
134 RefExpr = RefExpr->IgnoreParenImpCasts();
135 if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
136 return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
137 const auto *ME = cast<MemberExpr>(RefExpr);
138 return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
139 }
140
141
buildRecordForGlobalizedVars(ASTContext & C,ArrayRef<const ValueDecl * > EscapedDecls,ArrayRef<const ValueDecl * > EscapedDeclsForTeams,llvm::SmallDenseMap<const ValueDecl *,const FieldDecl * > & MappedDeclsFields,int BufSize)142 static RecordDecl *buildRecordForGlobalizedVars(
143 ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
144 ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
145 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
146 &MappedDeclsFields, int BufSize) {
147 using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
148 if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
149 return nullptr;
150 SmallVector<VarsDataTy, 4> GlobalizedVars;
151 for (const ValueDecl *D : EscapedDecls)
152 GlobalizedVars.emplace_back(
153 CharUnits::fromQuantity(std::max(
154 C.getDeclAlign(D).getQuantity(),
155 static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
156 D);
157 for (const ValueDecl *D : EscapedDeclsForTeams)
158 GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
159 llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
160 return L.first > R.first;
161 });
162
163 // Build struct _globalized_locals_ty {
164 // /* globalized vars */[WarSize] align (max(decl_align,
165 // GlobalMemoryAlignment))
166 // /* globalized vars */ for EscapedDeclsForTeams
167 // };
168 RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
169 GlobalizedRD->startDefinition();
170 llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
171 EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
172 for (const auto &Pair : GlobalizedVars) {
173 const ValueDecl *VD = Pair.second;
174 QualType Type = VD->getType();
175 if (Type->isLValueReferenceType())
176 Type = C.getPointerType(Type.getNonReferenceType());
177 else
178 Type = Type.getNonReferenceType();
179 SourceLocation Loc = VD->getLocation();
180 FieldDecl *Field;
181 if (SingleEscaped.count(VD)) {
182 Field = FieldDecl::Create(
183 C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
184 C.getTrivialTypeSourceInfo(Type, SourceLocation()),
185 /*BW=*/nullptr, /*Mutable=*/false,
186 /*InitStyle=*/ICIS_NoInit);
187 Field->setAccess(AS_public);
188 if (VD->hasAttrs()) {
189 for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
190 E(VD->getAttrs().end());
191 I != E; ++I)
192 Field->addAttr(*I);
193 }
194 } else {
195 llvm::APInt ArraySize(32, BufSize);
196 Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
197 0);
198 Field = FieldDecl::Create(
199 C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
200 C.getTrivialTypeSourceInfo(Type, SourceLocation()),
201 /*BW=*/nullptr, /*Mutable=*/false,
202 /*InitStyle=*/ICIS_NoInit);
203 Field->setAccess(AS_public);
204 llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
205 static_cast<CharUnits::QuantityType>(
206 GlobalMemoryAlignment)));
207 Field->addAttr(AlignedAttr::CreateImplicit(
208 C, /*IsAlignmentExpr=*/true,
209 IntegerLiteral::Create(C, Align,
210 C.getIntTypeForBitwidth(32, /*Signed=*/0),
211 SourceLocation()),
212 {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
213 }
214 GlobalizedRD->addDecl(Field);
215 MappedDeclsFields.try_emplace(VD, Field);
216 }
217 GlobalizedRD->completeDefinition();
218 return GlobalizedRD;
219 }
220
221 /// Get the list of variables that can escape their declaration context.
222 class CheckVarsEscapingDeclContext final
223 : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
224 CodeGenFunction &CGF;
225 llvm::SetVector<const ValueDecl *> EscapedDecls;
226 llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
227 llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
228 RecordDecl *GlobalizedRD = nullptr;
229 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
230 bool AllEscaped = false;
231 bool IsForCombinedParallelRegion = false;
232
markAsEscaped(const ValueDecl * VD)233 void markAsEscaped(const ValueDecl *VD) {
234 // Do not globalize declare target variables.
235 if (!isa<VarDecl>(VD) ||
236 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
237 return;
238 VD = cast<ValueDecl>(VD->getCanonicalDecl());
239 // Use user-specified allocation.
240 if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
241 return;
242 // Variables captured by value must be globalized.
243 if (auto *CSI = CGF.CapturedStmtInfo) {
244 if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
245 // Check if need to capture the variable that was already captured by
246 // value in the outer region.
247 if (!IsForCombinedParallelRegion) {
248 if (!FD->hasAttrs())
249 return;
250 const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
251 if (!Attr)
252 return;
253 if (((Attr->getCaptureKind() != OMPC_map) &&
254 !isOpenMPPrivate(Attr->getCaptureKind())) ||
255 ((Attr->getCaptureKind() == OMPC_map) &&
256 !FD->getType()->isAnyPointerType()))
257 return;
258 }
259 if (!FD->getType()->isReferenceType()) {
260 assert(!VD->getType()->isVariablyModifiedType() &&
261 "Parameter captured by value with variably modified type");
262 EscapedParameters.insert(VD);
263 } else if (!IsForCombinedParallelRegion) {
264 return;
265 }
266 }
267 }
268 if ((!CGF.CapturedStmtInfo ||
269 (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
270 VD->getType()->isReferenceType())
271 // Do not globalize variables with reference type.
272 return;
273 if (VD->getType()->isVariablyModifiedType())
274 EscapedVariableLengthDecls.insert(VD);
275 else
276 EscapedDecls.insert(VD);
277 }
278
VisitValueDecl(const ValueDecl * VD)279 void VisitValueDecl(const ValueDecl *VD) {
280 if (VD->getType()->isLValueReferenceType())
281 markAsEscaped(VD);
282 if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
283 if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
284 const bool SavedAllEscaped = AllEscaped;
285 AllEscaped = VD->getType()->isLValueReferenceType();
286 Visit(VarD->getInit());
287 AllEscaped = SavedAllEscaped;
288 }
289 }
290 }
VisitOpenMPCapturedStmt(const CapturedStmt * S,ArrayRef<OMPClause * > Clauses,bool IsCombinedParallelRegion)291 void VisitOpenMPCapturedStmt(const CapturedStmt *S,
292 ArrayRef<OMPClause *> Clauses,
293 bool IsCombinedParallelRegion) {
294 if (!S)
295 return;
296 for (const CapturedStmt::Capture &C : S->captures()) {
297 if (C.capturesVariable() && !C.capturesVariableByCopy()) {
298 const ValueDecl *VD = C.getCapturedVar();
299 bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
300 if (IsCombinedParallelRegion) {
301 // Check if the variable is privatized in the combined construct and
302 // those private copies must be shared in the inner parallel
303 // directive.
304 IsForCombinedParallelRegion = false;
305 for (const OMPClause *C : Clauses) {
306 if (!isOpenMPPrivate(C->getClauseKind()) ||
307 C->getClauseKind() == OMPC_reduction ||
308 C->getClauseKind() == OMPC_linear ||
309 C->getClauseKind() == OMPC_private)
310 continue;
311 ArrayRef<const Expr *> Vars;
312 if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
313 Vars = PC->getVarRefs();
314 else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
315 Vars = PC->getVarRefs();
316 else
317 llvm_unreachable("Unexpected clause.");
318 for (const auto *E : Vars) {
319 const Decl *D =
320 cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
321 if (D == VD->getCanonicalDecl()) {
322 IsForCombinedParallelRegion = true;
323 break;
324 }
325 }
326 if (IsForCombinedParallelRegion)
327 break;
328 }
329 }
330 markAsEscaped(VD);
331 if (isa<OMPCapturedExprDecl>(VD))
332 VisitValueDecl(VD);
333 IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
334 }
335 }
336 }
337
buildRecordForGlobalizedVars(bool IsInTTDRegion)338 void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
339 assert(!GlobalizedRD &&
340 "Record for globalized variables is built already.");
341 ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
342 unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
343 if (IsInTTDRegion)
344 EscapedDeclsForTeams = EscapedDecls.getArrayRef();
345 else
346 EscapedDeclsForParallel = EscapedDecls.getArrayRef();
347 GlobalizedRD = ::buildRecordForGlobalizedVars(
348 CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
349 MappedDeclsFields, WarpSize);
350 }
351
352 public:
CheckVarsEscapingDeclContext(CodeGenFunction & CGF,ArrayRef<const ValueDecl * > TeamsReductions)353 CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
354 ArrayRef<const ValueDecl *> TeamsReductions)
355 : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
356 }
357 virtual ~CheckVarsEscapingDeclContext() = default;
VisitDeclStmt(const DeclStmt * S)358 void VisitDeclStmt(const DeclStmt *S) {
359 if (!S)
360 return;
361 for (const Decl *D : S->decls())
362 if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
363 VisitValueDecl(VD);
364 }
VisitOMPExecutableDirective(const OMPExecutableDirective * D)365 void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
366 if (!D)
367 return;
368 if (!D->hasAssociatedStmt())
369 return;
370 if (const auto *S =
371 dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
372 // Do not analyze directives that do not actually require capturing,
373 // like `omp for` or `omp simd` directives.
374 llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
375 getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
376 if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
377 VisitStmt(S->getCapturedStmt());
378 return;
379 }
380 VisitOpenMPCapturedStmt(
381 S, D->clauses(),
382 CaptureRegions.back() == OMPD_parallel &&
383 isOpenMPDistributeDirective(D->getDirectiveKind()));
384 }
385 }
VisitCapturedStmt(const CapturedStmt * S)386 void VisitCapturedStmt(const CapturedStmt *S) {
387 if (!S)
388 return;
389 for (const CapturedStmt::Capture &C : S->captures()) {
390 if (C.capturesVariable() && !C.capturesVariableByCopy()) {
391 const ValueDecl *VD = C.getCapturedVar();
392 markAsEscaped(VD);
393 if (isa<OMPCapturedExprDecl>(VD))
394 VisitValueDecl(VD);
395 }
396 }
397 }
VisitLambdaExpr(const LambdaExpr * E)398 void VisitLambdaExpr(const LambdaExpr *E) {
399 if (!E)
400 return;
401 for (const LambdaCapture &C : E->captures()) {
402 if (C.capturesVariable()) {
403 if (C.getCaptureKind() == LCK_ByRef) {
404 const ValueDecl *VD = C.getCapturedVar();
405 markAsEscaped(VD);
406 if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
407 VisitValueDecl(VD);
408 }
409 }
410 }
411 }
VisitBlockExpr(const BlockExpr * E)412 void VisitBlockExpr(const BlockExpr *E) {
413 if (!E)
414 return;
415 for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
416 if (C.isByRef()) {
417 const VarDecl *VD = C.getVariable();
418 markAsEscaped(VD);
419 if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
420 VisitValueDecl(VD);
421 }
422 }
423 }
VisitCallExpr(const CallExpr * E)424 void VisitCallExpr(const CallExpr *E) {
425 if (!E)
426 return;
427 for (const Expr *Arg : E->arguments()) {
428 if (!Arg)
429 continue;
430 if (Arg->isLValue()) {
431 const bool SavedAllEscaped = AllEscaped;
432 AllEscaped = true;
433 Visit(Arg);
434 AllEscaped = SavedAllEscaped;
435 } else {
436 Visit(Arg);
437 }
438 }
439 Visit(E->getCallee());
440 }
VisitDeclRefExpr(const DeclRefExpr * E)441 void VisitDeclRefExpr(const DeclRefExpr *E) {
442 if (!E)
443 return;
444 const ValueDecl *VD = E->getDecl();
445 if (AllEscaped)
446 markAsEscaped(VD);
447 if (isa<OMPCapturedExprDecl>(VD))
448 VisitValueDecl(VD);
449 else if (const auto *VarD = dyn_cast<VarDecl>(VD))
450 if (VarD->isInitCapture())
451 VisitValueDecl(VD);
452 }
VisitUnaryOperator(const UnaryOperator * E)453 void VisitUnaryOperator(const UnaryOperator *E) {
454 if (!E)
455 return;
456 if (E->getOpcode() == UO_AddrOf) {
457 const bool SavedAllEscaped = AllEscaped;
458 AllEscaped = true;
459 Visit(E->getSubExpr());
460 AllEscaped = SavedAllEscaped;
461 } else {
462 Visit(E->getSubExpr());
463 }
464 }
VisitImplicitCastExpr(const ImplicitCastExpr * E)465 void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
466 if (!E)
467 return;
468 if (E->getCastKind() == CK_ArrayToPointerDecay) {
469 const bool SavedAllEscaped = AllEscaped;
470 AllEscaped = true;
471 Visit(E->getSubExpr());
472 AllEscaped = SavedAllEscaped;
473 } else {
474 Visit(E->getSubExpr());
475 }
476 }
VisitExpr(const Expr * E)477 void VisitExpr(const Expr *E) {
478 if (!E)
479 return;
480 bool SavedAllEscaped = AllEscaped;
481 if (!E->isLValue())
482 AllEscaped = false;
483 for (const Stmt *Child : E->children())
484 if (Child)
485 Visit(Child);
486 AllEscaped = SavedAllEscaped;
487 }
VisitStmt(const Stmt * S)488 void VisitStmt(const Stmt *S) {
489 if (!S)
490 return;
491 for (const Stmt *Child : S->children())
492 if (Child)
493 Visit(Child);
494 }
495
496 /// Returns the record that handles all the escaped local variables and used
497 /// instead of their original storage.
getGlobalizedRecord(bool IsInTTDRegion)498 const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
499 if (!GlobalizedRD)
500 buildRecordForGlobalizedVars(IsInTTDRegion);
501 return GlobalizedRD;
502 }
503
504 /// Returns the field in the globalized record for the escaped variable.
getFieldForGlobalizedVar(const ValueDecl * VD) const505 const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
506 assert(GlobalizedRD &&
507 "Record for globalized variables must be generated already.");
508 auto I = MappedDeclsFields.find(VD);
509 if (I == MappedDeclsFields.end())
510 return nullptr;
511 return I->getSecond();
512 }
513
514 /// Returns the list of the escaped local variables/parameters.
getEscapedDecls() const515 ArrayRef<const ValueDecl *> getEscapedDecls() const {
516 return EscapedDecls.getArrayRef();
517 }
518
519 /// Checks if the escaped local variable is actually a parameter passed by
520 /// value.
getEscapedParameters() const521 const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
522 return EscapedParameters;
523 }
524
525 /// Returns the list of the escaped variables with the variably modified
526 /// types.
getEscapedVariableLengthDecls() const527 ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
528 return EscapedVariableLengthDecls.getArrayRef();
529 }
530 };
531 } // anonymous namespace
532
533 /// Get the id of the warp in the block.
534 /// We assume that the warp size is 32, which is always the case
535 /// on the NVPTX device, to generate more efficient code.
getNVPTXWarpID(CodeGenFunction & CGF)536 static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
537 CGBuilderTy &Bld = CGF.Builder;
538 unsigned LaneIDBits =
539 CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size_Log2);
540 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
541 return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
542 }
543
544 /// Get the id of the current lane in the Warp.
545 /// We assume that the warp size is 32, which is always the case
546 /// on the NVPTX device, to generate more efficient code.
getNVPTXLaneID(CodeGenFunction & CGF)547 static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
548 CGBuilderTy &Bld = CGF.Builder;
549 unsigned LaneIDMask = CGF.getContext().getTargetInfo().getGridValue(
550 llvm::omp::GV_Warp_Size_Log2_Mask);
551 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
552 return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
553 "nvptx_lane_id");
554 }
555
556 /// Get the value of the thread_limit clause in the teams directive.
557 /// For the 'generic' execution mode, the runtime encodes thread_limit in
558 /// the launch parameters, always starting thread_limit+warpSize threads per
559 /// CTA. The threads in the last warp are reserved for master execution.
560 /// For the 'spmd' execution mode, all threads in a CTA are part of the team.
getThreadLimit(CodeGenFunction & CGF,bool IsInSPMDExecutionMode=false)561 static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
562 bool IsInSPMDExecutionMode = false) {
563 CGBuilderTy &Bld = CGF.Builder;
564 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
565 return IsInSPMDExecutionMode
566 ? RT.getGPUNumThreads(CGF)
567 : Bld.CreateNUWSub(RT.getGPUNumThreads(CGF),
568 RT.getGPUWarpSize(CGF), "thread_limit");
569 }
570
571 /// Get the thread id of the OMP master thread.
572 /// The master thread id is the first thread (lane) of the last warp in the
573 /// GPU block. Warp size is assumed to be some power of 2.
574 /// Thread id is 0 indexed.
575 /// E.g: If NumThreads is 33, master id is 32.
576 /// If NumThreads is 64, master id is 32.
577 /// If NumThreads is 1024, master id is 992.
getMasterThreadID(CodeGenFunction & CGF)578 static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
579 CGBuilderTy &Bld = CGF.Builder;
580 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
581 llvm::Value *NumThreads = RT.getGPUNumThreads(CGF);
582 // We assume that the warp size is a power of 2.
583 llvm::Value *Mask = Bld.CreateNUWSub(RT.getGPUWarpSize(CGF), Bld.getInt32(1));
584
585 return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
586 Bld.CreateNot(Mask), "master_tid");
587 }
588
WorkerFunctionState(CodeGenModule & CGM,SourceLocation Loc)589 CGOpenMPRuntimeGPU::WorkerFunctionState::WorkerFunctionState(
590 CodeGenModule &CGM, SourceLocation Loc)
591 : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
592 Loc(Loc) {
593 createWorkerFunction(CGM);
594 }
595
createWorkerFunction(CodeGenModule & CGM)596 void CGOpenMPRuntimeGPU::WorkerFunctionState::createWorkerFunction(
597 CodeGenModule &CGM) {
598 // Create an worker function with no arguments.
599
600 WorkerFn = llvm::Function::Create(
601 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
602 /*placeholder=*/"_worker", &CGM.getModule());
603 CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
604 WorkerFn->setDoesNotRecurse();
605 }
606
607 CGOpenMPRuntimeGPU::ExecutionMode
getExecutionMode() const608 CGOpenMPRuntimeGPU::getExecutionMode() const {
609 return CurrentExecutionMode;
610 }
611
612 static CGOpenMPRuntimeGPU::DataSharingMode
getDataSharingMode(CodeGenModule & CGM)613 getDataSharingMode(CodeGenModule &CGM) {
614 return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
615 : CGOpenMPRuntimeGPU::Generic;
616 }
617
618 /// Check for inner (nested) SPMD construct, if any
hasNestedSPMDDirective(ASTContext & Ctx,const OMPExecutableDirective & D)619 static bool hasNestedSPMDDirective(ASTContext &Ctx,
620 const OMPExecutableDirective &D) {
621 const auto *CS = D.getInnermostCapturedStmt();
622 const auto *Body =
623 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
624 const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
625
626 if (const auto *NestedDir =
627 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
628 OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
629 switch (D.getDirectiveKind()) {
630 case OMPD_target:
631 if (isOpenMPParallelDirective(DKind))
632 return true;
633 if (DKind == OMPD_teams) {
634 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
635 /*IgnoreCaptured=*/true);
636 if (!Body)
637 return false;
638 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
639 if (const auto *NND =
640 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
641 DKind = NND->getDirectiveKind();
642 if (isOpenMPParallelDirective(DKind))
643 return true;
644 }
645 }
646 return false;
647 case OMPD_target_teams:
648 return isOpenMPParallelDirective(DKind);
649 case OMPD_target_simd:
650 case OMPD_target_parallel:
651 case OMPD_target_parallel_for:
652 case OMPD_target_parallel_for_simd:
653 case OMPD_target_teams_distribute:
654 case OMPD_target_teams_distribute_simd:
655 case OMPD_target_teams_distribute_parallel_for:
656 case OMPD_target_teams_distribute_parallel_for_simd:
657 case OMPD_parallel:
658 case OMPD_for:
659 case OMPD_parallel_for:
660 case OMPD_parallel_master:
661 case OMPD_parallel_sections:
662 case OMPD_for_simd:
663 case OMPD_parallel_for_simd:
664 case OMPD_cancel:
665 case OMPD_cancellation_point:
666 case OMPD_ordered:
667 case OMPD_threadprivate:
668 case OMPD_allocate:
669 case OMPD_task:
670 case OMPD_simd:
671 case OMPD_sections:
672 case OMPD_section:
673 case OMPD_single:
674 case OMPD_master:
675 case OMPD_critical:
676 case OMPD_taskyield:
677 case OMPD_barrier:
678 case OMPD_taskwait:
679 case OMPD_taskgroup:
680 case OMPD_atomic:
681 case OMPD_flush:
682 case OMPD_depobj:
683 case OMPD_scan:
684 case OMPD_teams:
685 case OMPD_target_data:
686 case OMPD_target_exit_data:
687 case OMPD_target_enter_data:
688 case OMPD_distribute:
689 case OMPD_distribute_simd:
690 case OMPD_distribute_parallel_for:
691 case OMPD_distribute_parallel_for_simd:
692 case OMPD_teams_distribute:
693 case OMPD_teams_distribute_simd:
694 case OMPD_teams_distribute_parallel_for:
695 case OMPD_teams_distribute_parallel_for_simd:
696 case OMPD_target_update:
697 case OMPD_declare_simd:
698 case OMPD_declare_variant:
699 case OMPD_begin_declare_variant:
700 case OMPD_end_declare_variant:
701 case OMPD_declare_target:
702 case OMPD_end_declare_target:
703 case OMPD_declare_reduction:
704 case OMPD_declare_mapper:
705 case OMPD_taskloop:
706 case OMPD_taskloop_simd:
707 case OMPD_master_taskloop:
708 case OMPD_master_taskloop_simd:
709 case OMPD_parallel_master_taskloop:
710 case OMPD_parallel_master_taskloop_simd:
711 case OMPD_requires:
712 case OMPD_unknown:
713 default:
714 llvm_unreachable("Unexpected directive.");
715 }
716 }
717
718 return false;
719 }
720
supportsSPMDExecutionMode(ASTContext & Ctx,const OMPExecutableDirective & D)721 static bool supportsSPMDExecutionMode(ASTContext &Ctx,
722 const OMPExecutableDirective &D) {
723 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
724 switch (DirectiveKind) {
725 case OMPD_target:
726 case OMPD_target_teams:
727 return hasNestedSPMDDirective(Ctx, D);
728 case OMPD_target_parallel:
729 case OMPD_target_parallel_for:
730 case OMPD_target_parallel_for_simd:
731 case OMPD_target_teams_distribute_parallel_for:
732 case OMPD_target_teams_distribute_parallel_for_simd:
733 case OMPD_target_simd:
734 case OMPD_target_teams_distribute_simd:
735 return true;
736 case OMPD_target_teams_distribute:
737 return false;
738 case OMPD_parallel:
739 case OMPD_for:
740 case OMPD_parallel_for:
741 case OMPD_parallel_master:
742 case OMPD_parallel_sections:
743 case OMPD_for_simd:
744 case OMPD_parallel_for_simd:
745 case OMPD_cancel:
746 case OMPD_cancellation_point:
747 case OMPD_ordered:
748 case OMPD_threadprivate:
749 case OMPD_allocate:
750 case OMPD_task:
751 case OMPD_simd:
752 case OMPD_sections:
753 case OMPD_section:
754 case OMPD_single:
755 case OMPD_master:
756 case OMPD_critical:
757 case OMPD_taskyield:
758 case OMPD_barrier:
759 case OMPD_taskwait:
760 case OMPD_taskgroup:
761 case OMPD_atomic:
762 case OMPD_flush:
763 case OMPD_depobj:
764 case OMPD_scan:
765 case OMPD_teams:
766 case OMPD_target_data:
767 case OMPD_target_exit_data:
768 case OMPD_target_enter_data:
769 case OMPD_distribute:
770 case OMPD_distribute_simd:
771 case OMPD_distribute_parallel_for:
772 case OMPD_distribute_parallel_for_simd:
773 case OMPD_teams_distribute:
774 case OMPD_teams_distribute_simd:
775 case OMPD_teams_distribute_parallel_for:
776 case OMPD_teams_distribute_parallel_for_simd:
777 case OMPD_target_update:
778 case OMPD_declare_simd:
779 case OMPD_declare_variant:
780 case OMPD_begin_declare_variant:
781 case OMPD_end_declare_variant:
782 case OMPD_declare_target:
783 case OMPD_end_declare_target:
784 case OMPD_declare_reduction:
785 case OMPD_declare_mapper:
786 case OMPD_taskloop:
787 case OMPD_taskloop_simd:
788 case OMPD_master_taskloop:
789 case OMPD_master_taskloop_simd:
790 case OMPD_parallel_master_taskloop:
791 case OMPD_parallel_master_taskloop_simd:
792 case OMPD_requires:
793 case OMPD_unknown:
794 default:
795 break;
796 }
797 llvm_unreachable(
798 "Unknown programming model for OpenMP directive on NVPTX target.");
799 }
800
801 /// Check if the directive is loops based and has schedule clause at all or has
802 /// static scheduling.
hasStaticScheduling(const OMPExecutableDirective & D)803 static bool hasStaticScheduling(const OMPExecutableDirective &D) {
804 assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
805 isOpenMPLoopDirective(D.getDirectiveKind()) &&
806 "Expected loop-based directive.");
807 return !D.hasClausesOfKind<OMPOrderedClause>() &&
808 (!D.hasClausesOfKind<OMPScheduleClause>() ||
809 llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
810 [](const OMPScheduleClause *C) {
811 return C->getScheduleKind() == OMPC_SCHEDULE_static;
812 }));
813 }
814
815 /// Check for inner (nested) lightweight runtime construct, if any
hasNestedLightweightDirective(ASTContext & Ctx,const OMPExecutableDirective & D)816 static bool hasNestedLightweightDirective(ASTContext &Ctx,
817 const OMPExecutableDirective &D) {
818 assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
819 const auto *CS = D.getInnermostCapturedStmt();
820 const auto *Body =
821 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
822 const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
823
824 if (const auto *NestedDir =
825 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
826 OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
827 switch (D.getDirectiveKind()) {
828 case OMPD_target:
829 if (isOpenMPParallelDirective(DKind) &&
830 isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
831 hasStaticScheduling(*NestedDir))
832 return true;
833 if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
834 return true;
835 if (DKind == OMPD_parallel) {
836 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
837 /*IgnoreCaptured=*/true);
838 if (!Body)
839 return false;
840 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
841 if (const auto *NND =
842 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
843 DKind = NND->getDirectiveKind();
844 if (isOpenMPWorksharingDirective(DKind) &&
845 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
846 return true;
847 }
848 } else if (DKind == OMPD_teams) {
849 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
850 /*IgnoreCaptured=*/true);
851 if (!Body)
852 return false;
853 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
854 if (const auto *NND =
855 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
856 DKind = NND->getDirectiveKind();
857 if (isOpenMPParallelDirective(DKind) &&
858 isOpenMPWorksharingDirective(DKind) &&
859 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
860 return true;
861 if (DKind == OMPD_parallel) {
862 Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
863 /*IgnoreCaptured=*/true);
864 if (!Body)
865 return false;
866 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
867 if (const auto *NND =
868 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
869 DKind = NND->getDirectiveKind();
870 if (isOpenMPWorksharingDirective(DKind) &&
871 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
872 return true;
873 }
874 }
875 }
876 }
877 return false;
878 case OMPD_target_teams:
879 if (isOpenMPParallelDirective(DKind) &&
880 isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
881 hasStaticScheduling(*NestedDir))
882 return true;
883 if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
884 return true;
885 if (DKind == OMPD_parallel) {
886 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
887 /*IgnoreCaptured=*/true);
888 if (!Body)
889 return false;
890 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
891 if (const auto *NND =
892 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
893 DKind = NND->getDirectiveKind();
894 if (isOpenMPWorksharingDirective(DKind) &&
895 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
896 return true;
897 }
898 }
899 return false;
900 case OMPD_target_parallel:
901 if (DKind == OMPD_simd)
902 return true;
903 return isOpenMPWorksharingDirective(DKind) &&
904 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
905 case OMPD_target_teams_distribute:
906 case OMPD_target_simd:
907 case OMPD_target_parallel_for:
908 case OMPD_target_parallel_for_simd:
909 case OMPD_target_teams_distribute_simd:
910 case OMPD_target_teams_distribute_parallel_for:
911 case OMPD_target_teams_distribute_parallel_for_simd:
912 case OMPD_parallel:
913 case OMPD_for:
914 case OMPD_parallel_for:
915 case OMPD_parallel_master:
916 case OMPD_parallel_sections:
917 case OMPD_for_simd:
918 case OMPD_parallel_for_simd:
919 case OMPD_cancel:
920 case OMPD_cancellation_point:
921 case OMPD_ordered:
922 case OMPD_threadprivate:
923 case OMPD_allocate:
924 case OMPD_task:
925 case OMPD_simd:
926 case OMPD_sections:
927 case OMPD_section:
928 case OMPD_single:
929 case OMPD_master:
930 case OMPD_critical:
931 case OMPD_taskyield:
932 case OMPD_barrier:
933 case OMPD_taskwait:
934 case OMPD_taskgroup:
935 case OMPD_atomic:
936 case OMPD_flush:
937 case OMPD_depobj:
938 case OMPD_scan:
939 case OMPD_teams:
940 case OMPD_target_data:
941 case OMPD_target_exit_data:
942 case OMPD_target_enter_data:
943 case OMPD_distribute:
944 case OMPD_distribute_simd:
945 case OMPD_distribute_parallel_for:
946 case OMPD_distribute_parallel_for_simd:
947 case OMPD_teams_distribute:
948 case OMPD_teams_distribute_simd:
949 case OMPD_teams_distribute_parallel_for:
950 case OMPD_teams_distribute_parallel_for_simd:
951 case OMPD_target_update:
952 case OMPD_declare_simd:
953 case OMPD_declare_variant:
954 case OMPD_begin_declare_variant:
955 case OMPD_end_declare_variant:
956 case OMPD_declare_target:
957 case OMPD_end_declare_target:
958 case OMPD_declare_reduction:
959 case OMPD_declare_mapper:
960 case OMPD_taskloop:
961 case OMPD_taskloop_simd:
962 case OMPD_master_taskloop:
963 case OMPD_master_taskloop_simd:
964 case OMPD_parallel_master_taskloop:
965 case OMPD_parallel_master_taskloop_simd:
966 case OMPD_requires:
967 case OMPD_unknown:
968 default:
969 llvm_unreachable("Unexpected directive.");
970 }
971 }
972
973 return false;
974 }
975
976 /// Checks if the construct supports lightweight runtime. It must be SPMD
977 /// construct + inner loop-based construct with static scheduling.
supportsLightweightRuntime(ASTContext & Ctx,const OMPExecutableDirective & D)978 static bool supportsLightweightRuntime(ASTContext &Ctx,
979 const OMPExecutableDirective &D) {
980 if (!supportsSPMDExecutionMode(Ctx, D))
981 return false;
982 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
983 switch (DirectiveKind) {
984 case OMPD_target:
985 case OMPD_target_teams:
986 case OMPD_target_parallel:
987 return hasNestedLightweightDirective(Ctx, D);
988 case OMPD_target_parallel_for:
989 case OMPD_target_parallel_for_simd:
990 case OMPD_target_teams_distribute_parallel_for:
991 case OMPD_target_teams_distribute_parallel_for_simd:
992 // (Last|First)-privates must be shared in parallel region.
993 return hasStaticScheduling(D);
994 case OMPD_target_simd:
995 case OMPD_target_teams_distribute_simd:
996 return true;
997 case OMPD_target_teams_distribute:
998 return false;
999 case OMPD_parallel:
1000 case OMPD_for:
1001 case OMPD_parallel_for:
1002 case OMPD_parallel_master:
1003 case OMPD_parallel_sections:
1004 case OMPD_for_simd:
1005 case OMPD_parallel_for_simd:
1006 case OMPD_cancel:
1007 case OMPD_cancellation_point:
1008 case OMPD_ordered:
1009 case OMPD_threadprivate:
1010 case OMPD_allocate:
1011 case OMPD_task:
1012 case OMPD_simd:
1013 case OMPD_sections:
1014 case OMPD_section:
1015 case OMPD_single:
1016 case OMPD_master:
1017 case OMPD_critical:
1018 case OMPD_taskyield:
1019 case OMPD_barrier:
1020 case OMPD_taskwait:
1021 case OMPD_taskgroup:
1022 case OMPD_atomic:
1023 case OMPD_flush:
1024 case OMPD_depobj:
1025 case OMPD_scan:
1026 case OMPD_teams:
1027 case OMPD_target_data:
1028 case OMPD_target_exit_data:
1029 case OMPD_target_enter_data:
1030 case OMPD_distribute:
1031 case OMPD_distribute_simd:
1032 case OMPD_distribute_parallel_for:
1033 case OMPD_distribute_parallel_for_simd:
1034 case OMPD_teams_distribute:
1035 case OMPD_teams_distribute_simd:
1036 case OMPD_teams_distribute_parallel_for:
1037 case OMPD_teams_distribute_parallel_for_simd:
1038 case OMPD_target_update:
1039 case OMPD_declare_simd:
1040 case OMPD_declare_variant:
1041 case OMPD_begin_declare_variant:
1042 case OMPD_end_declare_variant:
1043 case OMPD_declare_target:
1044 case OMPD_end_declare_target:
1045 case OMPD_declare_reduction:
1046 case OMPD_declare_mapper:
1047 case OMPD_taskloop:
1048 case OMPD_taskloop_simd:
1049 case OMPD_master_taskloop:
1050 case OMPD_master_taskloop_simd:
1051 case OMPD_parallel_master_taskloop:
1052 case OMPD_parallel_master_taskloop_simd:
1053 case OMPD_requires:
1054 case OMPD_unknown:
1055 default:
1056 break;
1057 }
1058 llvm_unreachable(
1059 "Unknown programming model for OpenMP directive on NVPTX target.");
1060 }
1061
emitNonSPMDKernel(const OMPExecutableDirective & D,StringRef ParentName,llvm::Function * & OutlinedFn,llvm::Constant * & OutlinedFnID,bool IsOffloadEntry,const RegionCodeGenTy & CodeGen)1062 void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
1063 StringRef ParentName,
1064 llvm::Function *&OutlinedFn,
1065 llvm::Constant *&OutlinedFnID,
1066 bool IsOffloadEntry,
1067 const RegionCodeGenTy &CodeGen) {
1068 ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
1069 EntryFunctionState EST;
1070 WorkerFunctionState WST(CGM, D.getBeginLoc());
1071 Work.clear();
1072 WrapperFunctionsMap.clear();
1073
1074 // Emit target region as a standalone region.
1075 class NVPTXPrePostActionTy : public PrePostActionTy {
1076 CGOpenMPRuntimeGPU::EntryFunctionState &EST;
1077 CGOpenMPRuntimeGPU::WorkerFunctionState &WST;
1078
1079 public:
1080 NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST,
1081 CGOpenMPRuntimeGPU::WorkerFunctionState &WST)
1082 : EST(EST), WST(WST) {}
1083 void Enter(CodeGenFunction &CGF) override {
1084 auto &RT =
1085 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1086 RT.emitNonSPMDEntryHeader(CGF, EST, WST);
1087 // Skip target region initialization.
1088 RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1089 }
1090 void Exit(CodeGenFunction &CGF) override {
1091 auto &RT =
1092 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1093 RT.clearLocThreadIdInsertPt(CGF);
1094 RT.emitNonSPMDEntryFooter(CGF, EST);
1095 }
1096 } Action(EST, WST);
1097 CodeGen.setAction(Action);
1098 IsInTTDRegion = true;
1099 // Reserve place for the globalized memory.
1100 GlobalizedRecords.emplace_back();
1101 if (!KernelStaticGlobalized) {
1102 KernelStaticGlobalized = new llvm::GlobalVariable(
1103 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
1104 llvm::GlobalValue::InternalLinkage,
1105 llvm::UndefValue::get(CGM.VoidPtrTy),
1106 "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
1107 llvm::GlobalValue::NotThreadLocal,
1108 CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
1109 }
1110 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
1111 IsOffloadEntry, CodeGen);
1112 IsInTTDRegion = false;
1113
1114 // Now change the name of the worker function to correspond to this target
1115 // region's entry function.
1116 WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));
1117
1118 // Create the worker function
1119 emitWorkerFunction(WST);
1120 }
1121
1122 // Setup NVPTX threads for master-worker OpenMP scheme.
emitNonSPMDEntryHeader(CodeGenFunction & CGF,EntryFunctionState & EST,WorkerFunctionState & WST)1123 void CGOpenMPRuntimeGPU::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
1124 EntryFunctionState &EST,
1125 WorkerFunctionState &WST) {
1126 CGBuilderTy &Bld = CGF.Builder;
1127
1128 llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
1129 llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
1130 llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
1131 EST.ExitBB = CGF.createBasicBlock(".exit");
1132
1133 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1134 llvm::Value *IsWorker =
1135 Bld.CreateICmpULT(RT.getGPUThreadID(CGF), getThreadLimit(CGF));
1136 Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);
1137
1138 CGF.EmitBlock(WorkerBB);
1139 emitCall(CGF, WST.Loc, WST.WorkerFn);
1140 CGF.EmitBranch(EST.ExitBB);
1141
1142 CGF.EmitBlock(MasterCheckBB);
1143 llvm::Value *IsMaster =
1144 Bld.CreateICmpEQ(RT.getGPUThreadID(CGF), getMasterThreadID(CGF));
1145 Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);
1146
1147 CGF.EmitBlock(MasterBB);
1148 IsInTargetMasterThreadRegion = true;
1149 // SEQUENTIAL (MASTER) REGION START
1150 // First action in sequential region:
1151 // Initialize the state of the OpenMP runtime library on the GPU.
1152 // TODO: Optimize runtime initialization and pass in correct value.
1153 llvm::Value *Args[] = {getThreadLimit(CGF),
1154 Bld.getInt16(/*RequiresOMPRuntime=*/1)};
1155 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1156 CGM.getModule(), OMPRTL___kmpc_kernel_init),
1157 Args);
1158
1159 // For data sharing, we need to initialize the stack.
1160 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1161 CGM.getModule(), OMPRTL___kmpc_data_sharing_init_stack));
1162
1163 emitGenericVarsProlog(CGF, WST.Loc);
1164 }
1165
emitNonSPMDEntryFooter(CodeGenFunction & CGF,EntryFunctionState & EST)1166 void CGOpenMPRuntimeGPU::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
1167 EntryFunctionState &EST) {
1168 IsInTargetMasterThreadRegion = false;
1169 if (!CGF.HaveInsertPoint())
1170 return;
1171
1172 emitGenericVarsEpilog(CGF);
1173
1174 if (!EST.ExitBB)
1175 EST.ExitBB = CGF.createBasicBlock(".exit");
1176
1177 llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
1178 CGF.EmitBranch(TerminateBB);
1179
1180 CGF.EmitBlock(TerminateBB);
1181 // Signal termination condition.
1182 // TODO: Optimize runtime initialization and pass in correct value.
1183 llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
1184 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1185 CGM.getModule(), OMPRTL___kmpc_kernel_deinit),
1186 Args);
1187 // Barrier to terminate worker threads.
1188 syncCTAThreads(CGF);
1189 // Master thread jumps to exit point.
1190 CGF.EmitBranch(EST.ExitBB);
1191
1192 CGF.EmitBlock(EST.ExitBB);
1193 EST.ExitBB = nullptr;
1194 }
1195
emitSPMDKernel(const OMPExecutableDirective & D,StringRef ParentName,llvm::Function * & OutlinedFn,llvm::Constant * & OutlinedFnID,bool IsOffloadEntry,const RegionCodeGenTy & CodeGen)1196 void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
1197 StringRef ParentName,
1198 llvm::Function *&OutlinedFn,
1199 llvm::Constant *&OutlinedFnID,
1200 bool IsOffloadEntry,
1201 const RegionCodeGenTy &CodeGen) {
1202 ExecutionRuntimeModesRAII ModeRAII(
1203 CurrentExecutionMode, RequiresFullRuntime,
1204 CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
1205 !supportsLightweightRuntime(CGM.getContext(), D));
1206 EntryFunctionState EST;
1207
1208 // Emit target region as a standalone region.
1209 class NVPTXPrePostActionTy : public PrePostActionTy {
1210 CGOpenMPRuntimeGPU &RT;
1211 CGOpenMPRuntimeGPU::EntryFunctionState &EST;
1212 const OMPExecutableDirective &D;
1213
1214 public:
1215 NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
1216 CGOpenMPRuntimeGPU::EntryFunctionState &EST,
1217 const OMPExecutableDirective &D)
1218 : RT(RT), EST(EST), D(D) {}
1219 void Enter(CodeGenFunction &CGF) override {
1220 RT.emitSPMDEntryHeader(CGF, EST, D);
1221 // Skip target region initialization.
1222 RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1223 }
1224 void Exit(CodeGenFunction &CGF) override {
1225 RT.clearLocThreadIdInsertPt(CGF);
1226 RT.emitSPMDEntryFooter(CGF, EST);
1227 }
1228 } Action(*this, EST, D);
1229 CodeGen.setAction(Action);
1230 IsInTTDRegion = true;
1231 // Reserve place for the globalized memory.
1232 GlobalizedRecords.emplace_back();
1233 if (!KernelStaticGlobalized) {
1234 KernelStaticGlobalized = new llvm::GlobalVariable(
1235 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
1236 llvm::GlobalValue::InternalLinkage,
1237 llvm::UndefValue::get(CGM.VoidPtrTy),
1238 "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
1239 llvm::GlobalValue::NotThreadLocal,
1240 CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
1241 }
1242 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
1243 IsOffloadEntry, CodeGen);
1244 IsInTTDRegion = false;
1245 }
1246
emitSPMDEntryHeader(CodeGenFunction & CGF,EntryFunctionState & EST,const OMPExecutableDirective & D)1247 void CGOpenMPRuntimeGPU::emitSPMDEntryHeader(
1248 CodeGenFunction &CGF, EntryFunctionState &EST,
1249 const OMPExecutableDirective &D) {
1250 CGBuilderTy &Bld = CGF.Builder;
1251
1252 // Setup BBs in entry function.
1253 llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
1254 EST.ExitBB = CGF.createBasicBlock(".exit");
1255
1256 llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
1257 /*RequiresOMPRuntime=*/
1258 Bld.getInt16(RequiresFullRuntime ? 1 : 0)};
1259 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1260 CGM.getModule(), OMPRTL___kmpc_spmd_kernel_init),
1261 Args);
1262
1263 if (RequiresFullRuntime) {
1264 // For data sharing, we need to initialize the stack.
1265 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1266 CGM.getModule(), OMPRTL___kmpc_data_sharing_init_stack_spmd));
1267 }
1268
1269 CGF.EmitBranch(ExecuteBB);
1270
1271 CGF.EmitBlock(ExecuteBB);
1272
1273 IsInTargetMasterThreadRegion = true;
1274 }
1275
emitSPMDEntryFooter(CodeGenFunction & CGF,EntryFunctionState & EST)1276 void CGOpenMPRuntimeGPU::emitSPMDEntryFooter(CodeGenFunction &CGF,
1277 EntryFunctionState &EST) {
1278 IsInTargetMasterThreadRegion = false;
1279 if (!CGF.HaveInsertPoint())
1280 return;
1281
1282 if (!EST.ExitBB)
1283 EST.ExitBB = CGF.createBasicBlock(".exit");
1284
1285 llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
1286 CGF.EmitBranch(OMPDeInitBB);
1287
1288 CGF.EmitBlock(OMPDeInitBB);
1289 // DeInitialize the OMP state in the runtime; called by all active threads.
1290 llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
1291 CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
1292 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1293 CGM.getModule(), OMPRTL___kmpc_spmd_kernel_deinit_v2),
1294 Args);
1295 CGF.EmitBranch(EST.ExitBB);
1296
1297 CGF.EmitBlock(EST.ExitBB);
1298 EST.ExitBB = nullptr;
1299 }
1300
1301 // Create a unique global variable to indicate the execution mode of this target
1302 // region. The execution mode is either 'generic', or 'spmd' depending on the
1303 // target directive. This variable is picked up by the offload library to setup
1304 // the device appropriately before kernel launch. If the execution mode is
1305 // 'generic', the runtime reserves one warp for the master, otherwise, all
1306 // warps participate in parallel work.
setPropertyExecutionMode(CodeGenModule & CGM,StringRef Name,bool Mode)1307 static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
1308 bool Mode) {
1309 auto *GVMode =
1310 new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
1311 llvm::GlobalValue::WeakAnyLinkage,
1312 llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
1313 Twine(Name, "_exec_mode"));
1314 CGM.addCompilerUsedGlobal(GVMode);
1315 }
1316
emitWorkerFunction(WorkerFunctionState & WST)1317 void CGOpenMPRuntimeGPU::emitWorkerFunction(WorkerFunctionState &WST) {
1318 ASTContext &Ctx = CGM.getContext();
1319
1320 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
1321 CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
1322 WST.Loc, WST.Loc);
1323 emitWorkerLoop(CGF, WST);
1324 CGF.FinishFunction();
1325 }
1326
emitWorkerLoop(CodeGenFunction & CGF,WorkerFunctionState & WST)1327 void CGOpenMPRuntimeGPU::emitWorkerLoop(CodeGenFunction &CGF,
1328 WorkerFunctionState &WST) {
1329 //
1330 // The workers enter this loop and wait for parallel work from the master.
1331 // When the master encounters a parallel region it sets up the work + variable
1332 // arguments, and wakes up the workers. The workers first check to see if
1333 // they are required for the parallel region, i.e., within the # of requested
1334 // parallel threads. The activated workers load the variable arguments and
1335 // execute the parallel work.
1336 //
1337
1338 CGBuilderTy &Bld = CGF.Builder;
1339
1340 llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
1341 llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
1342 llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
1343 llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
1344 llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
1345 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
1346
1347 CGF.EmitBranch(AwaitBB);
1348
1349 // Workers wait for work from master.
1350 CGF.EmitBlock(AwaitBB);
1351 // Wait for parallel work
1352 syncCTAThreads(CGF);
1353
1354 Address WorkFn =
1355 CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
1356 Address ExecStatus =
1357 CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
1358 CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
1359 CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
1360
1361 // TODO: Optimize runtime initialization and pass in correct value.
1362 llvm::Value *Args[] = {WorkFn.getPointer()};
1363 llvm::Value *Ret =
1364 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1365 CGM.getModule(), OMPRTL___kmpc_kernel_parallel),
1366 Args);
1367 Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);
1368
1369 // On termination condition (workid == 0), exit loop.
1370 llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
1371 llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
1372 Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
1373
1374 // Activate requested workers.
1375 CGF.EmitBlock(SelectWorkersBB);
1376 llvm::Value *IsActive =
1377 Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
1378 Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);
1379
1380 // Signal start of parallel region.
1381 CGF.EmitBlock(ExecuteBB);
1382 // Skip initialization.
1383 setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1384
1385 // Process work items: outlined parallel functions.
1386 for (llvm::Function *W : Work) {
1387 // Try to match this outlined function.
1388 llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);
1389
1390 llvm::Value *WorkFnMatch =
1391 Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");
1392
1393 llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
1394 llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
1395 Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);
1396
1397 // Execute this outlined function.
1398 CGF.EmitBlock(ExecuteFNBB);
1399
1400 // Insert call to work function via shared wrapper. The shared
1401 // wrapper takes two arguments:
1402 // - the parallelism level;
1403 // - the thread ID;
1404 emitCall(CGF, WST.Loc, W,
1405 {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
1406
1407 // Go to end of parallel region.
1408 CGF.EmitBranch(TerminateBB);
1409
1410 CGF.EmitBlock(CheckNextBB);
1411 }
1412 // Default case: call to outlined function through pointer if the target
1413 // region makes a declare target call that may contain an orphaned parallel
1414 // directive.
1415 auto *ParallelFnTy =
1416 llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
1417 /*isVarArg=*/false);
1418 llvm::Value *WorkFnCast =
1419 Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
1420 // Insert call to work function via shared wrapper. The shared
1421 // wrapper takes two arguments:
1422 // - the parallelism level;
1423 // - the thread ID;
1424 emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
1425 {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
1426 // Go to end of parallel region.
1427 CGF.EmitBranch(TerminateBB);
1428
1429 // Signal end of parallel region.
1430 CGF.EmitBlock(TerminateBB);
1431 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1432 CGM.getModule(), OMPRTL___kmpc_kernel_end_parallel),
1433 llvm::None);
1434 CGF.EmitBranch(BarrierBB);
1435
1436 // All active and inactive workers wait at a barrier after parallel region.
1437 CGF.EmitBlock(BarrierBB);
1438 // Barrier after parallel region.
1439 syncCTAThreads(CGF);
1440 CGF.EmitBranch(AwaitBB);
1441
1442 // Exit target region.
1443 CGF.EmitBlock(ExitBB);
1444 // Skip initialization.
1445 clearLocThreadIdInsertPt(CGF);
1446 }
1447
createOffloadEntry(llvm::Constant * ID,llvm::Constant * Addr,uint64_t Size,int32_t,llvm::GlobalValue::LinkageTypes)1448 void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
1449 llvm::Constant *Addr,
1450 uint64_t Size, int32_t,
1451 llvm::GlobalValue::LinkageTypes) {
1452 // TODO: Add support for global variables on the device after declare target
1453 // support.
1454 if (!isa<llvm::Function>(Addr))
1455 return;
1456 llvm::Module &M = CGM.getModule();
1457 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
1458
1459 // Get "nvvm.annotations" metadata node
1460 llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
1461
1462 llvm::Metadata *MDVals[] = {
1463 llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
1464 llvm::ConstantAsMetadata::get(
1465 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
1466 // Append metadata to nvvm.annotations
1467 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
1468 }
1469
emitTargetOutlinedFunction(const OMPExecutableDirective & D,StringRef ParentName,llvm::Function * & OutlinedFn,llvm::Constant * & OutlinedFnID,bool IsOffloadEntry,const RegionCodeGenTy & CodeGen)1470 void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
1471 const OMPExecutableDirective &D, StringRef ParentName,
1472 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
1473 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
1474 if (!IsOffloadEntry) // Nothing to do.
1475 return;
1476
1477 assert(!ParentName.empty() && "Invalid target region parent name!");
1478
1479 bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
1480 if (Mode)
1481 emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1482 CodeGen);
1483 else
1484 emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1485 CodeGen);
1486
1487 setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
1488 }
1489
1490 namespace {
1491 LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
1492 /// Enum for accesseing the reserved_2 field of the ident_t struct.
1493 enum ModeFlagsTy : unsigned {
1494 /// Bit set to 1 when in SPMD mode.
1495 KMP_IDENT_SPMD_MODE = 0x01,
1496 /// Bit set to 1 when a simplified runtime is used.
1497 KMP_IDENT_SIMPLE_RT_MODE = 0x02,
1498 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
1499 };
1500
1501 /// Special mode Undefined. Is the combination of Non-SPMD mode + SimpleRuntime.
1502 static const ModeFlagsTy UndefinedMode =
1503 (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
1504 } // anonymous namespace
1505
getDefaultLocationReserved2Flags() const1506 unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
1507 switch (getExecutionMode()) {
1508 case EM_SPMD:
1509 if (requiresFullRuntime())
1510 return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
1511 return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
1512 case EM_NonSPMD:
1513 assert(requiresFullRuntime() && "Expected full runtime.");
1514 return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
1515 case EM_Unknown:
1516 return UndefinedMode;
1517 }
1518 llvm_unreachable("Unknown flags are requested.");
1519 }
1520
CGOpenMPRuntimeGPU(CodeGenModule & CGM)1521 CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
1522 : CGOpenMPRuntime(CGM, "_", "$") {
1523 if (!CGM.getLangOpts().OpenMPIsDevice)
1524 llvm_unreachable("OpenMP NVPTX can only handle device code.");
1525 }
1526
emitProcBindClause(CodeGenFunction & CGF,ProcBindKind ProcBind,SourceLocation Loc)1527 void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
1528 ProcBindKind ProcBind,
1529 SourceLocation Loc) {
1530 // Do nothing in case of SPMD mode and L0 parallel.
1531 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
1532 return;
1533
1534 CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
1535 }
1536
emitNumThreadsClause(CodeGenFunction & CGF,llvm::Value * NumThreads,SourceLocation Loc)1537 void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
1538 llvm::Value *NumThreads,
1539 SourceLocation Loc) {
1540 // Do nothing in case of SPMD mode and L0 parallel.
1541 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
1542 return;
1543
1544 CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
1545 }
1546
emitNumTeamsClause(CodeGenFunction & CGF,const Expr * NumTeams,const Expr * ThreadLimit,SourceLocation Loc)1547 void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
1548 const Expr *NumTeams,
1549 const Expr *ThreadLimit,
1550 SourceLocation Loc) {}
1551
emitParallelOutlinedFunction(const OMPExecutableDirective & D,const VarDecl * ThreadIDVar,OpenMPDirectiveKind InnermostKind,const RegionCodeGenTy & CodeGen)1552 llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
1553 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1554 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1555 // Emit target region as a standalone region.
1556 class NVPTXPrePostActionTy : public PrePostActionTy {
1557 bool &IsInParallelRegion;
1558 bool PrevIsInParallelRegion;
1559
1560 public:
1561 NVPTXPrePostActionTy(bool &IsInParallelRegion)
1562 : IsInParallelRegion(IsInParallelRegion) {}
1563 void Enter(CodeGenFunction &CGF) override {
1564 PrevIsInParallelRegion = IsInParallelRegion;
1565 IsInParallelRegion = true;
1566 }
1567 void Exit(CodeGenFunction &CGF) override {
1568 IsInParallelRegion = PrevIsInParallelRegion;
1569 }
1570 } Action(IsInParallelRegion);
1571 CodeGen.setAction(Action);
1572 bool PrevIsInTTDRegion = IsInTTDRegion;
1573 IsInTTDRegion = false;
1574 bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
1575 IsInTargetMasterThreadRegion = false;
1576 auto *OutlinedFun =
1577 cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
1578 D, ThreadIDVar, InnermostKind, CodeGen));
1579 if (CGM.getLangOpts().Optimize) {
1580 OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
1581 OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
1582 OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
1583 }
1584 IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
1585 IsInTTDRegion = PrevIsInTTDRegion;
1586 if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
1587 !IsInParallelRegion) {
1588 llvm::Function *WrapperFun =
1589 createParallelDataSharingWrapper(OutlinedFun, D);
1590 WrapperFunctionsMap[OutlinedFun] = WrapperFun;
1591 }
1592
1593 return OutlinedFun;
1594 }
1595
1596 /// Get list of lastprivate variables from the teams distribute ... or
1597 /// teams {distribute ...} directives.
1598 static void
getDistributeLastprivateVars(ASTContext & Ctx,const OMPExecutableDirective & D,llvm::SmallVectorImpl<const ValueDecl * > & Vars)1599 getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1600 llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1601 assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1602 "expected teams directive.");
1603 const OMPExecutableDirective *Dir = &D;
1604 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
1605 if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
1606 Ctx,
1607 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
1608 /*IgnoreCaptured=*/true))) {
1609 Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
1610 if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
1611 Dir = nullptr;
1612 }
1613 }
1614 if (!Dir)
1615 return;
1616 for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
1617 for (const Expr *E : C->getVarRefs())
1618 Vars.push_back(getPrivateItem(E));
1619 }
1620 }
1621
1622 /// Get list of reduction variables from the teams ... directives.
1623 static void
getTeamsReductionVars(ASTContext & Ctx,const OMPExecutableDirective & D,llvm::SmallVectorImpl<const ValueDecl * > & Vars)1624 getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1625 llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1626 assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1627 "expected teams directive.");
1628 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1629 for (const Expr *E : C->privates())
1630 Vars.push_back(getPrivateItem(E));
1631 }
1632 }
1633
emitTeamsOutlinedFunction(const OMPExecutableDirective & D,const VarDecl * ThreadIDVar,OpenMPDirectiveKind InnermostKind,const RegionCodeGenTy & CodeGen)1634 llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
1635 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1636 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1637 SourceLocation Loc = D.getBeginLoc();
1638
1639 const RecordDecl *GlobalizedRD = nullptr;
1640 llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
1641 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
1642 unsigned WarpSize = CGM.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
1643 // Globalize team reductions variable unconditionally in all modes.
1644 if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1645 getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
1646 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
1647 getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
1648 if (!LastPrivatesReductions.empty()) {
1649 GlobalizedRD = ::buildRecordForGlobalizedVars(
1650 CGM.getContext(), llvm::None, LastPrivatesReductions,
1651 MappedDeclsFields, WarpSize);
1652 }
1653 } else if (!LastPrivatesReductions.empty()) {
1654 assert(!TeamAndReductions.first &&
1655 "Previous team declaration is not expected.");
1656 TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
1657 std::swap(TeamAndReductions.second, LastPrivatesReductions);
1658 }
1659
1660 // Emit target region as a standalone region.
1661 class NVPTXPrePostActionTy : public PrePostActionTy {
1662 SourceLocation &Loc;
1663 const RecordDecl *GlobalizedRD;
1664 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1665 &MappedDeclsFields;
1666
1667 public:
1668 NVPTXPrePostActionTy(
1669 SourceLocation &Loc, const RecordDecl *GlobalizedRD,
1670 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1671 &MappedDeclsFields)
1672 : Loc(Loc), GlobalizedRD(GlobalizedRD),
1673 MappedDeclsFields(MappedDeclsFields) {}
1674 void Enter(CodeGenFunction &CGF) override {
1675 auto &Rt =
1676 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1677 if (GlobalizedRD) {
1678 auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
1679 I->getSecond().GlobalRecord = GlobalizedRD;
1680 I->getSecond().MappedParams =
1681 std::make_unique<CodeGenFunction::OMPMapVars>();
1682 DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
1683 for (const auto &Pair : MappedDeclsFields) {
1684 assert(Pair.getFirst()->isCanonicalDecl() &&
1685 "Expected canonical declaration");
1686 Data.insert(std::make_pair(Pair.getFirst(),
1687 MappedVarData(Pair.getSecond(),
1688 /*IsOnePerTeam=*/true)));
1689 }
1690 }
1691 Rt.emitGenericVarsProlog(CGF, Loc);
1692 }
1693 void Exit(CodeGenFunction &CGF) override {
1694 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
1695 .emitGenericVarsEpilog(CGF);
1696 }
1697 } Action(Loc, GlobalizedRD, MappedDeclsFields);
1698 CodeGen.setAction(Action);
1699 llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
1700 D, ThreadIDVar, InnermostKind, CodeGen);
1701 if (CGM.getLangOpts().Optimize) {
1702 OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
1703 OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
1704 OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
1705 }
1706
1707 return OutlinedFun;
1708 }
1709
emitGenericVarsProlog(CodeGenFunction & CGF,SourceLocation Loc,bool WithSPMDCheck)1710 void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
1711 SourceLocation Loc,
1712 bool WithSPMDCheck) {
1713 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1714 getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1715 return;
1716
1717 CGBuilderTy &Bld = CGF.Builder;
1718
1719 const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1720 if (I == FunctionGlobalizedDecls.end())
1721 return;
1722 if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
1723 QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
1724 QualType SecGlobalRecTy;
1725
1726 // Recover pointer to this function's global record. The runtime will
1727 // handle the specifics of the allocation of the memory.
1728 // Use actual memory size of the record including the padding
1729 // for alignment purposes.
1730 unsigned Alignment =
1731 CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
1732 unsigned GlobalRecordSize =
1733 CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
1734 GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
1735
1736 llvm::PointerType *GlobalRecPtrTy =
1737 CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
1738 llvm::Value *GlobalRecCastAddr;
1739 llvm::Value *IsTTD = nullptr;
1740 if (!IsInTTDRegion &&
1741 (WithSPMDCheck ||
1742 getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
1743 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
1744 llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
1745 llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
1746 if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
1747 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
1748 llvm::Value *ThreadID = getThreadID(CGF, Loc);
1749 llvm::Value *PL = CGF.EmitRuntimeCall(
1750 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
1751 OMPRTL___kmpc_parallel_level),
1752 {RTLoc, ThreadID});
1753 IsTTD = Bld.CreateIsNull(PL);
1754 }
1755 llvm::Value *IsSPMD = Bld.CreateIsNotNull(
1756 CGF.EmitNounwindRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1757 CGM.getModule(), OMPRTL___kmpc_is_spmd_exec_mode)));
1758 Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
1759 // There is no need to emit line number for unconditional branch.
1760 (void)ApplyDebugLocation::CreateEmpty(CGF);
1761 CGF.EmitBlock(SPMDBB);
1762 Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
1763 CharUnits::fromQuantity(Alignment));
1764 CGF.EmitBranch(ExitBB);
1765 // There is no need to emit line number for unconditional branch.
1766 (void)ApplyDebugLocation::CreateEmpty(CGF);
1767 CGF.EmitBlock(NonSPMDBB);
1768 llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
1769 if (const RecordDecl *SecGlobalizedVarsRecord =
1770 I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
1771 SecGlobalRecTy =
1772 CGM.getContext().getRecordType(SecGlobalizedVarsRecord);
1773
1774 // Recover pointer to this function's global record. The runtime will
1775 // handle the specifics of the allocation of the memory.
1776 // Use actual memory size of the record including the padding
1777 // for alignment purposes.
1778 unsigned Alignment =
1779 CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
1780 unsigned GlobalRecordSize =
1781 CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
1782 GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
1783 Size = Bld.CreateSelect(
1784 IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
1785 }
1786 // TODO: allow the usage of shared memory to be controlled by
1787 // the user, for now, default to global.
1788 llvm::Value *GlobalRecordSizeArg[] = {
1789 Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
1790 llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
1791 OMPBuilder.getOrCreateRuntimeFunction(
1792 CGM.getModule(), OMPRTL___kmpc_data_sharing_coalesced_push_stack),
1793 GlobalRecordSizeArg);
1794 GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1795 GlobalRecValue, GlobalRecPtrTy);
1796 CGF.EmitBlock(ExitBB);
1797 auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
1798 /*NumReservedValues=*/2, "_select_stack");
1799 Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
1800 Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
1801 GlobalRecCastAddr = Phi;
1802 I->getSecond().GlobalRecordAddr = Phi;
1803 I->getSecond().IsInSPMDModeFlag = IsSPMD;
1804 } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
1805 assert(GlobalizedRecords.back().Records.size() < 2 &&
1806 "Expected less than 2 globalized records: one for target and one "
1807 "for teams.");
1808 unsigned Offset = 0;
1809 for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
1810 QualType RDTy = CGM.getContext().getRecordType(RD);
1811 unsigned Alignment =
1812 CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
1813 unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
1814 Offset =
1815 llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
1816 }
1817 unsigned Alignment =
1818 CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
1819 Offset = llvm::alignTo(Offset, Alignment);
1820 GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
1821 ++GlobalizedRecords.back().RegionCounter;
1822 if (GlobalizedRecords.back().Records.size() == 1) {
1823 assert(KernelStaticGlobalized &&
1824 "Kernel static pointer must be initialized already.");
1825 auto *UseSharedMemory = new llvm::GlobalVariable(
1826 CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
1827 llvm::GlobalValue::InternalLinkage, nullptr,
1828 "_openmp_static_kernel$is_shared");
1829 UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1830 QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
1831 /*DestWidth=*/16, /*Signed=*/0);
1832 llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
1833 Address(UseSharedMemory,
1834 CGM.getContext().getTypeAlignInChars(Int16Ty)),
1835 /*Volatile=*/false, Int16Ty, Loc);
1836 auto *StaticGlobalized = new llvm::GlobalVariable(
1837 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
1838 llvm::GlobalValue::CommonLinkage, nullptr);
1839 auto *RecSize = new llvm::GlobalVariable(
1840 CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
1841 llvm::GlobalValue::InternalLinkage, nullptr,
1842 "_openmp_static_kernel$size");
1843 RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1844 llvm::Value *Ld = CGF.EmitLoadOfScalar(
1845 Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
1846 CGM.getContext().getSizeType(), Loc);
1847 llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1848 KernelStaticGlobalized, CGM.VoidPtrPtrTy);
1849 llvm::Value *GlobalRecordSizeArg[] = {
1850 llvm::ConstantInt::get(
1851 CGM.Int16Ty,
1852 getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
1853 StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
1854 CGF.EmitRuntimeCall(
1855 OMPBuilder.getOrCreateRuntimeFunction(
1856 CGM.getModule(), OMPRTL___kmpc_get_team_static_memory),
1857 GlobalRecordSizeArg);
1858 GlobalizedRecords.back().Buffer = StaticGlobalized;
1859 GlobalizedRecords.back().RecSize = RecSize;
1860 GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
1861 GlobalizedRecords.back().Loc = Loc;
1862 }
1863 assert(KernelStaticGlobalized && "Global address must be set already.");
1864 Address FrameAddr = CGF.EmitLoadOfPointer(
1865 Address(KernelStaticGlobalized, CGM.getPointerAlign()),
1866 CGM.getContext()
1867 .getPointerType(CGM.getContext().VoidPtrTy)
1868 .castAs<PointerType>());
1869 llvm::Value *GlobalRecValue =
1870 Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
1871 I->getSecond().GlobalRecordAddr = GlobalRecValue;
1872 I->getSecond().IsInSPMDModeFlag = nullptr;
1873 GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1874 GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
1875 } else {
1876 // TODO: allow the usage of shared memory to be controlled by
1877 // the user, for now, default to global.
1878 bool UseSharedMemory =
1879 IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
1880 llvm::Value *GlobalRecordSizeArg[] = {
1881 llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
1882 CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
1883 llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
1884 OMPBuilder.getOrCreateRuntimeFunction(
1885 CGM.getModule(),
1886 IsInTTDRegion ? OMPRTL___kmpc_data_sharing_push_stack
1887 : OMPRTL___kmpc_data_sharing_coalesced_push_stack),
1888 GlobalRecordSizeArg);
1889 GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1890 GlobalRecValue, GlobalRecPtrTy);
1891 I->getSecond().GlobalRecordAddr = GlobalRecValue;
1892 I->getSecond().IsInSPMDModeFlag = nullptr;
1893 }
1894 LValue Base =
1895 CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);
1896
1897 // Emit the "global alloca" which is a GEP from the global declaration
1898 // record using the pointer returned by the runtime.
1899 LValue SecBase;
1900 decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
1901 if (IsTTD) {
1902 SecIt = I->getSecond().SecondaryLocalVarData->begin();
1903 llvm::PointerType *SecGlobalRecPtrTy =
1904 CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
1905 SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
1906 Bld.CreatePointerBitCastOrAddrSpaceCast(
1907 I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
1908 SecGlobalRecTy);
1909 }
1910 for (auto &Rec : I->getSecond().LocalVarData) {
1911 bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
1912 llvm::Value *ParValue;
1913 if (EscapedParam) {
1914 const auto *VD = cast<VarDecl>(Rec.first);
1915 LValue ParLVal =
1916 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
1917 ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
1918 }
1919 LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
1920 // Emit VarAddr basing on lane-id if required.
1921 QualType VarTy;
1922 if (Rec.second.IsOnePerTeam) {
1923 VarTy = Rec.second.FD->getType();
1924 } else {
1925 llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
1926 VarAddr.getAddress(CGF).getPointer(),
1927 {Bld.getInt32(0), getNVPTXLaneID(CGF)});
1928 VarTy =
1929 Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
1930 VarAddr = CGF.MakeAddrLValue(
1931 Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
1932 AlignmentSource::Decl);
1933 }
1934 Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
1935 if (!IsInTTDRegion &&
1936 (WithSPMDCheck ||
1937 getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
1938 assert(I->getSecond().IsInSPMDModeFlag &&
1939 "Expected unknown execution mode or required SPMD check.");
1940 if (IsTTD) {
1941 assert(SecIt->second.IsOnePerTeam &&
1942 "Secondary glob data must be one per team.");
1943 LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
1944 VarAddr.setAddress(
1945 Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(CGF),
1946 VarAddr.getPointer(CGF)),
1947 VarAddr.getAlignment()));
1948 Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
1949 }
1950 Address GlobalPtr = Rec.second.PrivateAddr;
1951 Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
1952 Rec.second.PrivateAddr = Address(
1953 Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
1954 LocalAddr.getPointer(), GlobalPtr.getPointer()),
1955 LocalAddr.getAlignment());
1956 }
1957 if (EscapedParam) {
1958 const auto *VD = cast<VarDecl>(Rec.first);
1959 CGF.EmitStoreOfScalar(ParValue, VarAddr);
1960 I->getSecond().MappedParams->setVarAddr(CGF, VD,
1961 VarAddr.getAddress(CGF));
1962 }
1963 if (IsTTD)
1964 ++SecIt;
1965 }
1966 }
1967 for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
1968 // Recover pointer to this function's global record. The runtime will
1969 // handle the specifics of the allocation of the memory.
1970 // Use actual memory size of the record including the padding
1971 // for alignment purposes.
1972 CGBuilderTy &Bld = CGF.Builder;
1973 llvm::Value *Size = CGF.getTypeSize(VD->getType());
1974 CharUnits Align = CGM.getContext().getDeclAlign(VD);
1975 Size = Bld.CreateNUWAdd(
1976 Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
1977 llvm::Value *AlignVal =
1978 llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
1979 Size = Bld.CreateUDiv(Size, AlignVal);
1980 Size = Bld.CreateNUWMul(Size, AlignVal);
1981 // TODO: allow the usage of shared memory to be controlled by
1982 // the user, for now, default to global.
1983 llvm::Value *GlobalRecordSizeArg[] = {
1984 Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
1985 llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
1986 OMPBuilder.getOrCreateRuntimeFunction(
1987 CGM.getModule(), OMPRTL___kmpc_data_sharing_coalesced_push_stack),
1988 GlobalRecordSizeArg);
1989 llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1990 GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
1991 LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
1992 CGM.getContext().getDeclAlign(VD),
1993 AlignmentSource::Decl);
1994 I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
1995 Base.getAddress(CGF));
1996 I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
1997 }
1998 I->getSecond().MappedParams->apply(CGF);
1999 }
2000
emitGenericVarsEpilog(CodeGenFunction & CGF,bool WithSPMDCheck)2001 void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
2002 bool WithSPMDCheck) {
2003 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
2004 getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
2005 return;
2006
2007 const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
2008 if (I != FunctionGlobalizedDecls.end()) {
2009 I->getSecond().MappedParams->restore(CGF);
2010 if (!CGF.HaveInsertPoint())
2011 return;
2012 for (llvm::Value *Addr :
2013 llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
2014 CGF.EmitRuntimeCall(
2015 OMPBuilder.getOrCreateRuntimeFunction(
2016 CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
2017 Addr);
2018 }
2019 if (I->getSecond().GlobalRecordAddr) {
2020 if (!IsInTTDRegion &&
2021 (WithSPMDCheck ||
2022 getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
2023 CGBuilderTy &Bld = CGF.Builder;
2024 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2025 llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
2026 Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
2027 // There is no need to emit line number for unconditional branch.
2028 (void)ApplyDebugLocation::CreateEmpty(CGF);
2029 CGF.EmitBlock(NonSPMDBB);
2030 CGF.EmitRuntimeCall(
2031 OMPBuilder.getOrCreateRuntimeFunction(
2032 CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
2033 CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
2034 CGF.EmitBlock(ExitBB);
2035 } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
2036 assert(GlobalizedRecords.back().RegionCounter > 0 &&
2037 "region counter must be > 0.");
2038 --GlobalizedRecords.back().RegionCounter;
2039 // Emit the restore function only in the target region.
2040 if (GlobalizedRecords.back().RegionCounter == 0) {
2041 QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
2042 /*DestWidth=*/16, /*Signed=*/0);
2043 llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
2044 Address(GlobalizedRecords.back().UseSharedMemory,
2045 CGM.getContext().getTypeAlignInChars(Int16Ty)),
2046 /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
2047 llvm::Value *Args[] = {
2048 llvm::ConstantInt::get(
2049 CGM.Int16Ty,
2050 getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
2051 IsInSharedMemory};
2052 CGF.EmitRuntimeCall(
2053 OMPBuilder.getOrCreateRuntimeFunction(
2054 CGM.getModule(), OMPRTL___kmpc_restore_team_static_memory),
2055 Args);
2056 }
2057 } else {
2058 CGF.EmitRuntimeCall(
2059 OMPBuilder.getOrCreateRuntimeFunction(
2060 CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
2061 I->getSecond().GlobalRecordAddr);
2062 }
2063 }
2064 }
2065 }
2066
emitTeamsCall(CodeGenFunction & CGF,const OMPExecutableDirective & D,SourceLocation Loc,llvm::Function * OutlinedFn,ArrayRef<llvm::Value * > CapturedVars)2067 void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
2068 const OMPExecutableDirective &D,
2069 SourceLocation Loc,
2070 llvm::Function *OutlinedFn,
2071 ArrayRef<llvm::Value *> CapturedVars) {
2072 if (!CGF.HaveInsertPoint())
2073 return;
2074
2075 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2076 /*Name=*/".zero.addr");
2077 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2078 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2079 OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
2080 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2081 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2082 emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2083 }
2084
emitParallelCall(CodeGenFunction & CGF,SourceLocation Loc,llvm::Function * OutlinedFn,ArrayRef<llvm::Value * > CapturedVars,const Expr * IfCond)2085 void CGOpenMPRuntimeGPU::emitParallelCall(
2086 CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
2087 ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2088 if (!CGF.HaveInsertPoint())
2089 return;
2090
2091 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
2092 emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
2093 else
2094 emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
2095 }
2096
emitNonSPMDParallelCall(CodeGenFunction & CGF,SourceLocation Loc,llvm::Value * OutlinedFn,ArrayRef<llvm::Value * > CapturedVars,const Expr * IfCond)2097 void CGOpenMPRuntimeGPU::emitNonSPMDParallelCall(
2098 CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
2099 ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2100 llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
2101
2102 // Force inline this outlined function at its call site.
2103 Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
2104
2105 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2106 /*Name=*/".zero.addr");
2107 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2108 // ThreadId for serialized parallels is 0.
2109 Address ThreadIDAddr = ZeroAddr;
2110 auto &&CodeGen = [this, Fn, CapturedVars, Loc, &ThreadIDAddr](
2111 CodeGenFunction &CGF, PrePostActionTy &Action) {
2112 Action.Enter(CGF);
2113
2114 Address ZeroAddr =
2115 CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2116 /*Name=*/".bound.zero.addr");
2117 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2118 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2119 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2120 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2121 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2122 emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
2123 };
2124 auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
2125 PrePostActionTy &) {
2126
2127 RegionCodeGenTy RCG(CodeGen);
2128 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2129 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2130 llvm::Value *Args[] = {RTLoc, ThreadID};
2131
2132 NVPTXActionTy Action(
2133 OMPBuilder.getOrCreateRuntimeFunction(
2134 CGM.getModule(), OMPRTL___kmpc_serialized_parallel),
2135 Args,
2136 OMPBuilder.getOrCreateRuntimeFunction(
2137 CGM.getModule(), OMPRTL___kmpc_end_serialized_parallel),
2138 Args);
2139 RCG.setAction(Action);
2140 RCG(CGF);
2141 };
2142
2143 auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
2144 PrePostActionTy &Action) {
2145 CGBuilderTy &Bld = CGF.Builder;
2146 llvm::Function *WFn = WrapperFunctionsMap[Fn];
2147 assert(WFn && "Wrapper function does not exist!");
2148 llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
2149
2150 // Prepare for parallel region. Indicate the outlined function.
2151 llvm::Value *Args[] = {ID};
2152 CGF.EmitRuntimeCall(
2153 OMPBuilder.getOrCreateRuntimeFunction(
2154 CGM.getModule(), OMPRTL___kmpc_kernel_prepare_parallel),
2155 Args);
2156
2157 // Create a private scope that will globalize the arguments
2158 // passed from the outside of the target region.
2159 CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
2160
2161 // There's something to share.
2162 if (!CapturedVars.empty()) {
2163 // Prepare for parallel region. Indicate the outlined function.
2164 Address SharedArgs =
2165 CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
2166 llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
2167
2168 llvm::Value *DataSharingArgs[] = {
2169 SharedArgsPtr,
2170 llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
2171 CGF.EmitRuntimeCall(
2172 OMPBuilder.getOrCreateRuntimeFunction(
2173 CGM.getModule(), OMPRTL___kmpc_begin_sharing_variables),
2174 DataSharingArgs);
2175
2176 // Store variable address in a list of references to pass to workers.
2177 unsigned Idx = 0;
2178 ASTContext &Ctx = CGF.getContext();
2179 Address SharedArgListAddress = CGF.EmitLoadOfPointer(
2180 SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
2181 .castAs<PointerType>());
2182 for (llvm::Value *V : CapturedVars) {
2183 Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
2184 llvm::Value *PtrV;
2185 if (V->getType()->isIntegerTy())
2186 PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
2187 else
2188 PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
2189 CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
2190 Ctx.getPointerType(Ctx.VoidPtrTy));
2191 ++Idx;
2192 }
2193 }
2194
2195 // Activate workers. This barrier is used by the master to signal
2196 // work for the workers.
2197 syncCTAThreads(CGF);
2198
2199 // OpenMP [2.5, Parallel Construct, p.49]
2200 // There is an implied barrier at the end of a parallel region. After the
2201 // end of a parallel region, only the master thread of the team resumes
2202 // execution of the enclosing task region.
2203 //
2204 // The master waits at this barrier until all workers are done.
2205 syncCTAThreads(CGF);
2206
2207 if (!CapturedVars.empty())
2208 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2209 CGM.getModule(), OMPRTL___kmpc_end_sharing_variables));
2210
2211 // Remember for post-processing in worker loop.
2212 Work.emplace_back(WFn);
2213 };
2214
2215 auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
2216 CodeGenFunction &CGF, PrePostActionTy &Action) {
2217 if (IsInParallelRegion) {
2218 SeqGen(CGF, Action);
2219 } else if (IsInTargetMasterThreadRegion) {
2220 L0ParallelGen(CGF, Action);
2221 } else {
2222 // Check for master and then parallelism:
2223 // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
2224 // Serialized execution.
2225 // } else {
2226 // Worker call.
2227 // }
2228 CGBuilderTy &Bld = CGF.Builder;
2229 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2230 llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
2231 llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
2232 llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
2233 llvm::Value *IsSPMD = Bld.CreateIsNotNull(
2234 CGF.EmitNounwindRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2235 CGM.getModule(), OMPRTL___kmpc_is_spmd_exec_mode)));
2236 Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
2237 // There is no need to emit line number for unconditional branch.
2238 (void)ApplyDebugLocation::CreateEmpty(CGF);
2239 CGF.EmitBlock(ParallelCheckBB);
2240 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2241 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2242 llvm::Value *PL = CGF.EmitRuntimeCall(
2243 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
2244 OMPRTL___kmpc_parallel_level),
2245 {RTLoc, ThreadID});
2246 llvm::Value *Res = Bld.CreateIsNotNull(PL);
2247 Bld.CreateCondBr(Res, SeqBB, MasterBB);
2248 CGF.EmitBlock(SeqBB);
2249 SeqGen(CGF, Action);
2250 CGF.EmitBranch(ExitBB);
2251 // There is no need to emit line number for unconditional branch.
2252 (void)ApplyDebugLocation::CreateEmpty(CGF);
2253 CGF.EmitBlock(MasterBB);
2254 L0ParallelGen(CGF, Action);
2255 CGF.EmitBranch(ExitBB);
2256 // There is no need to emit line number for unconditional branch.
2257 (void)ApplyDebugLocation::CreateEmpty(CGF);
2258 // Emit the continuation block for code after the if.
2259 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2260 }
2261 };
2262
2263 if (IfCond) {
2264 emitIfClause(CGF, IfCond, LNParallelGen, SeqGen);
2265 } else {
2266 CodeGenFunction::RunCleanupsScope Scope(CGF);
2267 RegionCodeGenTy ThenRCG(LNParallelGen);
2268 ThenRCG(CGF);
2269 }
2270 }
2271
emitSPMDParallelCall(CodeGenFunction & CGF,SourceLocation Loc,llvm::Function * OutlinedFn,ArrayRef<llvm::Value * > CapturedVars,const Expr * IfCond)2272 void CGOpenMPRuntimeGPU::emitSPMDParallelCall(
2273 CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
2274 ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2275 // Just call the outlined function to execute the parallel region.
2276 // OutlinedFn(>id, &zero, CapturedStruct);
2277 //
2278 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2279
2280 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2281 /*Name=*/".zero.addr");
2282 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2283 // ThreadId for serialized parallels is 0.
2284 Address ThreadIDAddr = ZeroAddr;
2285 auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, &ThreadIDAddr](
2286 CodeGenFunction &CGF, PrePostActionTy &Action) {
2287 Action.Enter(CGF);
2288
2289 Address ZeroAddr =
2290 CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2291 /*Name=*/".bound.zero.addr");
2292 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2293 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2294 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2295 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2296 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2297 emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2298 };
2299 auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
2300 PrePostActionTy &) {
2301
2302 RegionCodeGenTy RCG(CodeGen);
2303 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2304 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2305 llvm::Value *Args[] = {RTLoc, ThreadID};
2306
2307 NVPTXActionTy Action(
2308 OMPBuilder.getOrCreateRuntimeFunction(
2309 CGM.getModule(), OMPRTL___kmpc_serialized_parallel),
2310 Args,
2311 OMPBuilder.getOrCreateRuntimeFunction(
2312 CGM.getModule(), OMPRTL___kmpc_end_serialized_parallel),
2313 Args);
2314 RCG.setAction(Action);
2315 RCG(CGF);
2316 };
2317
2318 if (IsInTargetMasterThreadRegion) {
2319 // In the worker need to use the real thread id.
2320 ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
2321 RegionCodeGenTy RCG(CodeGen);
2322 RCG(CGF);
2323 } else {
2324 // If we are not in the target region, it is definitely L2 parallelism or
2325 // more, because for SPMD mode we always has L1 parallel level, sowe don't
2326 // need to check for orphaned directives.
2327 RegionCodeGenTy RCG(SeqGen);
2328 RCG(CGF);
2329 }
2330 }
2331
syncCTAThreads(CodeGenFunction & CGF)2332 void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
2333 // Always emit simple barriers!
2334 if (!CGF.HaveInsertPoint())
2335 return;
2336 // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
2337 // This function does not use parameters, so we can emit just default values.
2338 llvm::Value *Args[] = {
2339 llvm::ConstantPointerNull::get(
2340 cast<llvm::PointerType>(getIdentTyPointerTy())),
2341 llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
2342 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2343 CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd),
2344 Args);
2345 }
2346
emitBarrierCall(CodeGenFunction & CGF,SourceLocation Loc,OpenMPDirectiveKind Kind,bool,bool)2347 void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
2348 SourceLocation Loc,
2349 OpenMPDirectiveKind Kind, bool,
2350 bool) {
2351 // Always emit simple barriers!
2352 if (!CGF.HaveInsertPoint())
2353 return;
2354 // Build call __kmpc_cancel_barrier(loc, thread_id);
2355 unsigned Flags = getDefaultFlagsForBarriers(Kind);
2356 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2357 getThreadID(CGF, Loc)};
2358
2359 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2360 CGM.getModule(), OMPRTL___kmpc_barrier),
2361 Args);
2362 }
2363
emitCriticalRegion(CodeGenFunction & CGF,StringRef CriticalName,const RegionCodeGenTy & CriticalOpGen,SourceLocation Loc,const Expr * Hint)2364 void CGOpenMPRuntimeGPU::emitCriticalRegion(
2365 CodeGenFunction &CGF, StringRef CriticalName,
2366 const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
2367 const Expr *Hint) {
2368 llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
2369 llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
2370 llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
2371 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
2372 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
2373
2374 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2375
2376 // Get the mask of active threads in the warp.
2377 llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2378 CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask));
2379 // Fetch team-local id of the thread.
2380 llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
2381
2382 // Get the width of the team.
2383 llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF);
2384
2385 // Initialize the counter variable for the loop.
2386 QualType Int32Ty =
2387 CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
2388 Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
2389 LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
2390 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
2391 /*isInit=*/true);
2392
2393 // Block checks if loop counter exceeds upper bound.
2394 CGF.EmitBlock(LoopBB);
2395 llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2396 llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
2397 CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
2398
2399 // Block tests which single thread should execute region, and which threads
2400 // should go straight to synchronisation point.
2401 CGF.EmitBlock(TestBB);
2402 CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2403 llvm::Value *CmpThreadToCounter =
2404 CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
2405 CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
2406
2407 // Block emits the body of the critical region.
2408 CGF.EmitBlock(BodyBB);
2409
2410 // Output the critical statement.
2411 CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
2412 Hint);
2413
2414 // After the body surrounded by the critical region, the single executing
2415 // thread will jump to the synchronisation point.
2416 // Block waits for all threads in current team to finish then increments the
2417 // counter variable and returns to the loop.
2418 CGF.EmitBlock(SyncBB);
2419 // Reconverge active threads in the warp.
2420 (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2421 CGM.getModule(), OMPRTL___kmpc_syncwarp),
2422 Mask);
2423
2424 llvm::Value *IncCounterVal =
2425 CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
2426 CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
2427 CGF.EmitBranch(LoopBB);
2428
2429 // Block that is reached when all threads in the team complete the region.
2430 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2431 }
2432
2433 /// Cast value to the specified type.
castValueToType(CodeGenFunction & CGF,llvm::Value * Val,QualType ValTy,QualType CastTy,SourceLocation Loc)2434 static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
2435 QualType ValTy, QualType CastTy,
2436 SourceLocation Loc) {
2437 assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
2438 "Cast type must sized.");
2439 assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
2440 "Val type must sized.");
2441 llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
2442 if (ValTy == CastTy)
2443 return Val;
2444 if (CGF.getContext().getTypeSizeInChars(ValTy) ==
2445 CGF.getContext().getTypeSizeInChars(CastTy))
2446 return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
2447 if (CastTy->isIntegerType() && ValTy->isIntegerType())
2448 return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
2449 CastTy->hasSignedIntegerRepresentation());
2450 Address CastItem = CGF.CreateMemTemp(CastTy);
2451 Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2452 CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
2453 CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
2454 LValueBaseInfo(AlignmentSource::Type),
2455 TBAAAccessInfo());
2456 return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
2457 LValueBaseInfo(AlignmentSource::Type),
2458 TBAAAccessInfo());
2459 }
2460
2461 /// This function creates calls to one of two shuffle functions to copy
2462 /// variables between lanes in a warp.
createRuntimeShuffleFunction(CodeGenFunction & CGF,llvm::Value * Elem,QualType ElemType,llvm::Value * Offset,SourceLocation Loc)2463 static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
2464 llvm::Value *Elem,
2465 QualType ElemType,
2466 llvm::Value *Offset,
2467 SourceLocation Loc) {
2468 CodeGenModule &CGM = CGF.CGM;
2469 CGBuilderTy &Bld = CGF.Builder;
2470 CGOpenMPRuntimeGPU &RT =
2471 *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
2472 llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder();
2473
2474 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2475 assert(Size.getQuantity() <= 8 &&
2476 "Unsupported bitwidth in shuffle instruction.");
2477
2478 RuntimeFunction ShuffleFn = Size.getQuantity() <= 4
2479 ? OMPRTL___kmpc_shuffle_int32
2480 : OMPRTL___kmpc_shuffle_int64;
2481
2482 // Cast all types to 32- or 64-bit values before calling shuffle routines.
2483 QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
2484 Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
2485 llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
2486 llvm::Value *WarpSize =
2487 Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
2488
2489 llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
2490 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn),
2491 {ElemCast, Offset, WarpSize});
2492
2493 return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
2494 }
2495
shuffleAndStore(CodeGenFunction & CGF,Address SrcAddr,Address DestAddr,QualType ElemType,llvm::Value * Offset,SourceLocation Loc)2496 static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
2497 Address DestAddr, QualType ElemType,
2498 llvm::Value *Offset, SourceLocation Loc) {
2499 CGBuilderTy &Bld = CGF.Builder;
2500
2501 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2502 // Create the loop over the big sized data.
2503 // ptr = (void*)Elem;
2504 // ptrEnd = (void*) Elem + 1;
2505 // Step = 8;
2506 // while (ptr + Step < ptrEnd)
2507 // shuffle((int64_t)*ptr);
2508 // Step = 4;
2509 // while (ptr + Step < ptrEnd)
2510 // shuffle((int32_t)*ptr);
2511 // ...
2512 Address ElemPtr = DestAddr;
2513 Address Ptr = SrcAddr;
2514 Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
2515 Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
2516 for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
2517 if (Size < CharUnits::fromQuantity(IntSize))
2518 continue;
2519 QualType IntType = CGF.getContext().getIntTypeForBitwidth(
2520 CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
2521 /*Signed=*/1);
2522 llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
2523 Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
2524 ElemPtr =
2525 Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
2526 if (Size.getQuantity() / IntSize > 1) {
2527 llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
2528 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
2529 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
2530 llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
2531 CGF.EmitBlock(PreCondBB);
2532 llvm::PHINode *PhiSrc =
2533 Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
2534 PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
2535 llvm::PHINode *PhiDest =
2536 Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
2537 PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
2538 Ptr = Address(PhiSrc, Ptr.getAlignment());
2539 ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
2540 llvm::Value *PtrDiff = Bld.CreatePtrDiff(
2541 PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
2542 Ptr.getPointer(), CGF.VoidPtrTy));
2543 Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
2544 ThenBB, ExitBB);
2545 CGF.EmitBlock(ThenBB);
2546 llvm::Value *Res = createRuntimeShuffleFunction(
2547 CGF,
2548 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
2549 LValueBaseInfo(AlignmentSource::Type),
2550 TBAAAccessInfo()),
2551 IntType, Offset, Loc);
2552 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
2553 LValueBaseInfo(AlignmentSource::Type),
2554 TBAAAccessInfo());
2555 Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
2556 Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
2557 PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
2558 PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
2559 CGF.EmitBranch(PreCondBB);
2560 CGF.EmitBlock(ExitBB);
2561 } else {
2562 llvm::Value *Res = createRuntimeShuffleFunction(
2563 CGF,
2564 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
2565 LValueBaseInfo(AlignmentSource::Type),
2566 TBAAAccessInfo()),
2567 IntType, Offset, Loc);
2568 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
2569 LValueBaseInfo(AlignmentSource::Type),
2570 TBAAAccessInfo());
2571 Ptr = Bld.CreateConstGEP(Ptr, 1);
2572 ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
2573 }
2574 Size = Size % IntSize;
2575 }
2576 }
2577
2578 namespace {
2579 enum CopyAction : unsigned {
2580 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
2581 // the warp using shuffle instructions.
2582 RemoteLaneToThread,
2583 // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
2584 ThreadCopy,
2585 // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
2586 ThreadToScratchpad,
2587 // ScratchpadToThread: Copy from a scratchpad array in global memory
2588 // containing team-reduced data to a thread's stack.
2589 ScratchpadToThread,
2590 };
2591 } // namespace
2592
2593 struct CopyOptionsTy {
2594 llvm::Value *RemoteLaneOffset;
2595 llvm::Value *ScratchpadIndex;
2596 llvm::Value *ScratchpadWidth;
2597 };
2598
2599 /// Emit instructions to copy a Reduce list, which contains partially
2600 /// aggregated values, in the specified direction.
emitReductionListCopy(CopyAction Action,CodeGenFunction & CGF,QualType ReductionArrayTy,ArrayRef<const Expr * > Privates,Address SrcBase,Address DestBase,CopyOptionsTy CopyOptions={nullptr, nullptr, nullptr})2601 static void emitReductionListCopy(
2602 CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
2603 ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
2604 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
2605
2606 CodeGenModule &CGM = CGF.CGM;
2607 ASTContext &C = CGM.getContext();
2608 CGBuilderTy &Bld = CGF.Builder;
2609
2610 llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
2611 llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
2612 llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
2613
2614 // Iterates, element-by-element, through the source Reduce list and
2615 // make a copy.
2616 unsigned Idx = 0;
2617 unsigned Size = Privates.size();
2618 for (const Expr *Private : Privates) {
2619 Address SrcElementAddr = Address::invalid();
2620 Address DestElementAddr = Address::invalid();
2621 Address DestElementPtrAddr = Address::invalid();
2622 // Should we shuffle in an element from a remote lane?
2623 bool ShuffleInElement = false;
2624 // Set to true to update the pointer in the dest Reduce list to a
2625 // newly created element.
2626 bool UpdateDestListPtr = false;
2627 // Increment the src or dest pointer to the scratchpad, for each
2628 // new element.
2629 bool IncrScratchpadSrc = false;
2630 bool IncrScratchpadDest = false;
2631
2632 switch (Action) {
2633 case RemoteLaneToThread: {
2634 // Step 1.1: Get the address for the src element in the Reduce list.
2635 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
2636 SrcElementAddr = CGF.EmitLoadOfPointer(
2637 SrcElementPtrAddr,
2638 C.getPointerType(Private->getType())->castAs<PointerType>());
2639
2640 // Step 1.2: Create a temporary to store the element in the destination
2641 // Reduce list.
2642 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
2643 DestElementAddr =
2644 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
2645 ShuffleInElement = true;
2646 UpdateDestListPtr = true;
2647 break;
2648 }
2649 case ThreadCopy: {
2650 // Step 1.1: Get the address for the src element in the Reduce list.
2651 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
2652 SrcElementAddr = CGF.EmitLoadOfPointer(
2653 SrcElementPtrAddr,
2654 C.getPointerType(Private->getType())->castAs<PointerType>());
2655
2656 // Step 1.2: Get the address for dest element. The destination
2657 // element has already been created on the thread's stack.
2658 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
2659 DestElementAddr = CGF.EmitLoadOfPointer(
2660 DestElementPtrAddr,
2661 C.getPointerType(Private->getType())->castAs<PointerType>());
2662 break;
2663 }
2664 case ThreadToScratchpad: {
2665 // Step 1.1: Get the address for the src element in the Reduce list.
2666 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
2667 SrcElementAddr = CGF.EmitLoadOfPointer(
2668 SrcElementPtrAddr,
2669 C.getPointerType(Private->getType())->castAs<PointerType>());
2670
2671 // Step 1.2: Get the address for dest element:
2672 // address = base + index * ElementSizeInChars.
2673 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2674 llvm::Value *CurrentOffset =
2675 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
2676 llvm::Value *ScratchPadElemAbsolutePtrVal =
2677 Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
2678 ScratchPadElemAbsolutePtrVal =
2679 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
2680 DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
2681 C.getTypeAlignInChars(Private->getType()));
2682 IncrScratchpadDest = true;
2683 break;
2684 }
2685 case ScratchpadToThread: {
2686 // Step 1.1: Get the address for the src element in the scratchpad.
2687 // address = base + index * ElementSizeInChars.
2688 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2689 llvm::Value *CurrentOffset =
2690 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
2691 llvm::Value *ScratchPadElemAbsolutePtrVal =
2692 Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
2693 ScratchPadElemAbsolutePtrVal =
2694 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
2695 SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
2696 C.getTypeAlignInChars(Private->getType()));
2697 IncrScratchpadSrc = true;
2698
2699 // Step 1.2: Create a temporary to store the element in the destination
2700 // Reduce list.
2701 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
2702 DestElementAddr =
2703 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
2704 UpdateDestListPtr = true;
2705 break;
2706 }
2707 }
2708
2709 // Regardless of src and dest of copy, we emit the load of src
2710 // element as this is required in all directions
2711 SrcElementAddr = Bld.CreateElementBitCast(
2712 SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
2713 DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
2714 SrcElementAddr.getElementType());
2715
2716 // Now that all active lanes have read the element in the
2717 // Reduce list, shuffle over the value from the remote lane.
2718 if (ShuffleInElement) {
2719 shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
2720 RemoteLaneOffset, Private->getExprLoc());
2721 } else {
2722 switch (CGF.getEvaluationKind(Private->getType())) {
2723 case TEK_Scalar: {
2724 llvm::Value *Elem = CGF.EmitLoadOfScalar(
2725 SrcElementAddr, /*Volatile=*/false, Private->getType(),
2726 Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
2727 TBAAAccessInfo());
2728 // Store the source element value to the dest element address.
2729 CGF.EmitStoreOfScalar(
2730 Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
2731 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2732 break;
2733 }
2734 case TEK_Complex: {
2735 CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
2736 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
2737 Private->getExprLoc());
2738 CGF.EmitStoreOfComplex(
2739 Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
2740 /*isInit=*/false);
2741 break;
2742 }
2743 case TEK_Aggregate:
2744 CGF.EmitAggregateCopy(
2745 CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
2746 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
2747 Private->getType(), AggValueSlot::DoesNotOverlap);
2748 break;
2749 }
2750 }
2751
2752 // Step 3.1: Modify reference in dest Reduce list as needed.
2753 // Modifying the reference in Reduce list to point to the newly
2754 // created element. The element is live in the current function
2755 // scope and that of functions it invokes (i.e., reduce_function).
2756 // RemoteReduceData[i] = (void*)&RemoteElem
2757 if (UpdateDestListPtr) {
2758 CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
2759 DestElementAddr.getPointer(), CGF.VoidPtrTy),
2760 DestElementPtrAddr, /*Volatile=*/false,
2761 C.VoidPtrTy);
2762 }
2763
2764 // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
2765 // address of the next element in scratchpad memory, unless we're currently
2766 // processing the last one. Memory alignment is also taken care of here.
2767 if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
2768 llvm::Value *ScratchpadBasePtr =
2769 IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
2770 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2771 ScratchpadBasePtr = Bld.CreateNUWAdd(
2772 ScratchpadBasePtr,
2773 Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
2774
2775 // Take care of global memory alignment for performance
2776 ScratchpadBasePtr = Bld.CreateNUWSub(
2777 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2778 ScratchpadBasePtr = Bld.CreateUDiv(
2779 ScratchpadBasePtr,
2780 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2781 ScratchpadBasePtr = Bld.CreateNUWAdd(
2782 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2783 ScratchpadBasePtr = Bld.CreateNUWMul(
2784 ScratchpadBasePtr,
2785 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2786
2787 if (IncrScratchpadDest)
2788 DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2789 else /* IncrScratchpadSrc = true */
2790 SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2791 }
2792
2793 ++Idx;
2794 }
2795 }
2796
2797 /// This function emits a helper that gathers Reduce lists from the first
2798 /// lane of every active warp to lanes in the first warp.
2799 ///
2800 /// void inter_warp_copy_func(void* reduce_data, num_warps)
2801 /// shared smem[warp_size];
2802 /// For all data entries D in reduce_data:
2803 /// sync
2804 /// If (I am the first lane in each warp)
2805 /// Copy my local D to smem[warp_id]
2806 /// sync
2807 /// if (I am the first warp)
2808 /// Copy smem[thread_id] to my local D
emitInterWarpCopyFunction(CodeGenModule & CGM,ArrayRef<const Expr * > Privates,QualType ReductionArrayTy,SourceLocation Loc)2809 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
2810 ArrayRef<const Expr *> Privates,
2811 QualType ReductionArrayTy,
2812 SourceLocation Loc) {
2813 ASTContext &C = CGM.getContext();
2814 llvm::Module &M = CGM.getModule();
2815
2816 // ReduceList: thread local Reduce list.
2817 // At the stage of the computation when this function is called, partially
2818 // aggregated values reside in the first lane of every active warp.
2819 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2820 C.VoidPtrTy, ImplicitParamDecl::Other);
2821 // NumWarps: number of warps active in the parallel region. This could
2822 // be smaller than 32 (max warps in a CTA) for partial block reduction.
2823 ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2824 C.getIntTypeForBitwidth(32, /* Signed */ true),
2825 ImplicitParamDecl::Other);
2826 FunctionArgList Args;
2827 Args.push_back(&ReduceListArg);
2828 Args.push_back(&NumWarpsArg);
2829
2830 const CGFunctionInfo &CGFI =
2831 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2832 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
2833 llvm::GlobalValue::InternalLinkage,
2834 "_omp_reduction_inter_warp_copy_func", &M);
2835 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2836 Fn->setDoesNotRecurse();
2837 CodeGenFunction CGF(CGM);
2838 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2839
2840 CGBuilderTy &Bld = CGF.Builder;
2841
2842 // This array is used as a medium to transfer, one reduce element at a time,
2843 // the data from the first lane of every warp to lanes in the first warp
2844 // in order to perform the final step of a reduction in a parallel region
2845 // (reduction across warps). The array is placed in NVPTX __shared__ memory
2846 // for reduced latency, as well as to have a distinct copy for concurrently
2847 // executing target regions. The array is declared with common linkage so
2848 // as to be shared across compilation units.
2849 StringRef TransferMediumName =
2850 "__openmp_nvptx_data_transfer_temporary_storage";
2851 llvm::GlobalVariable *TransferMedium =
2852 M.getGlobalVariable(TransferMediumName);
2853 unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
2854 if (!TransferMedium) {
2855 auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
2856 unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
2857 TransferMedium = new llvm::GlobalVariable(
2858 M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage,
2859 llvm::UndefValue::get(Ty), TransferMediumName,
2860 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
2861 SharedAddressSpace);
2862 CGM.addCompilerUsedGlobal(TransferMedium);
2863 }
2864
2865 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2866 // Get the CUDA thread id of the current OpenMP thread on the GPU.
2867 llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
2868 // nvptx_lane_id = nvptx_id % warpsize
2869 llvm::Value *LaneID = getNVPTXLaneID(CGF);
2870 // nvptx_warp_id = nvptx_id / warpsize
2871 llvm::Value *WarpID = getNVPTXWarpID(CGF);
2872
2873 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2874 Address LocalReduceList(
2875 Bld.CreatePointerBitCastOrAddrSpaceCast(
2876 CGF.EmitLoadOfScalar(
2877 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
2878 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
2879 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2880 CGF.getPointerAlign());
2881
2882 unsigned Idx = 0;
2883 for (const Expr *Private : Privates) {
2884 //
2885 // Warp master copies reduce element to transfer medium in __shared__
2886 // memory.
2887 //
2888 unsigned RealTySize =
2889 C.getTypeSizeInChars(Private->getType())
2890 .alignTo(C.getTypeAlignInChars(Private->getType()))
2891 .getQuantity();
2892 for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /=2) {
2893 unsigned NumIters = RealTySize / TySize;
2894 if (NumIters == 0)
2895 continue;
2896 QualType CType = C.getIntTypeForBitwidth(
2897 C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
2898 llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
2899 CharUnits Align = CharUnits::fromQuantity(TySize);
2900 llvm::Value *Cnt = nullptr;
2901 Address CntAddr = Address::invalid();
2902 llvm::BasicBlock *PrecondBB = nullptr;
2903 llvm::BasicBlock *ExitBB = nullptr;
2904 if (NumIters > 1) {
2905 CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
2906 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
2907 /*Volatile=*/false, C.IntTy);
2908 PrecondBB = CGF.createBasicBlock("precond");
2909 ExitBB = CGF.createBasicBlock("exit");
2910 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
2911 // There is no need to emit line number for unconditional branch.
2912 (void)ApplyDebugLocation::CreateEmpty(CGF);
2913 CGF.EmitBlock(PrecondBB);
2914 Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
2915 llvm::Value *Cmp =
2916 Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
2917 Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
2918 CGF.EmitBlock(BodyBB);
2919 }
2920 // kmpc_barrier.
2921 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2922 /*EmitChecks=*/false,
2923 /*ForceSimpleCall=*/true);
2924 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2925 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2926 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2927
2928 // if (lane_id == 0)
2929 llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
2930 Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
2931 CGF.EmitBlock(ThenBB);
2932
2933 // Reduce element = LocalReduceList[i]
2934 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2935 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2936 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2937 // elemptr = ((CopyType*)(elemptrptr)) + I
2938 Address ElemPtr = Address(ElemPtrPtr, Align);
2939 ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
2940 if (NumIters > 1) {
2941 ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
2942 ElemPtr.getAlignment());
2943 }
2944
2945 // Get pointer to location in transfer medium.
2946 // MediumPtr = &medium[warp_id]
2947 llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
2948 TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
2949 Address MediumPtr(MediumPtrVal, Align);
2950 // Casting to actual data type.
2951 // MediumPtr = (CopyType*)MediumPtrAddr;
2952 MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
2953
2954 // elem = *elemptr
2955 //*MediumPtr = elem
2956 llvm::Value *Elem = CGF.EmitLoadOfScalar(
2957 ElemPtr, /*Volatile=*/false, CType, Loc,
2958 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2959 // Store the source element value to the dest element address.
2960 CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType,
2961 LValueBaseInfo(AlignmentSource::Type),
2962 TBAAAccessInfo());
2963
2964 Bld.CreateBr(MergeBB);
2965
2966 CGF.EmitBlock(ElseBB);
2967 Bld.CreateBr(MergeBB);
2968
2969 CGF.EmitBlock(MergeBB);
2970
2971 // kmpc_barrier.
2972 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2973 /*EmitChecks=*/false,
2974 /*ForceSimpleCall=*/true);
2975
2976 //
2977 // Warp 0 copies reduce element from transfer medium.
2978 //
2979 llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
2980 llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
2981 llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
2982
2983 Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
2984 llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
2985 AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
2986
2987 // Up to 32 threads in warp 0 are active.
2988 llvm::Value *IsActiveThread =
2989 Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
2990 Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
2991
2992 CGF.EmitBlock(W0ThenBB);
2993
2994 // SrcMediumPtr = &medium[tid]
2995 llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
2996 TransferMedium,
2997 {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
2998 Address SrcMediumPtr(SrcMediumPtrVal, Align);
2999 // SrcMediumVal = *SrcMediumPtr;
3000 SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
3001
3002 // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
3003 Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3004 llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
3005 TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
3006 Address TargetElemPtr = Address(TargetElemPtrVal, Align);
3007 TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
3008 if (NumIters > 1) {
3009 TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
3010 TargetElemPtr.getAlignment());
3011 }
3012
3013 // *TargetElemPtr = SrcMediumVal;
3014 llvm::Value *SrcMediumValue =
3015 CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
3016 CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
3017 CType);
3018 Bld.CreateBr(W0MergeBB);
3019
3020 CGF.EmitBlock(W0ElseBB);
3021 Bld.CreateBr(W0MergeBB);
3022
3023 CGF.EmitBlock(W0MergeBB);
3024
3025 if (NumIters > 1) {
3026 Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
3027 CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
3028 CGF.EmitBranch(PrecondBB);
3029 (void)ApplyDebugLocation::CreateEmpty(CGF);
3030 CGF.EmitBlock(ExitBB);
3031 }
3032 RealTySize %= TySize;
3033 }
3034 ++Idx;
3035 }
3036
3037 CGF.FinishFunction();
3038 return Fn;
3039 }
3040
3041 /// Emit a helper that reduces data across two OpenMP threads (lanes)
3042 /// in the same warp. It uses shuffle instructions to copy over data from
3043 /// a remote lane's stack. The reduction algorithm performed is specified
3044 /// by the fourth parameter.
3045 ///
3046 /// Algorithm Versions.
3047 /// Full Warp Reduce (argument value 0):
3048 /// This algorithm assumes that all 32 lanes are active and gathers
3049 /// data from these 32 lanes, producing a single resultant value.
3050 /// Contiguous Partial Warp Reduce (argument value 1):
3051 /// This algorithm assumes that only a *contiguous* subset of lanes
3052 /// are active. This happens for the last warp in a parallel region
3053 /// when the user specified num_threads is not an integer multiple of
3054 /// 32. This contiguous subset always starts with the zeroth lane.
3055 /// Partial Warp Reduce (argument value 2):
3056 /// This algorithm gathers data from any number of lanes at any position.
3057 /// All reduced values are stored in the lowest possible lane. The set
3058 /// of problems every algorithm addresses is a super set of those
3059 /// addressable by algorithms with a lower version number. Overhead
3060 /// increases as algorithm version increases.
3061 ///
3062 /// Terminology
3063 /// Reduce element:
3064 /// Reduce element refers to the individual data field with primitive
3065 /// data types to be combined and reduced across threads.
3066 /// Reduce list:
3067 /// Reduce list refers to a collection of local, thread-private
3068 /// reduce elements.
3069 /// Remote Reduce list:
3070 /// Remote Reduce list refers to a collection of remote (relative to
3071 /// the current thread) reduce elements.
3072 ///
3073 /// We distinguish between three states of threads that are important to
3074 /// the implementation of this function.
3075 /// Alive threads:
3076 /// Threads in a warp executing the SIMT instruction, as distinguished from
3077 /// threads that are inactive due to divergent control flow.
3078 /// Active threads:
3079 /// The minimal set of threads that has to be alive upon entry to this
3080 /// function. The computation is correct iff active threads are alive.
3081 /// Some threads are alive but they are not active because they do not
3082 /// contribute to the computation in any useful manner. Turning them off
3083 /// may introduce control flow overheads without any tangible benefits.
3084 /// Effective threads:
3085 /// In order to comply with the argument requirements of the shuffle
3086 /// function, we must keep all lanes holding data alive. But at most
3087 /// half of them perform value aggregation; we refer to this half of
3088 /// threads as effective. The other half is simply handing off their
3089 /// data.
3090 ///
3091 /// Procedure
3092 /// Value shuffle:
3093 /// In this step active threads transfer data from higher lane positions
3094 /// in the warp to lower lane positions, creating Remote Reduce list.
3095 /// Value aggregation:
3096 /// In this step, effective threads combine their thread local Reduce list
3097 /// with Remote Reduce list and store the result in the thread local
3098 /// Reduce list.
3099 /// Value copy:
3100 /// In this step, we deal with the assumption made by algorithm 2
3101 /// (i.e. contiguity assumption). When we have an odd number of lanes
3102 /// active, say 2k+1, only k threads will be effective and therefore k
3103 /// new values will be produced. However, the Reduce list owned by the
3104 /// (2k+1)th thread is ignored in the value aggregation. Therefore
3105 /// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
3106 /// that the contiguity assumption still holds.
emitShuffleAndReduceFunction(CodeGenModule & CGM,ArrayRef<const Expr * > Privates,QualType ReductionArrayTy,llvm::Function * ReduceFn,SourceLocation Loc)3107 static llvm::Function *emitShuffleAndReduceFunction(
3108 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3109 QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
3110 ASTContext &C = CGM.getContext();
3111
3112 // Thread local Reduce list used to host the values of data to be reduced.
3113 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3114 C.VoidPtrTy, ImplicitParamDecl::Other);
3115 // Current lane id; could be logical.
3116 ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
3117 ImplicitParamDecl::Other);
3118 // Offset of the remote source lane relative to the current lane.
3119 ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3120 C.ShortTy, ImplicitParamDecl::Other);
3121 // Algorithm version. This is expected to be known at compile time.
3122 ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3123 C.ShortTy, ImplicitParamDecl::Other);
3124 FunctionArgList Args;
3125 Args.push_back(&ReduceListArg);
3126 Args.push_back(&LaneIDArg);
3127 Args.push_back(&RemoteLaneOffsetArg);
3128 Args.push_back(&AlgoVerArg);
3129
3130 const CGFunctionInfo &CGFI =
3131 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3132 auto *Fn = llvm::Function::Create(
3133 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3134 "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
3135 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3136 Fn->setDoesNotRecurse();
3137 if (CGM.getLangOpts().Optimize) {
3138 Fn->removeFnAttr(llvm::Attribute::NoInline);
3139 Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
3140 Fn->addFnAttr(llvm::Attribute::AlwaysInline);
3141 }
3142
3143 CodeGenFunction CGF(CGM);
3144 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3145
3146 CGBuilderTy &Bld = CGF.Builder;
3147
3148 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3149 Address LocalReduceList(
3150 Bld.CreatePointerBitCastOrAddrSpaceCast(
3151 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3152 C.VoidPtrTy, SourceLocation()),
3153 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3154 CGF.getPointerAlign());
3155
3156 Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
3157 llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
3158 AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3159
3160 Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
3161 llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
3162 AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3163
3164 Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
3165 llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
3166 AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3167
3168 // Create a local thread-private variable to host the Reduce list
3169 // from a remote lane.
3170 Address RemoteReduceList =
3171 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
3172
3173 // This loop iterates through the list of reduce elements and copies,
3174 // element by element, from a remote lane in the warp to RemoteReduceList,
3175 // hosted on the thread's stack.
3176 emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
3177 LocalReduceList, RemoteReduceList,
3178 {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
3179 /*ScratchpadIndex=*/nullptr,
3180 /*ScratchpadWidth=*/nullptr});
3181
3182 // The actions to be performed on the Remote Reduce list is dependent
3183 // on the algorithm version.
3184 //
3185 // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
3186 // LaneId % 2 == 0 && Offset > 0):
3187 // do the reduction value aggregation
3188 //
3189 // The thread local variable Reduce list is mutated in place to host the
3190 // reduced data, which is the aggregated value produced from local and
3191 // remote lanes.
3192 //
3193 // Note that AlgoVer is expected to be a constant integer known at compile
3194 // time.
3195 // When AlgoVer==0, the first conjunction evaluates to true, making
3196 // the entire predicate true during compile time.
3197 // When AlgoVer==1, the second conjunction has only the second part to be
3198 // evaluated during runtime. Other conjunctions evaluates to false
3199 // during compile time.
3200 // When AlgoVer==2, the third conjunction has only the second part to be
3201 // evaluated during runtime. Other conjunctions evaluates to false
3202 // during compile time.
3203 llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
3204
3205 llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3206 llvm::Value *CondAlgo1 = Bld.CreateAnd(
3207 Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
3208
3209 llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
3210 llvm::Value *CondAlgo2 = Bld.CreateAnd(
3211 Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
3212 CondAlgo2 = Bld.CreateAnd(
3213 CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
3214
3215 llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
3216 CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
3217
3218 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3219 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3220 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3221 Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
3222
3223 CGF.EmitBlock(ThenBB);
3224 // reduce_function(LocalReduceList, RemoteReduceList)
3225 llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3226 LocalReduceList.getPointer(), CGF.VoidPtrTy);
3227 llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3228 RemoteReduceList.getPointer(), CGF.VoidPtrTy);
3229 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3230 CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
3231 Bld.CreateBr(MergeBB);
3232
3233 CGF.EmitBlock(ElseBB);
3234 Bld.CreateBr(MergeBB);
3235
3236 CGF.EmitBlock(MergeBB);
3237
3238 // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
3239 // Reduce list.
3240 Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3241 llvm::Value *CondCopy = Bld.CreateAnd(
3242 Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
3243
3244 llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
3245 llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
3246 llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
3247 Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
3248
3249 CGF.EmitBlock(CpyThenBB);
3250 emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
3251 RemoteReduceList, LocalReduceList);
3252 Bld.CreateBr(CpyMergeBB);
3253
3254 CGF.EmitBlock(CpyElseBB);
3255 Bld.CreateBr(CpyMergeBB);
3256
3257 CGF.EmitBlock(CpyMergeBB);
3258
3259 CGF.FinishFunction();
3260 return Fn;
3261 }
3262
3263 /// This function emits a helper that copies all the reduction variables from
3264 /// the team into the provided global buffer for the reduction variables.
3265 ///
3266 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
3267 /// For all data entries D in reduce_data:
3268 /// Copy local D to buffer.D[Idx]
emitListToGlobalCopyFunction(CodeGenModule & CGM,ArrayRef<const Expr * > Privates,QualType ReductionArrayTy,SourceLocation Loc,const RecordDecl * TeamReductionRec,const llvm::SmallDenseMap<const ValueDecl *,const FieldDecl * > & VarFieldMap)3269 static llvm::Value *emitListToGlobalCopyFunction(
3270 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3271 QualType ReductionArrayTy, SourceLocation Loc,
3272 const RecordDecl *TeamReductionRec,
3273 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3274 &VarFieldMap) {
3275 ASTContext &C = CGM.getContext();
3276
3277 // Buffer: global reduction buffer.
3278 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3279 C.VoidPtrTy, ImplicitParamDecl::Other);
3280 // Idx: index of the buffer.
3281 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3282 ImplicitParamDecl::Other);
3283 // ReduceList: thread local Reduce list.
3284 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3285 C.VoidPtrTy, ImplicitParamDecl::Other);
3286 FunctionArgList Args;
3287 Args.push_back(&BufferArg);
3288 Args.push_back(&IdxArg);
3289 Args.push_back(&ReduceListArg);
3290
3291 const CGFunctionInfo &CGFI =
3292 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3293 auto *Fn = llvm::Function::Create(
3294 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3295 "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
3296 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3297 Fn->setDoesNotRecurse();
3298 CodeGenFunction CGF(CGM);
3299 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3300
3301 CGBuilderTy &Bld = CGF.Builder;
3302
3303 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3304 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3305 Address LocalReduceList(
3306 Bld.CreatePointerBitCastOrAddrSpaceCast(
3307 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3308 C.VoidPtrTy, Loc),
3309 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3310 CGF.getPointerAlign());
3311 QualType StaticTy = C.getRecordType(TeamReductionRec);
3312 llvm::Type *LLVMReductionsBufferTy =
3313 CGM.getTypes().ConvertTypeForMem(StaticTy);
3314 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3315 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3316 LLVMReductionsBufferTy->getPointerTo());
3317 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3318 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3319 /*Volatile=*/false, C.IntTy,
3320 Loc)};
3321 unsigned Idx = 0;
3322 for (const Expr *Private : Privates) {
3323 // Reduce element = LocalReduceList[i]
3324 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3325 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3326 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3327 // elemptr = ((CopyType*)(elemptrptr)) + I
3328 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3329 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3330 Address ElemPtr =
3331 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3332 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3333 // Global = Buffer.VD[Idx];
3334 const FieldDecl *FD = VarFieldMap.lookup(VD);
3335 LValue GlobLVal = CGF.EmitLValueForField(
3336 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3337 llvm::Value *BufferPtr =
3338 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3339 GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3340 switch (CGF.getEvaluationKind(Private->getType())) {
3341 case TEK_Scalar: {
3342 llvm::Value *V = CGF.EmitLoadOfScalar(
3343 ElemPtr, /*Volatile=*/false, Private->getType(), Loc,
3344 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
3345 CGF.EmitStoreOfScalar(V, GlobLVal);
3346 break;
3347 }
3348 case TEK_Complex: {
3349 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
3350 CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
3351 CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
3352 break;
3353 }
3354 case TEK_Aggregate:
3355 CGF.EmitAggregateCopy(GlobLVal,
3356 CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3357 Private->getType(), AggValueSlot::DoesNotOverlap);
3358 break;
3359 }
3360 ++Idx;
3361 }
3362
3363 CGF.FinishFunction();
3364 return Fn;
3365 }
3366
3367 /// This function emits a helper that reduces all the reduction variables from
3368 /// the team into the provided global buffer for the reduction variables.
3369 ///
3370 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
3371 /// void *GlobPtrs[];
3372 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
3373 /// ...
3374 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
3375 /// reduce_function(GlobPtrs, reduce_data);
emitListToGlobalReduceFunction(CodeGenModule & CGM,ArrayRef<const Expr * > Privates,QualType ReductionArrayTy,SourceLocation Loc,const RecordDecl * TeamReductionRec,const llvm::SmallDenseMap<const ValueDecl *,const FieldDecl * > & VarFieldMap,llvm::Function * ReduceFn)3376 static llvm::Value *emitListToGlobalReduceFunction(
3377 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3378 QualType ReductionArrayTy, SourceLocation Loc,
3379 const RecordDecl *TeamReductionRec,
3380 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3381 &VarFieldMap,
3382 llvm::Function *ReduceFn) {
3383 ASTContext &C = CGM.getContext();
3384
3385 // Buffer: global reduction buffer.
3386 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3387 C.VoidPtrTy, ImplicitParamDecl::Other);
3388 // Idx: index of the buffer.
3389 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3390 ImplicitParamDecl::Other);
3391 // ReduceList: thread local Reduce list.
3392 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3393 C.VoidPtrTy, ImplicitParamDecl::Other);
3394 FunctionArgList Args;
3395 Args.push_back(&BufferArg);
3396 Args.push_back(&IdxArg);
3397 Args.push_back(&ReduceListArg);
3398
3399 const CGFunctionInfo &CGFI =
3400 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3401 auto *Fn = llvm::Function::Create(
3402 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3403 "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
3404 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3405 Fn->setDoesNotRecurse();
3406 CodeGenFunction CGF(CGM);
3407 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3408
3409 CGBuilderTy &Bld = CGF.Builder;
3410
3411 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3412 QualType StaticTy = C.getRecordType(TeamReductionRec);
3413 llvm::Type *LLVMReductionsBufferTy =
3414 CGM.getTypes().ConvertTypeForMem(StaticTy);
3415 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3416 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3417 LLVMReductionsBufferTy->getPointerTo());
3418
3419 // 1. Build a list of reduction variables.
3420 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3421 Address ReductionList =
3422 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3423 auto IPriv = Privates.begin();
3424 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3425 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3426 /*Volatile=*/false, C.IntTy,
3427 Loc)};
3428 unsigned Idx = 0;
3429 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3430 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3431 // Global = Buffer.VD[Idx];
3432 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3433 const FieldDecl *FD = VarFieldMap.lookup(VD);
3434 LValue GlobLVal = CGF.EmitLValueForField(
3435 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3436 llvm::Value *BufferPtr =
3437 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3438 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3439 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3440 if ((*IPriv)->getType()->isVariablyModifiedType()) {
3441 // Store array size.
3442 ++Idx;
3443 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3444 llvm::Value *Size = CGF.Builder.CreateIntCast(
3445 CGF.getVLASize(
3446 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3447 .NumElts,
3448 CGF.SizeTy, /*isSigned=*/false);
3449 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3450 Elem);
3451 }
3452 }
3453
3454 // Call reduce_function(GlobalReduceList, ReduceList)
3455 llvm::Value *GlobalReduceList =
3456 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
3457 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3458 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
3459 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
3460 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3461 CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
3462 CGF.FinishFunction();
3463 return Fn;
3464 }
3465
3466 /// This function emits a helper that copies all the reduction variables from
3467 /// the team into the provided global buffer for the reduction variables.
3468 ///
3469 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
3470 /// For all data entries D in reduce_data:
3471 /// Copy buffer.D[Idx] to local D;
emitGlobalToListCopyFunction(CodeGenModule & CGM,ArrayRef<const Expr * > Privates,QualType ReductionArrayTy,SourceLocation Loc,const RecordDecl * TeamReductionRec,const llvm::SmallDenseMap<const ValueDecl *,const FieldDecl * > & VarFieldMap)3472 static llvm::Value *emitGlobalToListCopyFunction(
3473 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3474 QualType ReductionArrayTy, SourceLocation Loc,
3475 const RecordDecl *TeamReductionRec,
3476 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3477 &VarFieldMap) {
3478 ASTContext &C = CGM.getContext();
3479
3480 // Buffer: global reduction buffer.
3481 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3482 C.VoidPtrTy, ImplicitParamDecl::Other);
3483 // Idx: index of the buffer.
3484 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3485 ImplicitParamDecl::Other);
3486 // ReduceList: thread local Reduce list.
3487 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3488 C.VoidPtrTy, ImplicitParamDecl::Other);
3489 FunctionArgList Args;
3490 Args.push_back(&BufferArg);
3491 Args.push_back(&IdxArg);
3492 Args.push_back(&ReduceListArg);
3493
3494 const CGFunctionInfo &CGFI =
3495 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3496 auto *Fn = llvm::Function::Create(
3497 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3498 "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
3499 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3500 Fn->setDoesNotRecurse();
3501 CodeGenFunction CGF(CGM);
3502 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3503
3504 CGBuilderTy &Bld = CGF.Builder;
3505
3506 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3507 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3508 Address LocalReduceList(
3509 Bld.CreatePointerBitCastOrAddrSpaceCast(
3510 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3511 C.VoidPtrTy, Loc),
3512 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3513 CGF.getPointerAlign());
3514 QualType StaticTy = C.getRecordType(TeamReductionRec);
3515 llvm::Type *LLVMReductionsBufferTy =
3516 CGM.getTypes().ConvertTypeForMem(StaticTy);
3517 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3518 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3519 LLVMReductionsBufferTy->getPointerTo());
3520
3521 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3522 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3523 /*Volatile=*/false, C.IntTy,
3524 Loc)};
3525 unsigned Idx = 0;
3526 for (const Expr *Private : Privates) {
3527 // Reduce element = LocalReduceList[i]
3528 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3529 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3530 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3531 // elemptr = ((CopyType*)(elemptrptr)) + I
3532 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3533 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3534 Address ElemPtr =
3535 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3536 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3537 // Global = Buffer.VD[Idx];
3538 const FieldDecl *FD = VarFieldMap.lookup(VD);
3539 LValue GlobLVal = CGF.EmitLValueForField(
3540 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3541 llvm::Value *BufferPtr =
3542 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3543 GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3544 switch (CGF.getEvaluationKind(Private->getType())) {
3545 case TEK_Scalar: {
3546 llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
3547 CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(),
3548 LValueBaseInfo(AlignmentSource::Type),
3549 TBAAAccessInfo());
3550 break;
3551 }
3552 case TEK_Complex: {
3553 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
3554 CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3555 /*isInit=*/false);
3556 break;
3557 }
3558 case TEK_Aggregate:
3559 CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3560 GlobLVal, Private->getType(),
3561 AggValueSlot::DoesNotOverlap);
3562 break;
3563 }
3564 ++Idx;
3565 }
3566
3567 CGF.FinishFunction();
3568 return Fn;
3569 }
3570
3571 /// This function emits a helper that reduces all the reduction variables from
3572 /// the team into the provided global buffer for the reduction variables.
3573 ///
3574 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
3575 /// void *GlobPtrs[];
3576 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
3577 /// ...
3578 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
3579 /// reduce_function(reduce_data, GlobPtrs);
emitGlobalToListReduceFunction(CodeGenModule & CGM,ArrayRef<const Expr * > Privates,QualType ReductionArrayTy,SourceLocation Loc,const RecordDecl * TeamReductionRec,const llvm::SmallDenseMap<const ValueDecl *,const FieldDecl * > & VarFieldMap,llvm::Function * ReduceFn)3580 static llvm::Value *emitGlobalToListReduceFunction(
3581 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3582 QualType ReductionArrayTy, SourceLocation Loc,
3583 const RecordDecl *TeamReductionRec,
3584 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3585 &VarFieldMap,
3586 llvm::Function *ReduceFn) {
3587 ASTContext &C = CGM.getContext();
3588
3589 // Buffer: global reduction buffer.
3590 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3591 C.VoidPtrTy, ImplicitParamDecl::Other);
3592 // Idx: index of the buffer.
3593 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3594 ImplicitParamDecl::Other);
3595 // ReduceList: thread local Reduce list.
3596 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3597 C.VoidPtrTy, ImplicitParamDecl::Other);
3598 FunctionArgList Args;
3599 Args.push_back(&BufferArg);
3600 Args.push_back(&IdxArg);
3601 Args.push_back(&ReduceListArg);
3602
3603 const CGFunctionInfo &CGFI =
3604 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3605 auto *Fn = llvm::Function::Create(
3606 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3607 "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
3608 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3609 Fn->setDoesNotRecurse();
3610 CodeGenFunction CGF(CGM);
3611 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3612
3613 CGBuilderTy &Bld = CGF.Builder;
3614
3615 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3616 QualType StaticTy = C.getRecordType(TeamReductionRec);
3617 llvm::Type *LLVMReductionsBufferTy =
3618 CGM.getTypes().ConvertTypeForMem(StaticTy);
3619 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3620 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3621 LLVMReductionsBufferTy->getPointerTo());
3622
3623 // 1. Build a list of reduction variables.
3624 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3625 Address ReductionList =
3626 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3627 auto IPriv = Privates.begin();
3628 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3629 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3630 /*Volatile=*/false, C.IntTy,
3631 Loc)};
3632 unsigned Idx = 0;
3633 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3634 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3635 // Global = Buffer.VD[Idx];
3636 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3637 const FieldDecl *FD = VarFieldMap.lookup(VD);
3638 LValue GlobLVal = CGF.EmitLValueForField(
3639 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3640 llvm::Value *BufferPtr =
3641 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3642 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3643 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3644 if ((*IPriv)->getType()->isVariablyModifiedType()) {
3645 // Store array size.
3646 ++Idx;
3647 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3648 llvm::Value *Size = CGF.Builder.CreateIntCast(
3649 CGF.getVLASize(
3650 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3651 .NumElts,
3652 CGF.SizeTy, /*isSigned=*/false);
3653 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3654 Elem);
3655 }
3656 }
3657
3658 // Call reduce_function(ReduceList, GlobalReduceList)
3659 llvm::Value *GlobalReduceList =
3660 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
3661 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3662 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
3663 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
3664 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3665 CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
3666 CGF.FinishFunction();
3667 return Fn;
3668 }
3669
3670 ///
3671 /// Design of OpenMP reductions on the GPU
3672 ///
3673 /// Consider a typical OpenMP program with one or more reduction
3674 /// clauses:
3675 ///
3676 /// float foo;
3677 /// double bar;
3678 /// #pragma omp target teams distribute parallel for \
3679 /// reduction(+:foo) reduction(*:bar)
3680 /// for (int i = 0; i < N; i++) {
3681 /// foo += A[i]; bar *= B[i];
3682 /// }
3683 ///
3684 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
3685 /// all teams. In our OpenMP implementation on the NVPTX device an
3686 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
3687 /// within a team are mapped to CUDA threads within a threadblock.
3688 /// Our goal is to efficiently aggregate values across all OpenMP
3689 /// threads such that:
3690 ///
3691 /// - the compiler and runtime are logically concise, and
3692 /// - the reduction is performed efficiently in a hierarchical
3693 /// manner as follows: within OpenMP threads in the same warp,
3694 /// across warps in a threadblock, and finally across teams on
3695 /// the NVPTX device.
3696 ///
3697 /// Introduction to Decoupling
3698 ///
3699 /// We would like to decouple the compiler and the runtime so that the
3700 /// latter is ignorant of the reduction variables (number, data types)
3701 /// and the reduction operators. This allows a simpler interface
3702 /// and implementation while still attaining good performance.
3703 ///
3704 /// Pseudocode for the aforementioned OpenMP program generated by the
3705 /// compiler is as follows:
3706 ///
3707 /// 1. Create private copies of reduction variables on each OpenMP
3708 /// thread: 'foo_private', 'bar_private'
3709 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
3710 /// to it and writes the result in 'foo_private' and 'bar_private'
3711 /// respectively.
3712 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
3713 /// and store the result on the team master:
3714 ///
3715 /// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
3716 /// reduceData, shuffleReduceFn, interWarpCpyFn)
3717 ///
3718 /// where:
3719 /// struct ReduceData {
3720 /// double *foo;
3721 /// double *bar;
3722 /// } reduceData
3723 /// reduceData.foo = &foo_private
3724 /// reduceData.bar = &bar_private
3725 ///
3726 /// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
3727 /// auxiliary functions generated by the compiler that operate on
3728 /// variables of type 'ReduceData'. They aid the runtime perform
3729 /// algorithmic steps in a data agnostic manner.
3730 ///
3731 /// 'shuffleReduceFn' is a pointer to a function that reduces data
3732 /// of type 'ReduceData' across two OpenMP threads (lanes) in the
3733 /// same warp. It takes the following arguments as input:
3734 ///
3735 /// a. variable of type 'ReduceData' on the calling lane,
3736 /// b. its lane_id,
3737 /// c. an offset relative to the current lane_id to generate a
3738 /// remote_lane_id. The remote lane contains the second
3739 /// variable of type 'ReduceData' that is to be reduced.
3740 /// d. an algorithm version parameter determining which reduction
3741 /// algorithm to use.
3742 ///
3743 /// 'shuffleReduceFn' retrieves data from the remote lane using
3744 /// efficient GPU shuffle intrinsics and reduces, using the
3745 /// algorithm specified by the 4th parameter, the two operands
3746 /// element-wise. The result is written to the first operand.
3747 ///
3748 /// Different reduction algorithms are implemented in different
3749 /// runtime functions, all calling 'shuffleReduceFn' to perform
3750 /// the essential reduction step. Therefore, based on the 4th
3751 /// parameter, this function behaves slightly differently to
3752 /// cooperate with the runtime to ensure correctness under
3753 /// different circumstances.
3754 ///
3755 /// 'InterWarpCpyFn' is a pointer to a function that transfers
3756 /// reduced variables across warps. It tunnels, through CUDA
3757 /// shared memory, the thread-private data of type 'ReduceData'
3758 /// from lane 0 of each warp to a lane in the first warp.
3759 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
3760 /// The last team writes the global reduced value to memory.
3761 ///
3762 /// ret = __kmpc_nvptx_teams_reduce_nowait(...,
3763 /// reduceData, shuffleReduceFn, interWarpCpyFn,
3764 /// scratchpadCopyFn, loadAndReduceFn)
3765 ///
3766 /// 'scratchpadCopyFn' is a helper that stores reduced
3767 /// data from the team master to a scratchpad array in
3768 /// global memory.
3769 ///
3770 /// 'loadAndReduceFn' is a helper that loads data from
3771 /// the scratchpad array and reduces it with the input
3772 /// operand.
3773 ///
3774 /// These compiler generated functions hide address
3775 /// calculation and alignment information from the runtime.
3776 /// 5. if ret == 1:
3777 /// The team master of the last team stores the reduced
3778 /// result to the globals in memory.
3779 /// foo += reduceData.foo; bar *= reduceData.bar
3780 ///
3781 ///
3782 /// Warp Reduction Algorithms
3783 ///
3784 /// On the warp level, we have three algorithms implemented in the
3785 /// OpenMP runtime depending on the number of active lanes:
3786 ///
3787 /// Full Warp Reduction
3788 ///
3789 /// The reduce algorithm within a warp where all lanes are active
3790 /// is implemented in the runtime as follows:
3791 ///
3792 /// full_warp_reduce(void *reduce_data,
3793 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3794 /// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
3795 /// ShuffleReduceFn(reduce_data, 0, offset, 0);
3796 /// }
3797 ///
3798 /// The algorithm completes in log(2, WARPSIZE) steps.
3799 ///
3800 /// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
3801 /// not used therefore we save instructions by not retrieving lane_id
3802 /// from the corresponding special registers. The 4th parameter, which
3803 /// represents the version of the algorithm being used, is set to 0 to
3804 /// signify full warp reduction.
3805 ///
3806 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3807 ///
3808 /// #reduce_elem refers to an element in the local lane's data structure
3809 /// #remote_elem is retrieved from a remote lane
3810 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3811 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
3812 ///
3813 /// Contiguous Partial Warp Reduction
3814 ///
3815 /// This reduce algorithm is used within a warp where only the first
3816 /// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
3817 /// number of OpenMP threads in a parallel region is not a multiple of
3818 /// WARPSIZE. The algorithm is implemented in the runtime as follows:
3819 ///
3820 /// void
3821 /// contiguous_partial_reduce(void *reduce_data,
3822 /// kmp_ShuffleReductFctPtr ShuffleReduceFn,
3823 /// int size, int lane_id) {
3824 /// int curr_size;
3825 /// int offset;
3826 /// curr_size = size;
3827 /// mask = curr_size/2;
3828 /// while (offset>0) {
3829 /// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
3830 /// curr_size = (curr_size+1)/2;
3831 /// offset = curr_size/2;
3832 /// }
3833 /// }
3834 ///
3835 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3836 ///
3837 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3838 /// if (lane_id < offset)
3839 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
3840 /// else
3841 /// reduce_elem = remote_elem
3842 ///
3843 /// This algorithm assumes that the data to be reduced are located in a
3844 /// contiguous subset of lanes starting from the first. When there is
3845 /// an odd number of active lanes, the data in the last lane is not
3846 /// aggregated with any other lane's dat but is instead copied over.
3847 ///
3848 /// Dispersed Partial Warp Reduction
3849 ///
3850 /// This algorithm is used within a warp when any discontiguous subset of
3851 /// lanes are active. It is used to implement the reduction operation
3852 /// across lanes in an OpenMP simd region or in a nested parallel region.
3853 ///
3854 /// void
3855 /// dispersed_partial_reduce(void *reduce_data,
3856 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3857 /// int size, remote_id;
3858 /// int logical_lane_id = number_of_active_lanes_before_me() * 2;
3859 /// do {
3860 /// remote_id = next_active_lane_id_right_after_me();
3861 /// # the above function returns 0 of no active lane
3862 /// # is present right after the current lane.
3863 /// size = number_of_active_lanes_in_this_warp();
3864 /// logical_lane_id /= 2;
3865 /// ShuffleReduceFn(reduce_data, logical_lane_id,
3866 /// remote_id-1-threadIdx.x, 2);
3867 /// } while (logical_lane_id % 2 == 0 && size > 1);
3868 /// }
3869 ///
3870 /// There is no assumption made about the initial state of the reduction.
3871 /// Any number of lanes (>=1) could be active at any position. The reduction
3872 /// result is returned in the first active lane.
3873 ///
3874 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3875 ///
3876 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3877 /// if (lane_id % 2 == 0 && offset > 0)
3878 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
3879 /// else
3880 /// reduce_elem = remote_elem
3881 ///
3882 ///
3883 /// Intra-Team Reduction
3884 ///
3885 /// This function, as implemented in the runtime call
3886 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
3887 /// threads in a team. It first reduces within a warp using the
3888 /// aforementioned algorithms. We then proceed to gather all such
3889 /// reduced values at the first warp.
3890 ///
3891 /// The runtime makes use of the function 'InterWarpCpyFn', which copies
3892 /// data from each of the "warp master" (zeroth lane of each warp, where
3893 /// warp-reduced data is held) to the zeroth warp. This step reduces (in
3894 /// a mathematical sense) the problem of reduction across warp masters in
3895 /// a block to the problem of warp reduction.
3896 ///
3897 ///
3898 /// Inter-Team Reduction
3899 ///
3900 /// Once a team has reduced its data to a single value, it is stored in
3901 /// a global scratchpad array. Since each team has a distinct slot, this
3902 /// can be done without locking.
3903 ///
3904 /// The last team to write to the scratchpad array proceeds to reduce the
3905 /// scratchpad array. One or more workers in the last team use the helper
3906 /// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
3907 /// the k'th worker reduces every k'th element.
3908 ///
3909 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
3910 /// reduce across workers and compute a globally reduced value.
3911 ///
emitReduction(CodeGenFunction & CGF,SourceLocation Loc,ArrayRef<const Expr * > Privates,ArrayRef<const Expr * > LHSExprs,ArrayRef<const Expr * > RHSExprs,ArrayRef<const Expr * > ReductionOps,ReductionOptionsTy Options)3912 void CGOpenMPRuntimeGPU::emitReduction(
3913 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
3914 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
3915 ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
3916 if (!CGF.HaveInsertPoint())
3917 return;
3918
3919 bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
3920 #ifndef NDEBUG
3921 bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
3922 #endif
3923
3924 if (Options.SimpleReduction) {
3925 assert(!TeamsReduction && !ParallelReduction &&
3926 "Invalid reduction selection in emitReduction.");
3927 CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
3928 ReductionOps, Options);
3929 return;
3930 }
3931
3932 assert((TeamsReduction || ParallelReduction) &&
3933 "Invalid reduction selection in emitReduction.");
3934
3935 // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
3936 // RedList, shuffle_reduce_func, interwarp_copy_func);
3937 // or
3938 // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
3939 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
3940 llvm::Value *ThreadId = getThreadID(CGF, Loc);
3941
3942 llvm::Value *Res;
3943 ASTContext &C = CGM.getContext();
3944 // 1. Build a list of reduction variables.
3945 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3946 auto Size = RHSExprs.size();
3947 for (const Expr *E : Privates) {
3948 if (E->getType()->isVariablyModifiedType())
3949 // Reserve place for array size.
3950 ++Size;
3951 }
3952 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
3953 QualType ReductionArrayTy =
3954 C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
3955 /*IndexTypeQuals=*/0);
3956 Address ReductionList =
3957 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3958 auto IPriv = Privates.begin();
3959 unsigned Idx = 0;
3960 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
3961 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3962 CGF.Builder.CreateStore(
3963 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3964 CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
3965 Elem);
3966 if ((*IPriv)->getType()->isVariablyModifiedType()) {
3967 // Store array size.
3968 ++Idx;
3969 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3970 llvm::Value *Size = CGF.Builder.CreateIntCast(
3971 CGF.getVLASize(
3972 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3973 .NumElts,
3974 CGF.SizeTy, /*isSigned=*/false);
3975 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3976 Elem);
3977 }
3978 }
3979
3980 llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3981 ReductionList.getPointer(), CGF.VoidPtrTy);
3982 llvm::Function *ReductionFn = emitReductionFunction(
3983 Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
3984 LHSExprs, RHSExprs, ReductionOps);
3985 llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
3986 llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
3987 CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
3988 llvm::Value *InterWarpCopyFn =
3989 emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
3990
3991 if (ParallelReduction) {
3992 llvm::Value *Args[] = {RTLoc,
3993 ThreadId,
3994 CGF.Builder.getInt32(RHSExprs.size()),
3995 ReductionArrayTySize,
3996 RL,
3997 ShuffleAndReduceFn,
3998 InterWarpCopyFn};
3999
4000 Res = CGF.EmitRuntimeCall(
4001 OMPBuilder.getOrCreateRuntimeFunction(
4002 CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2),
4003 Args);
4004 } else {
4005 assert(TeamsReduction && "expected teams reduction.");
4006 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
4007 llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
4008 int Cnt = 0;
4009 for (const Expr *DRE : Privates) {
4010 PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
4011 ++Cnt;
4012 }
4013 const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
4014 CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
4015 C.getLangOpts().OpenMPCUDAReductionBufNum);
4016 TeamsReductions.push_back(TeamReductionRec);
4017 if (!KernelTeamsReductionPtr) {
4018 KernelTeamsReductionPtr = new llvm::GlobalVariable(
4019 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
4020 llvm::GlobalValue::InternalLinkage, nullptr,
4021 "_openmp_teams_reductions_buffer_$_$ptr");
4022 }
4023 llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
4024 Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
4025 /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
4026 llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
4027 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4028 llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
4029 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4030 ReductionFn);
4031 llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
4032 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4033 llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
4034 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4035 ReductionFn);
4036
4037 llvm::Value *Args[] = {
4038 RTLoc,
4039 ThreadId,
4040 GlobalBufferPtr,
4041 CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
4042 RL,
4043 ShuffleAndReduceFn,
4044 InterWarpCopyFn,
4045 GlobalToBufferCpyFn,
4046 GlobalToBufferRedFn,
4047 BufferToGlobalCpyFn,
4048 BufferToGlobalRedFn};
4049
4050 Res = CGF.EmitRuntimeCall(
4051 OMPBuilder.getOrCreateRuntimeFunction(
4052 CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2),
4053 Args);
4054 }
4055
4056 // 5. Build if (res == 1)
4057 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
4058 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
4059 llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
4060 Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
4061 CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
4062
4063 // 6. Build then branch: where we have reduced values in the master
4064 // thread in each team.
4065 // __kmpc_end_reduce{_nowait}(<gtid>);
4066 // break;
4067 CGF.EmitBlock(ThenBB);
4068
4069 // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
4070 auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
4071 this](CodeGenFunction &CGF, PrePostActionTy &Action) {
4072 auto IPriv = Privates.begin();
4073 auto ILHS = LHSExprs.begin();
4074 auto IRHS = RHSExprs.begin();
4075 for (const Expr *E : ReductionOps) {
4076 emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
4077 cast<DeclRefExpr>(*IRHS));
4078 ++IPriv;
4079 ++ILHS;
4080 ++IRHS;
4081 }
4082 };
4083 llvm::Value *EndArgs[] = {ThreadId};
4084 RegionCodeGenTy RCG(CodeGen);
4085 NVPTXActionTy Action(
4086 nullptr, llvm::None,
4087 OMPBuilder.getOrCreateRuntimeFunction(
4088 CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait),
4089 EndArgs);
4090 RCG.setAction(Action);
4091 RCG(CGF);
4092 // There is no need to emit line number for unconditional branch.
4093 (void)ApplyDebugLocation::CreateEmpty(CGF);
4094 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
4095 }
4096
4097 const VarDecl *
translateParameter(const FieldDecl * FD,const VarDecl * NativeParam) const4098 CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
4099 const VarDecl *NativeParam) const {
4100 if (!NativeParam->getType()->isReferenceType())
4101 return NativeParam;
4102 QualType ArgType = NativeParam->getType();
4103 QualifierCollector QC;
4104 const Type *NonQualTy = QC.strip(ArgType);
4105 QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4106 if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
4107 if (Attr->getCaptureKind() == OMPC_map) {
4108 PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4109 LangAS::opencl_global);
4110 } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
4111 PointeeTy.isConstant(CGM.getContext())) {
4112 PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4113 LangAS::opencl_generic);
4114 }
4115 }
4116 ArgType = CGM.getContext().getPointerType(PointeeTy);
4117 QC.addRestrict();
4118 enum { NVPTX_local_addr = 5 };
4119 QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
4120 ArgType = QC.apply(CGM.getContext(), ArgType);
4121 if (isa<ImplicitParamDecl>(NativeParam))
4122 return ImplicitParamDecl::Create(
4123 CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
4124 NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
4125 return ParmVarDecl::Create(
4126 CGM.getContext(),
4127 const_cast<DeclContext *>(NativeParam->getDeclContext()),
4128 NativeParam->getBeginLoc(), NativeParam->getLocation(),
4129 NativeParam->getIdentifier(), ArgType,
4130 /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
4131 }
4132
4133 Address
getParameterAddress(CodeGenFunction & CGF,const VarDecl * NativeParam,const VarDecl * TargetParam) const4134 CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
4135 const VarDecl *NativeParam,
4136 const VarDecl *TargetParam) const {
4137 assert(NativeParam != TargetParam &&
4138 NativeParam->getType()->isReferenceType() &&
4139 "Native arg must not be the same as target arg.");
4140 Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
4141 QualType NativeParamType = NativeParam->getType();
4142 QualifierCollector QC;
4143 const Type *NonQualTy = QC.strip(NativeParamType);
4144 QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4145 unsigned NativePointeeAddrSpace =
4146 CGF.getContext().getTargetAddressSpace(NativePointeeTy);
4147 QualType TargetTy = TargetParam->getType();
4148 llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
4149 LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
4150 // First cast to generic.
4151 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4152 TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4153 /*AddrSpace=*/0));
4154 // Cast from generic to native address space.
4155 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4156 TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4157 NativePointeeAddrSpace));
4158 Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
4159 CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
4160 NativeParamType);
4161 return NativeParamAddr;
4162 }
4163
emitOutlinedFunctionCall(CodeGenFunction & CGF,SourceLocation Loc,llvm::FunctionCallee OutlinedFn,ArrayRef<llvm::Value * > Args) const4164 void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
4165 CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
4166 ArrayRef<llvm::Value *> Args) const {
4167 SmallVector<llvm::Value *, 4> TargetArgs;
4168 TargetArgs.reserve(Args.size());
4169 auto *FnType = OutlinedFn.getFunctionType();
4170 for (unsigned I = 0, E = Args.size(); I < E; ++I) {
4171 if (FnType->isVarArg() && FnType->getNumParams() <= I) {
4172 TargetArgs.append(std::next(Args.begin(), I), Args.end());
4173 break;
4174 }
4175 llvm::Type *TargetType = FnType->getParamType(I);
4176 llvm::Value *NativeArg = Args[I];
4177 if (!TargetType->isPointerTy()) {
4178 TargetArgs.emplace_back(NativeArg);
4179 continue;
4180 }
4181 llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4182 NativeArg,
4183 NativeArg->getType()->getPointerElementType()->getPointerTo());
4184 TargetArgs.emplace_back(
4185 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
4186 }
4187 CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
4188 }
4189
4190 /// Emit function which wraps the outline parallel region
4191 /// and controls the arguments which are passed to this function.
4192 /// The wrapper ensures that the outlined function is called
4193 /// with the correct arguments when data is shared.
createParallelDataSharingWrapper(llvm::Function * OutlinedParallelFn,const OMPExecutableDirective & D)4194 llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
4195 llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
4196 ASTContext &Ctx = CGM.getContext();
4197 const auto &CS = *D.getCapturedStmt(OMPD_parallel);
4198
4199 // Create a function that takes as argument the source thread.
4200 FunctionArgList WrapperArgs;
4201 QualType Int16QTy =
4202 Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
4203 QualType Int32QTy =
4204 Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
4205 ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
4206 /*Id=*/nullptr, Int16QTy,
4207 ImplicitParamDecl::Other);
4208 ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
4209 /*Id=*/nullptr, Int32QTy,
4210 ImplicitParamDecl::Other);
4211 WrapperArgs.emplace_back(&ParallelLevelArg);
4212 WrapperArgs.emplace_back(&WrapperArg);
4213
4214 const CGFunctionInfo &CGFI =
4215 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
4216
4217 auto *Fn = llvm::Function::Create(
4218 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
4219 Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
4220 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
4221 Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
4222 Fn->setDoesNotRecurse();
4223
4224 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
4225 CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
4226 D.getBeginLoc(), D.getBeginLoc());
4227
4228 const auto *RD = CS.getCapturedRecordDecl();
4229 auto CurField = RD->field_begin();
4230
4231 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
4232 /*Name=*/".zero.addr");
4233 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
4234 // Get the array of arguments.
4235 SmallVector<llvm::Value *, 8> Args;
4236
4237 Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
4238 Args.emplace_back(ZeroAddr.getPointer());
4239
4240 CGBuilderTy &Bld = CGF.Builder;
4241 auto CI = CS.capture_begin();
4242
4243 // Use global memory for data sharing.
4244 // Handle passing of global args to workers.
4245 Address GlobalArgs =
4246 CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
4247 llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
4248 llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
4249 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
4250 CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
4251 DataSharingArgs);
4252
4253 // Retrieve the shared variables from the list of references returned
4254 // by the runtime. Pass the variables to the outlined function.
4255 Address SharedArgListAddress = Address::invalid();
4256 if (CS.capture_size() > 0 ||
4257 isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
4258 SharedArgListAddress = CGF.EmitLoadOfPointer(
4259 GlobalArgs, CGF.getContext()
4260 .getPointerType(CGF.getContext().getPointerType(
4261 CGF.getContext().VoidPtrTy))
4262 .castAs<PointerType>());
4263 }
4264 unsigned Idx = 0;
4265 if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
4266 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
4267 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4268 Src, CGF.SizeTy->getPointerTo());
4269 llvm::Value *LB = CGF.EmitLoadOfScalar(
4270 TypedAddress,
4271 /*Volatile=*/false,
4272 CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
4273 cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
4274 Args.emplace_back(LB);
4275 ++Idx;
4276 Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
4277 TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4278 Src, CGF.SizeTy->getPointerTo());
4279 llvm::Value *UB = CGF.EmitLoadOfScalar(
4280 TypedAddress,
4281 /*Volatile=*/false,
4282 CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
4283 cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
4284 Args.emplace_back(UB);
4285 ++Idx;
4286 }
4287 if (CS.capture_size() > 0) {
4288 ASTContext &CGFContext = CGF.getContext();
4289 for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
4290 QualType ElemTy = CurField->getType();
4291 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
4292 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4293 Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
4294 llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
4295 /*Volatile=*/false,
4296 CGFContext.getPointerType(ElemTy),
4297 CI->getLocation());
4298 if (CI->capturesVariableByCopy() &&
4299 !CI->getCapturedVar()->getType()->isAnyPointerType()) {
4300 Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
4301 CI->getLocation());
4302 }
4303 Args.emplace_back(Arg);
4304 }
4305 }
4306
4307 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
4308 CGF.FinishFunction();
4309 return Fn;
4310 }
4311
emitFunctionProlog(CodeGenFunction & CGF,const Decl * D)4312 void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
4313 const Decl *D) {
4314 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
4315 return;
4316
4317 assert(D && "Expected function or captured|block decl.");
4318 assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
4319 "Function is registered already.");
4320 assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
4321 "Team is set but not processed.");
4322 const Stmt *Body = nullptr;
4323 bool NeedToDelayGlobalization = false;
4324 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
4325 Body = FD->getBody();
4326 } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
4327 Body = BD->getBody();
4328 } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
4329 Body = CD->getBody();
4330 NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
4331 if (NeedToDelayGlobalization &&
4332 getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
4333 return;
4334 }
4335 if (!Body)
4336 return;
4337 CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
4338 VarChecker.Visit(Body);
4339 const RecordDecl *GlobalizedVarsRecord =
4340 VarChecker.getGlobalizedRecord(IsInTTDRegion);
4341 TeamAndReductions.first = nullptr;
4342 TeamAndReductions.second.clear();
4343 ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
4344 VarChecker.getEscapedVariableLengthDecls();
4345 if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
4346 return;
4347 auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
4348 I->getSecond().MappedParams =
4349 std::make_unique<CodeGenFunction::OMPMapVars>();
4350 I->getSecond().GlobalRecord = GlobalizedVarsRecord;
4351 I->getSecond().EscapedParameters.insert(
4352 VarChecker.getEscapedParameters().begin(),
4353 VarChecker.getEscapedParameters().end());
4354 I->getSecond().EscapedVariableLengthDecls.append(
4355 EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
4356 DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
4357 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
4358 assert(VD->isCanonicalDecl() && "Expected canonical declaration");
4359 const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
4360 Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
4361 }
4362 if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
4363 CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
4364 VarChecker.Visit(Body);
4365 I->getSecond().SecondaryGlobalRecord =
4366 VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
4367 I->getSecond().SecondaryLocalVarData.emplace();
4368 DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
4369 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
4370 assert(VD->isCanonicalDecl() && "Expected canonical declaration");
4371 const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
4372 Data.insert(
4373 std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
4374 }
4375 }
4376 if (!NeedToDelayGlobalization) {
4377 emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
4378 struct GlobalizationScope final : EHScopeStack::Cleanup {
4379 GlobalizationScope() = default;
4380
4381 void Emit(CodeGenFunction &CGF, Flags flags) override {
4382 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
4383 .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
4384 }
4385 };
4386 CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
4387 }
4388 }
4389
getAddressOfLocalVariable(CodeGenFunction & CGF,const VarDecl * VD)4390 Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
4391 const VarDecl *VD) {
4392 if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
4393 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
4394 auto AS = LangAS::Default;
4395 switch (A->getAllocatorType()) {
4396 // Use the default allocator here as by default local vars are
4397 // threadlocal.
4398 case OMPAllocateDeclAttr::OMPNullMemAlloc:
4399 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
4400 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
4401 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
4402 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
4403 // Follow the user decision - use default allocation.
4404 return Address::invalid();
4405 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
4406 // TODO: implement aupport for user-defined allocators.
4407 return Address::invalid();
4408 case OMPAllocateDeclAttr::OMPConstMemAlloc:
4409 AS = LangAS::cuda_constant;
4410 break;
4411 case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
4412 AS = LangAS::cuda_shared;
4413 break;
4414 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
4415 case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
4416 break;
4417 }
4418 llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
4419 auto *GV = new llvm::GlobalVariable(
4420 CGM.getModule(), VarTy, /*isConstant=*/false,
4421 llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
4422 VD->getName(),
4423 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
4424 CGM.getContext().getTargetAddressSpace(AS));
4425 CharUnits Align = CGM.getContext().getDeclAlign(VD);
4426 GV->setAlignment(Align.getAsAlign());
4427 return Address(
4428 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4429 GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
4430 VD->getType().getAddressSpace()))),
4431 Align);
4432 }
4433
4434 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
4435 return Address::invalid();
4436
4437 VD = VD->getCanonicalDecl();
4438 auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
4439 if (I == FunctionGlobalizedDecls.end())
4440 return Address::invalid();
4441 auto VDI = I->getSecond().LocalVarData.find(VD);
4442 if (VDI != I->getSecond().LocalVarData.end())
4443 return VDI->second.PrivateAddr;
4444 if (VD->hasAttrs()) {
4445 for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
4446 E(VD->attr_end());
4447 IT != E; ++IT) {
4448 auto VDI = I->getSecond().LocalVarData.find(
4449 cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
4450 ->getCanonicalDecl());
4451 if (VDI != I->getSecond().LocalVarData.end())
4452 return VDI->second.PrivateAddr;
4453 }
4454 }
4455
4456 return Address::invalid();
4457 }
4458
functionFinished(CodeGenFunction & CGF)4459 void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) {
4460 FunctionGlobalizedDecls.erase(CGF.CurFn);
4461 CGOpenMPRuntime::functionFinished(CGF);
4462 }
4463
getDefaultDistScheduleAndChunk(CodeGenFunction & CGF,const OMPLoopDirective & S,OpenMPDistScheduleClauseKind & ScheduleKind,llvm::Value * & Chunk) const4464 void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk(
4465 CodeGenFunction &CGF, const OMPLoopDirective &S,
4466 OpenMPDistScheduleClauseKind &ScheduleKind,
4467 llvm::Value *&Chunk) const {
4468 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
4469 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
4470 ScheduleKind = OMPC_DIST_SCHEDULE_static;
4471 Chunk = CGF.EmitScalarConversion(
4472 RT.getGPUNumThreads(CGF),
4473 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
4474 S.getIterationVariable()->getType(), S.getBeginLoc());
4475 return;
4476 }
4477 CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
4478 CGF, S, ScheduleKind, Chunk);
4479 }
4480
getDefaultScheduleAndChunk(CodeGenFunction & CGF,const OMPLoopDirective & S,OpenMPScheduleClauseKind & ScheduleKind,const Expr * & ChunkExpr) const4481 void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk(
4482 CodeGenFunction &CGF, const OMPLoopDirective &S,
4483 OpenMPScheduleClauseKind &ScheduleKind,
4484 const Expr *&ChunkExpr) const {
4485 ScheduleKind = OMPC_SCHEDULE_static;
4486 // Chunk size is 1 in this case.
4487 llvm::APInt ChunkSize(32, 1);
4488 ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
4489 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
4490 SourceLocation());
4491 }
4492
adjustTargetSpecificDataForLambdas(CodeGenFunction & CGF,const OMPExecutableDirective & D) const4493 void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
4494 CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
4495 assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
4496 " Expected target-based directive.");
4497 const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
4498 for (const CapturedStmt::Capture &C : CS->captures()) {
4499 // Capture variables captured by reference in lambdas for target-based
4500 // directives.
4501 if (!C.capturesVariable())
4502 continue;
4503 const VarDecl *VD = C.getCapturedVar();
4504 const auto *RD = VD->getType()
4505 .getCanonicalType()
4506 .getNonReferenceType()
4507 ->getAsCXXRecordDecl();
4508 if (!RD || !RD->isLambda())
4509 continue;
4510 Address VDAddr = CGF.GetAddrOfLocalVar(VD);
4511 LValue VDLVal;
4512 if (VD->getType().getCanonicalType()->isReferenceType())
4513 VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
4514 else
4515 VDLVal = CGF.MakeAddrLValue(
4516 VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
4517 llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
4518 FieldDecl *ThisCapture = nullptr;
4519 RD->getCaptureFields(Captures, ThisCapture);
4520 if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
4521 LValue ThisLVal =
4522 CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
4523 llvm::Value *CXXThis = CGF.LoadCXXThis();
4524 CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
4525 }
4526 for (const LambdaCapture &LC : RD->captures()) {
4527 if (LC.getCaptureKind() != LCK_ByRef)
4528 continue;
4529 const VarDecl *VD = LC.getCapturedVar();
4530 if (!CS->capturesVariable(VD))
4531 continue;
4532 auto It = Captures.find(VD);
4533 assert(It != Captures.end() && "Found lambda capture without field.");
4534 LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
4535 Address VDAddr = CGF.GetAddrOfLocalVar(VD);
4536 if (VD->getType().getCanonicalType()->isReferenceType())
4537 VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
4538 VD->getType().getCanonicalType())
4539 .getAddress(CGF);
4540 CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
4541 }
4542 }
4543 }
4544
getDefaultFirstprivateAddressSpace() const4545 unsigned CGOpenMPRuntimeGPU::getDefaultFirstprivateAddressSpace() const {
4546 return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
4547 }
4548
hasAllocateAttributeForGlobalVar(const VarDecl * VD,LangAS & AS)4549 bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
4550 LangAS &AS) {
4551 if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
4552 return false;
4553 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
4554 switch(A->getAllocatorType()) {
4555 case OMPAllocateDeclAttr::OMPNullMemAlloc:
4556 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
4557 // Not supported, fallback to the default mem space.
4558 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
4559 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
4560 case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
4561 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
4562 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
4563 AS = LangAS::Default;
4564 return true;
4565 case OMPAllocateDeclAttr::OMPConstMemAlloc:
4566 AS = LangAS::cuda_constant;
4567 return true;
4568 case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
4569 AS = LangAS::cuda_shared;
4570 return true;
4571 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
4572 llvm_unreachable("Expected predefined allocator for the variables with the "
4573 "static storage.");
4574 }
4575 return false;
4576 }
4577
4578 // Get current CudaArch and ignore any unknown values
getCudaArch(CodeGenModule & CGM)4579 static CudaArch getCudaArch(CodeGenModule &CGM) {
4580 if (!CGM.getTarget().hasFeature("ptx"))
4581 return CudaArch::UNKNOWN;
4582 for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
4583 if (Feature.getValue()) {
4584 CudaArch Arch = StringToCudaArch(Feature.getKey());
4585 if (Arch != CudaArch::UNKNOWN)
4586 return Arch;
4587 }
4588 }
4589 return CudaArch::UNKNOWN;
4590 }
4591
4592 /// Check to see if target architecture supports unified addressing which is
4593 /// a restriction for OpenMP requires clause "unified_shared_memory".
processRequiresDirective(const OMPRequiresDecl * D)4594 void CGOpenMPRuntimeGPU::processRequiresDirective(
4595 const OMPRequiresDecl *D) {
4596 for (const OMPClause *Clause : D->clauselists()) {
4597 if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
4598 CudaArch Arch = getCudaArch(CGM);
4599 switch (Arch) {
4600 case CudaArch::SM_20:
4601 case CudaArch::SM_21:
4602 case CudaArch::SM_30:
4603 case CudaArch::SM_32:
4604 case CudaArch::SM_35:
4605 case CudaArch::SM_37:
4606 case CudaArch::SM_50:
4607 case CudaArch::SM_52:
4608 case CudaArch::SM_53:
4609 case CudaArch::SM_60:
4610 case CudaArch::SM_61:
4611 case CudaArch::SM_62: {
4612 SmallString<256> Buffer;
4613 llvm::raw_svector_ostream Out(Buffer);
4614 Out << "Target architecture " << CudaArchToString(Arch)
4615 << " does not support unified addressing";
4616 CGM.Error(Clause->getBeginLoc(), Out.str());
4617 return;
4618 }
4619 case CudaArch::SM_70:
4620 case CudaArch::SM_72:
4621 case CudaArch::SM_75:
4622 case CudaArch::SM_80:
4623 case CudaArch::GFX600:
4624 case CudaArch::GFX601:
4625 case CudaArch::GFX602:
4626 case CudaArch::GFX700:
4627 case CudaArch::GFX701:
4628 case CudaArch::GFX702:
4629 case CudaArch::GFX703:
4630 case CudaArch::GFX704:
4631 case CudaArch::GFX705:
4632 case CudaArch::GFX801:
4633 case CudaArch::GFX802:
4634 case CudaArch::GFX803:
4635 case CudaArch::GFX805:
4636 case CudaArch::GFX810:
4637 case CudaArch::GFX900:
4638 case CudaArch::GFX902:
4639 case CudaArch::GFX904:
4640 case CudaArch::GFX906:
4641 case CudaArch::GFX908:
4642 case CudaArch::GFX909:
4643 case CudaArch::GFX90c:
4644 case CudaArch::GFX1010:
4645 case CudaArch::GFX1011:
4646 case CudaArch::GFX1012:
4647 case CudaArch::GFX1030:
4648 case CudaArch::GFX1031:
4649 case CudaArch::GFX1032:
4650 case CudaArch::GFX1033:
4651 case CudaArch::UNUSED:
4652 case CudaArch::UNKNOWN:
4653 break;
4654 case CudaArch::LAST:
4655 llvm_unreachable("Unexpected Cuda arch.");
4656 }
4657 }
4658 }
4659 CGOpenMPRuntime::processRequiresDirective(D);
4660 }
4661
4662 /// Get number of SMs and number of blocks per SM.
getSMsBlocksPerSM(CodeGenModule & CGM)4663 static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
4664 std::pair<unsigned, unsigned> Data;
4665 if (CGM.getLangOpts().OpenMPCUDANumSMs)
4666 Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
4667 if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
4668 Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
4669 if (Data.first && Data.second)
4670 return Data;
4671 switch (getCudaArch(CGM)) {
4672 case CudaArch::SM_20:
4673 case CudaArch::SM_21:
4674 case CudaArch::SM_30:
4675 case CudaArch::SM_32:
4676 case CudaArch::SM_35:
4677 case CudaArch::SM_37:
4678 case CudaArch::SM_50:
4679 case CudaArch::SM_52:
4680 case CudaArch::SM_53:
4681 return {16, 16};
4682 case CudaArch::SM_60:
4683 case CudaArch::SM_61:
4684 case CudaArch::SM_62:
4685 return {56, 32};
4686 case CudaArch::SM_70:
4687 case CudaArch::SM_72:
4688 case CudaArch::SM_75:
4689 case CudaArch::SM_80:
4690 return {84, 32};
4691 case CudaArch::GFX600:
4692 case CudaArch::GFX601:
4693 case CudaArch::GFX602:
4694 case CudaArch::GFX700:
4695 case CudaArch::GFX701:
4696 case CudaArch::GFX702:
4697 case CudaArch::GFX703:
4698 case CudaArch::GFX704:
4699 case CudaArch::GFX705:
4700 case CudaArch::GFX801:
4701 case CudaArch::GFX802:
4702 case CudaArch::GFX803:
4703 case CudaArch::GFX805:
4704 case CudaArch::GFX810:
4705 case CudaArch::GFX900:
4706 case CudaArch::GFX902:
4707 case CudaArch::GFX904:
4708 case CudaArch::GFX906:
4709 case CudaArch::GFX908:
4710 case CudaArch::GFX909:
4711 case CudaArch::GFX90c:
4712 case CudaArch::GFX1010:
4713 case CudaArch::GFX1011:
4714 case CudaArch::GFX1012:
4715 case CudaArch::GFX1030:
4716 case CudaArch::GFX1031:
4717 case CudaArch::GFX1032:
4718 case CudaArch::GFX1033:
4719 case CudaArch::UNUSED:
4720 case CudaArch::UNKNOWN:
4721 break;
4722 case CudaArch::LAST:
4723 llvm_unreachable("Unexpected Cuda arch.");
4724 }
4725 llvm_unreachable("Unexpected NVPTX target without ptx feature.");
4726 }
4727
clear()4728 void CGOpenMPRuntimeGPU::clear() {
4729 if (!GlobalizedRecords.empty() &&
4730 !CGM.getLangOpts().OpenMPCUDATargetParallel) {
4731 ASTContext &C = CGM.getContext();
4732 llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
4733 llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
4734 RecordDecl *StaticRD = C.buildImplicitRecord(
4735 "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
4736 StaticRD->startDefinition();
4737 RecordDecl *SharedStaticRD = C.buildImplicitRecord(
4738 "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
4739 SharedStaticRD->startDefinition();
4740 for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
4741 if (Records.Records.empty())
4742 continue;
4743 unsigned Size = 0;
4744 unsigned RecAlignment = 0;
4745 for (const RecordDecl *RD : Records.Records) {
4746 QualType RDTy = C.getRecordType(RD);
4747 unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
4748 RecAlignment = std::max(RecAlignment, Alignment);
4749 unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
4750 Size =
4751 llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
4752 }
4753 Size = llvm::alignTo(Size, RecAlignment);
4754 llvm::APInt ArySize(/*numBits=*/64, Size);
4755 QualType SubTy = C.getConstantArrayType(
4756 C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
4757 const bool UseSharedMemory = Size <= SharedMemorySize;
4758 auto *Field =
4759 FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
4760 SourceLocation(), SourceLocation(), nullptr, SubTy,
4761 C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
4762 /*BW=*/nullptr, /*Mutable=*/false,
4763 /*InitStyle=*/ICIS_NoInit);
4764 Field->setAccess(AS_public);
4765 if (UseSharedMemory) {
4766 SharedStaticRD->addDecl(Field);
4767 SharedRecs.push_back(&Records);
4768 } else {
4769 StaticRD->addDecl(Field);
4770 GlobalRecs.push_back(&Records);
4771 }
4772 Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
4773 Records.UseSharedMemory->setInitializer(
4774 llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
4775 }
4776 // Allocate SharedMemorySize buffer for the shared memory.
4777 // FIXME: nvlink does not handle weak linkage correctly (object with the
4778 // different size are reported as erroneous).
4779 // Restore this code as sson as nvlink is fixed.
4780 if (!SharedStaticRD->field_empty()) {
4781 llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
4782 QualType SubTy = C.getConstantArrayType(
4783 C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
4784 auto *Field = FieldDecl::Create(
4785 C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
4786 C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
4787 /*BW=*/nullptr, /*Mutable=*/false,
4788 /*InitStyle=*/ICIS_NoInit);
4789 Field->setAccess(AS_public);
4790 SharedStaticRD->addDecl(Field);
4791 }
4792 SharedStaticRD->completeDefinition();
4793 if (!SharedStaticRD->field_empty()) {
4794 QualType StaticTy = C.getRecordType(SharedStaticRD);
4795 llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
4796 auto *GV = new llvm::GlobalVariable(
4797 CGM.getModule(), LLVMStaticTy,
4798 /*isConstant=*/false, llvm::GlobalValue::WeakAnyLinkage,
4799 llvm::UndefValue::get(LLVMStaticTy),
4800 "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
4801 llvm::GlobalValue::NotThreadLocal,
4802 C.getTargetAddressSpace(LangAS::cuda_shared));
4803 auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
4804 GV, CGM.VoidPtrTy);
4805 for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
4806 Rec->Buffer->replaceAllUsesWith(Replacement);
4807 Rec->Buffer->eraseFromParent();
4808 }
4809 }
4810 StaticRD->completeDefinition();
4811 if (!StaticRD->field_empty()) {
4812 QualType StaticTy = C.getRecordType(StaticRD);
4813 std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
4814 llvm::APInt Size1(32, SMsBlockPerSM.second);
4815 QualType Arr1Ty =
4816 C.getConstantArrayType(StaticTy, Size1, nullptr, ArrayType::Normal,
4817 /*IndexTypeQuals=*/0);
4818 llvm::APInt Size2(32, SMsBlockPerSM.first);
4819 QualType Arr2Ty =
4820 C.getConstantArrayType(Arr1Ty, Size2, nullptr, ArrayType::Normal,
4821 /*IndexTypeQuals=*/0);
4822 llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
4823 // FIXME: nvlink does not handle weak linkage correctly (object with the
4824 // different size are reported as erroneous).
4825 // Restore CommonLinkage as soon as nvlink is fixed.
4826 auto *GV = new llvm::GlobalVariable(
4827 CGM.getModule(), LLVMArr2Ty,
4828 /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
4829 llvm::Constant::getNullValue(LLVMArr2Ty),
4830 "_openmp_static_glob_rd_$_");
4831 auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
4832 GV, CGM.VoidPtrTy);
4833 for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
4834 Rec->Buffer->replaceAllUsesWith(Replacement);
4835 Rec->Buffer->eraseFromParent();
4836 }
4837 }
4838 }
4839 if (!TeamsReductions.empty()) {
4840 ASTContext &C = CGM.getContext();
4841 RecordDecl *StaticRD = C.buildImplicitRecord(
4842 "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
4843 StaticRD->startDefinition();
4844 for (const RecordDecl *TeamReductionRec : TeamsReductions) {
4845 QualType RecTy = C.getRecordType(TeamReductionRec);
4846 auto *Field = FieldDecl::Create(
4847 C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
4848 C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
4849 /*BW=*/nullptr, /*Mutable=*/false,
4850 /*InitStyle=*/ICIS_NoInit);
4851 Field->setAccess(AS_public);
4852 StaticRD->addDecl(Field);
4853 }
4854 StaticRD->completeDefinition();
4855 QualType StaticTy = C.getRecordType(StaticRD);
4856 llvm::Type *LLVMReductionsBufferTy =
4857 CGM.getTypes().ConvertTypeForMem(StaticTy);
4858 // FIXME: nvlink does not handle weak linkage correctly (object with the
4859 // different size are reported as erroneous).
4860 // Restore CommonLinkage as soon as nvlink is fixed.
4861 auto *GV = new llvm::GlobalVariable(
4862 CGM.getModule(), LLVMReductionsBufferTy,
4863 /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
4864 llvm::Constant::getNullValue(LLVMReductionsBufferTy),
4865 "_openmp_teams_reductions_buffer_$_");
4866 KernelTeamsReductionPtr->setInitializer(
4867 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
4868 CGM.VoidPtrTy));
4869 }
4870 CGOpenMPRuntime::clear();
4871 }
4872