//===- ScopBuilder.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Create a polyhedral description for a static control flow region.
//
// The pass creates a polyhedral description of the SCoPs detected by the SCoP
// detection, derived from their LLVM-IR code.
//
//===----------------------------------------------------------------------===//

#include "polly/ScopBuilder.h"
#include "polly/Options.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/ISLTools.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "polly/Support/VirtualInstruction.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <deque> // Used by the work lists in buildSchedule below.

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scops"

STATISTIC(ScopFound, "Number of valid Scops");
STATISTIC(RichScopFound, "Number of Scops containing a loop");
STATISTIC(InfeasibleScops,
          "Number of SCoPs with statically infeasible context.");

bool polly::ModelReadOnlyScalars;

// The maximal number of dimensions we allow during invariant load construction.
// More complex access ranges will result in very high compile time and are also
// unlikely to result in good code. This value is very high and should only
// trigger for corner cases (e.g., the "dct_luma" function in h264, SPEC2006).
static int const MaxDimensionsInAccessRange = 9;

static cl::opt<bool, true> XModelReadOnlyScalars(
    "polly-analyze-read-only-scalars",
    cl::desc("Model read-only scalar values in the scop description"),
    cl::location(ModelReadOnlyScalars), cl::Hidden, cl::ZeroOrMore,
    cl::init(true), cl::cat(PollyCategory));

static cl::opt<int>
    OptComputeOut("polly-analysis-computeout",
                  cl::desc("Bound the scop analysis by a maximal amount of "
                           "computational steps (0 means no bound)"),
                  cl::Hidden, cl::init(800000), cl::ZeroOrMore,
                  cl::cat(PollyCategory));

static cl::opt<bool> PollyAllowDereferenceOfAllFunctionParams(
    "polly-allow-dereference-of-all-function-parameters",
    cl::desc(
        "Treat all parameters to functions that are pointers as"
        " dereferenceable. This is useful for invariant load hoisting, since"
        " we can generate fewer runtime checks. This is only valid if all"
        " pointers to functions are always initialized, so that Polly can"
        " choose to hoist their loads."),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

static cl::opt<bool>
    PollyIgnoreInbounds("polly-ignore-inbounds",
                        cl::desc("Do not take inbounds assumptions at all"),
                        cl::Hidden, cl::init(false), cl::cat(PollyCategory));

static cl::opt<unsigned> RunTimeChecksMaxArraysPerGroup(
    "polly-rtc-max-arrays-per-group",
    cl::desc("The maximal number of arrays to compare in each alias group."),
    cl::Hidden, cl::ZeroOrMore, cl::init(20), cl::cat(PollyCategory));

static cl::opt<int> RunTimeChecksMaxAccessDisjuncts(
    "polly-rtc-max-array-disjuncts",
    cl::desc("The maximal number of disjuncts allowed in memory accesses to "
             "build RTCs."),
    cl::Hidden, cl::ZeroOrMore, cl::init(8), cl::cat(PollyCategory));

static cl::opt<unsigned> RunTimeChecksMaxParameters(
    "polly-rtc-max-parameters",
    cl::desc("The maximal number of parameters allowed in RTCs."), cl::Hidden,
    cl::ZeroOrMore, cl::init(8), cl::cat(PollyCategory));

static cl::opt<bool> UnprofitableScalarAccs(
    "polly-unprofitable-scalar-accs",
    cl::desc("Count statements with scalar accesses as not optimizable"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

static cl::opt<std::string> UserContextStr(
    "polly-context", cl::value_desc("isl parameter set"),
    cl::desc("Provide additional constraints on the context parameters"),
    cl::init(""), cl::cat(PollyCategory));

static cl::opt<bool> DetectFortranArrays(
    "polly-detect-fortran-arrays",
    cl::desc("Detect Fortran arrays and use this for code generation"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

static cl::opt<bool> DetectReductions("polly-detect-reductions",
                                      cl::desc("Detect and exploit reductions"),
                                      cl::Hidden, cl::ZeroOrMore,
                                      cl::init(true), cl::cat(PollyCategory));

// Multiplicative reductions can be disabled separately as these kinds of
// operations can overflow easily. Additive reductions and bit operations
// are, in contrast, pretty stable.
static cl::opt<bool> DisableMultiplicativeReductions(
    "polly-disable-multiplicative-reductions",
    cl::desc("Disable multiplicative reductions"), cl::Hidden, cl::ZeroOrMore,
    cl::init(false), cl::cat(PollyCategory));

enum class GranularityChoice { BasicBlocks, ScalarIndependence, Stores };

static cl::opt<GranularityChoice> StmtGranularity(
    "polly-stmt-granularity",
    cl::desc(
        "Algorithm to use for splitting basic blocks into multiple statements"),
    cl::values(clEnumValN(GranularityChoice::BasicBlocks, "bb",
                          "One statement per basic block"),
               clEnumValN(GranularityChoice::ScalarIndependence, "scalar-indep",
                          "Scalar independence heuristic"),
               clEnumValN(GranularityChoice::Stores, "store",
                          "Store-level granularity")),
    cl::init(GranularityChoice::ScalarIndependence), cl::cat(PollyCategory));

/// Helper to treat non-affine regions and basic blocks the same.
///
///{

/// Return the block that is the representing block for @p RN.
static inline BasicBlock *getRegionNodeBasicBlock(RegionNode *RN) {
  return RN->isSubRegion() ? RN->getNodeAs<Region>()->getEntry()
                           : RN->getNodeAs<BasicBlock>();
}

/// Return the @p idx'th block that is executed after @p RN.
static inline BasicBlock *
getRegionNodeSuccessor(RegionNode *RN, Instruction *TI, unsigned idx) {
  if (RN->isSubRegion()) {
    assert(idx == 0);
    return RN->getNodeAs<Region>()->getExit();
  }
  return TI->getSuccessor(idx);
}

static bool containsErrorBlock(RegionNode *RN, const Region &R, LoopInfo &LI,
                               const DominatorTree &DT) {
  if (!RN->isSubRegion())
    return isErrorBlock(*RN->getNodeAs<BasicBlock>(), R, LI, DT);
  for (BasicBlock *BB : RN->getNodeAs<Region>()->blocks())
    if (isErrorBlock(*BB, R, LI, DT))
      return true;
  return false;
}

///}

/// Create a map to map from a given iteration to a subsequent iteration.
///
/// This map maps from SetSpace -> SetSpace where the dimension @p Dim
/// is incremented by one and all other dimensions are equal, e.g.,
///             [i0, i1, i2, i3] -> [i0, i1, i2 + 1, i3]
///
/// if @p Dim is 2 and @p SetSpace has 4 dimensions.
static isl::map createNextIterationMap(isl::space SetSpace, unsigned Dim) {
  isl::space MapSpace = SetSpace.map_from_set();
  isl::map NextIterationMap = isl::map::universe(MapSpace);
  for (unsigned u = 0; u < NextIterationMap.dim(isl::dim::in); u++)
    if (u != Dim)
      NextIterationMap =
          NextIterationMap.equate(isl::dim::in, u, isl::dim::out, u);
  isl::constraint C =
      isl::constraint::alloc_equality(isl::local_space(MapSpace));
  C = C.set_constant_si(1);
  C = C.set_coefficient_si(isl::dim::in, Dim, 1);
  C = C.set_coefficient_si(isl::dim::out, Dim, -1);
  NextIterationMap = NextIterationMap.add_constraint(C);
  return NextIterationMap;
}
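
// As an illustration of the constraint built above: C encodes
// 1 + i_Dim - o_Dim = 0, so for Dim = 2 and a four-dimensional SetSpace the
// resulting map is { [i0, i1, i2, i3] -> [i0, i1, i2 + 1, i3] } in isl
// notation.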

/// Return the union of all bounded basic sets in @p S, i.e., add each basic
/// set @p BSet of @p S to the result iff @p BSet is bounded.
static isl::set collectBoundedParts(isl::set S) {
  isl::set BoundedParts = isl::set::empty(S.get_space());
  for (isl::basic_set BSet : S.get_basic_set_list())
    if (BSet.is_bounded())
      BoundedParts = BoundedParts.unite(isl::set(BSet));
  return BoundedParts;
}
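
// For example, assuming the two disjuncts are kept as separate basic sets,
// S = { [i] : 0 <= i <= 7 } u { [i] : i >= 42 } yields the bounded part
// { [i] : 0 <= i <= 7 }; the second disjunct has no upper bound and is
// dropped.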

/// Compute the (un)bounded parts of @p S with respect to dimension @p Dim.
///
/// @returns A separation of @p S into first an unbounded then a bounded subset,
///          both with regards to the dimension @p Dim.
static std::pair<isl::set, isl::set> partitionSetParts(isl::set S,
                                                       unsigned Dim) {
  for (unsigned u = 0, e = S.n_dim(); u < e; u++)
    S = S.lower_bound_si(isl::dim::set, u, 0);

  unsigned NumDimsS = S.n_dim();
  isl::set OnlyDimS = S;

  // Remove dimensions that are greater than Dim as they are not interesting.
  assert(NumDimsS >= Dim + 1);
  OnlyDimS = OnlyDimS.project_out(isl::dim::set, Dim + 1, NumDimsS - Dim - 1);

  // Create artificial parametric upper bounds for dimensions smaller than Dim
  // as we are not interested in them.
  OnlyDimS = OnlyDimS.insert_dims(isl::dim::param, 0, Dim);

  for (unsigned u = 0; u < Dim; u++) {
    isl::constraint C = isl::constraint::alloc_inequality(
        isl::local_space(OnlyDimS.get_space()));
    C = C.set_coefficient_si(isl::dim::param, u, 1);
    C = C.set_coefficient_si(isl::dim::set, u, -1);
    OnlyDimS = OnlyDimS.add_constraint(C);
  }

  // Collect all bounded parts of OnlyDimS.
  isl::set BoundedParts = collectBoundedParts(OnlyDimS);

  // Create the dimensions greater than Dim again.
  BoundedParts =
      BoundedParts.insert_dims(isl::dim::set, Dim + 1, NumDimsS - Dim - 1);

  // Remove the artificial upper bound parameters again.
  BoundedParts = BoundedParts.remove_dims(isl::dim::param, 0, Dim);

  isl::set UnboundedParts = S.subtract(BoundedParts);
  return std::make_pair(UnboundedParts, BoundedParts);
}
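
// A short example: for S = { [i, j] : 0 <= i and 0 <= j <= N } and Dim = 1,
// the artificial parametric bound on dimension 0 makes every disjunct
// bounded, so the bounded part is S itself and the unbounded part is empty.
// For Dim = 0 there is no upper bound on i, and the roles are swapped.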

/// Create the conditions under which @p L @p Pred @p R is true.
static isl::set buildConditionSet(ICmpInst::Predicate Pred, isl::pw_aff L,
                                  isl::pw_aff R) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:
    return L.eq_set(R);
  case ICmpInst::ICMP_NE:
    return L.ne_set(R);
  case ICmpInst::ICMP_SLT:
    return L.lt_set(R);
  case ICmpInst::ICMP_SLE:
    return L.le_set(R);
  case ICmpInst::ICMP_SGT:
    return L.gt_set(R);
  case ICmpInst::ICMP_SGE:
    return L.ge_set(R);
  case ICmpInst::ICMP_ULT:
    return L.lt_set(R);
  case ICmpInst::ICMP_UGT:
    return L.gt_set(R);
  case ICmpInst::ICMP_ULE:
    return L.le_set(R);
  case ICmpInst::ICMP_UGE:
    return L.ge_set(R);
  default:
    llvm_unreachable("Non-integer predicate not supported");
  }
}
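
// Note that the unsigned predicates above deliberately reuse the signed isl
// operations: the callers either take a non-negativity assumption on the
// operands or route unsigned comparisons through buildUnsignedConditionSets,
// and under that assumption the two interpretations coincide.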

isl::set ScopBuilder::adjustDomainDimensions(isl::set Dom, Loop *OldL,
                                             Loop *NewL) {
  // If the loops are the same there is nothing to do.
  if (NewL == OldL)
    return Dom;

  int OldDepth = scop->getRelativeLoopDepth(OldL);
  int NewDepth = scop->getRelativeLoopDepth(NewL);
  // If both loops are non-affine loops there is nothing to do.
  if (OldDepth == -1 && NewDepth == -1)
    return Dom;

  // Distinguish three cases:
  //   1) The depth is the same but the loops are not.
  //      => One loop was left, one was entered.
  //   2) The depth increased from OldL to NewL.
  //      => One loop was entered, none was left.
  //   3) The depth decreased from OldL to NewL.
  //      => Loops were left; the difference of the depths defines how many.
  if (OldDepth == NewDepth) {
    assert(OldL->getParentLoop() == NewL->getParentLoop());
    Dom = Dom.project_out(isl::dim::set, NewDepth, 1);
    Dom = Dom.add_dims(isl::dim::set, 1);
  } else if (OldDepth < NewDepth) {
    assert(OldDepth + 1 == NewDepth);
    auto &R = scop->getRegion();
    (void)R;
    assert(NewL->getParentLoop() == OldL ||
           ((!OldL || !R.contains(OldL)) && R.contains(NewL)));
    Dom = Dom.add_dims(isl::dim::set, 1);
  } else {
    assert(OldDepth > NewDepth);
    int Diff = OldDepth - NewDepth;
    int NumDim = Dom.n_dim();
    assert(NumDim >= Diff);
    Dom = Dom.project_out(isl::dim::set, NumDim - Diff, Diff);
  }

  return Dom;
}
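
// As an example of case 3: leaving two loops (OldDepth - NewDepth == 2)
// projects a domain such as { [i0, i1, i2] } onto its first dimension,
// { [i0] }, dropping the two innermost iteration dimensions that no longer
// exist at the new loop depth.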

/// Compute the isl representation for the SCEV @p E in this BB.
///
/// @param BB               The BB for which isl representation is to be
///                         computed.
/// @param InvalidDomainMap A map of BB to their invalid domains.
/// @param E                The SCEV that should be translated.
/// @param NonNegative      Flag to indicate the @p E has to be non-negative.
///
/// Note that this function will also adjust the invalid context accordingly.
__isl_give isl_pw_aff *
ScopBuilder::getPwAff(BasicBlock *BB,
                      DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
                      const SCEV *E, bool NonNegative) {
  PWACtx PWAC = scop->getPwAff(E, BB, NonNegative, &RecordedAssumptions);
  InvalidDomainMap[BB] = InvalidDomainMap[BB].unite(PWAC.second);
  return PWAC.first.release();
}

/// Build condition sets for unsigned ICmpInst(s).
/// Special handling is required for unsigned operands because an operand
/// with the MSB (aka the sign bit) set would wrap around if interpreted as
/// a signed value.
///
/// @param IsStrictUpperBound holds information on the predicate relation
/// between TestVal and UpperBound, i.e.,
/// TestVal < UpperBound  OR  TestVal <= UpperBound
__isl_give isl_set *ScopBuilder::buildUnsignedConditionSets(
    BasicBlock *BB, Value *Condition, __isl_keep isl_set *Domain,
    const SCEV *SCEV_TestVal, const SCEV *SCEV_UpperBound,
    DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
    bool IsStrictUpperBound) {
  // Do not take a NonNeg assumption on TestVal
  // as it might have the MSB (sign bit) set.
  isl_pw_aff *TestVal = getPwAff(BB, InvalidDomainMap, SCEV_TestVal, false);
  // Take a NonNeg assumption on UpperBound.
  isl_pw_aff *UpperBound =
      getPwAff(BB, InvalidDomainMap, SCEV_UpperBound, true);

  // 0 <= TestVal
  isl_set *First =
      isl_pw_aff_le_set(isl_pw_aff_zero_on_domain(isl_local_space_from_space(
                            isl_pw_aff_get_domain_space(TestVal))),
                        isl_pw_aff_copy(TestVal));

  isl_set *Second;
  if (IsStrictUpperBound)
    // TestVal < UpperBound
    Second = isl_pw_aff_lt_set(TestVal, UpperBound);
  else
    // TestVal <= UpperBound
    Second = isl_pw_aff_le_set(TestVal, UpperBound);

  isl_set *ConsequenceCondSet = isl_set_intersect(First, Second);
  return ConsequenceCondSet;
}
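
// For example, for a condition i <u N (IsStrictUpperBound == true) the
// returned set is { [...] : 0 <= i and i < N }, i.e., the wrap-around-free
// interpretation of the unsigned comparison.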

bool ScopBuilder::buildConditionSets(
    BasicBlock *BB, SwitchInst *SI, Loop *L, __isl_keep isl_set *Domain,
    DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
    SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
  Value *Condition = getConditionFromTerminator(SI);
  assert(Condition && "No condition for switch");

  isl_pw_aff *LHS, *RHS;
  LHS = getPwAff(BB, InvalidDomainMap, SE.getSCEVAtScope(Condition, L));

  unsigned NumSuccessors = SI->getNumSuccessors();
  ConditionSets.resize(NumSuccessors);
  for (auto &Case : SI->cases()) {
    unsigned Idx = Case.getSuccessorIndex();
    ConstantInt *CaseValue = Case.getCaseValue();

    RHS = getPwAff(BB, InvalidDomainMap, SE.getSCEV(CaseValue));
    isl_set *CaseConditionSet =
        buildConditionSet(ICmpInst::ICMP_EQ, isl::manage_copy(LHS),
                          isl::manage(RHS))
            .release();
    ConditionSets[Idx] = isl_set_coalesce(
        isl_set_intersect(CaseConditionSet, isl_set_copy(Domain)));
  }

  assert(ConditionSets[0] == nullptr && "Default condition set was set");
  isl_set *ConditionSetUnion = isl_set_copy(ConditionSets[1]);
  for (unsigned u = 2; u < NumSuccessors; u++)
    ConditionSetUnion =
        isl_set_union(ConditionSetUnion, isl_set_copy(ConditionSets[u]));
  ConditionSets[0] = isl_set_subtract(isl_set_copy(Domain), ConditionSetUnion);

  isl_pw_aff_free(LHS);

  return true;
}
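
// As an illustration, for "switch (c) { case 1: ...; case 7: ...; }" with
// domain D this creates ConditionSets[1] = D intersected with { c = 1 } and
// ConditionSets[2] = D intersected with { c = 7 }, and assigns the remainder
// D - (ConditionSets[1] u ConditionSets[2]) to the default successor.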

bool ScopBuilder::buildConditionSets(
    BasicBlock *BB, Value *Condition, Instruction *TI, Loop *L,
    __isl_keep isl_set *Domain,
    DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
    SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
  isl_set *ConsequenceCondSet = nullptr;

  if (auto Load = dyn_cast<LoadInst>(Condition)) {
    const SCEV *LHSSCEV = SE.getSCEVAtScope(Load, L);
    const SCEV *RHSSCEV = SE.getZero(LHSSCEV->getType());
    bool NonNeg = false;
    isl_pw_aff *LHS = getPwAff(BB, InvalidDomainMap, LHSSCEV, NonNeg);
    isl_pw_aff *RHS = getPwAff(BB, InvalidDomainMap, RHSSCEV, NonNeg);
    ConsequenceCondSet = buildConditionSet(ICmpInst::ICMP_SLE, isl::manage(LHS),
                                           isl::manage(RHS))
                             .release();
  } else if (auto *PHI = dyn_cast<PHINode>(Condition)) {
    auto *Unique = dyn_cast<ConstantInt>(
        getUniqueNonErrorValue(PHI, &scop->getRegion(), LI, DT));

    if (Unique->isZero())
      ConsequenceCondSet = isl_set_empty(isl_set_get_space(Domain));
    else
      ConsequenceCondSet = isl_set_universe(isl_set_get_space(Domain));
  } else if (auto *CCond = dyn_cast<ConstantInt>(Condition)) {
    if (CCond->isZero())
      ConsequenceCondSet = isl_set_empty(isl_set_get_space(Domain));
    else
      ConsequenceCondSet = isl_set_universe(isl_set_get_space(Domain));
  } else if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Condition)) {
    auto Opcode = BinOp->getOpcode();
    assert(Opcode == Instruction::And || Opcode == Instruction::Or);

    bool Valid = buildConditionSets(BB, BinOp->getOperand(0), TI, L, Domain,
                                    InvalidDomainMap, ConditionSets) &&
                 buildConditionSets(BB, BinOp->getOperand(1), TI, L, Domain,
                                    InvalidDomainMap, ConditionSets);
    if (!Valid) {
      while (!ConditionSets.empty())
        isl_set_free(ConditionSets.pop_back_val());
      return false;
    }

    isl_set_free(ConditionSets.pop_back_val());
    isl_set *ConsCondPart0 = ConditionSets.pop_back_val();
    isl_set_free(ConditionSets.pop_back_val());
    isl_set *ConsCondPart1 = ConditionSets.pop_back_val();

    if (Opcode == Instruction::And)
      ConsequenceCondSet = isl_set_intersect(ConsCondPart0, ConsCondPart1);
    else
      ConsequenceCondSet = isl_set_union(ConsCondPart0, ConsCondPart1);
  } else {
    auto *ICond = dyn_cast<ICmpInst>(Condition);
    assert(ICond &&
           "Condition of exiting branch was neither constant nor ICmp!");

    Region &R = scop->getRegion();

    isl_pw_aff *LHS, *RHS;
    // For unsigned comparisons we assume the sign bit of neither operand
    // to be set. The comparison is equal to a signed comparison under this
    // assumption.
    bool NonNeg = ICond->isUnsigned();
    const SCEV *LeftOperand = SE.getSCEVAtScope(ICond->getOperand(0), L),
               *RightOperand = SE.getSCEVAtScope(ICond->getOperand(1), L);

    LeftOperand = tryForwardThroughPHI(LeftOperand, R, SE, LI, DT);
    RightOperand = tryForwardThroughPHI(RightOperand, R, SE, LI, DT);

    switch (ICond->getPredicate()) {
    case ICmpInst::ICMP_ULT:
      ConsequenceCondSet =
          buildUnsignedConditionSets(BB, Condition, Domain, LeftOperand,
                                     RightOperand, InvalidDomainMap, true);
      break;
    case ICmpInst::ICMP_ULE:
      ConsequenceCondSet =
          buildUnsignedConditionSets(BB, Condition, Domain, LeftOperand,
                                     RightOperand, InvalidDomainMap, false);
      break;
    case ICmpInst::ICMP_UGT:
      ConsequenceCondSet =
          buildUnsignedConditionSets(BB, Condition, Domain, RightOperand,
                                     LeftOperand, InvalidDomainMap, true);
      break;
    case ICmpInst::ICMP_UGE:
      ConsequenceCondSet =
          buildUnsignedConditionSets(BB, Condition, Domain, RightOperand,
                                     LeftOperand, InvalidDomainMap, false);
      break;
    default:
      LHS = getPwAff(BB, InvalidDomainMap, LeftOperand, NonNeg);
      RHS = getPwAff(BB, InvalidDomainMap, RightOperand, NonNeg);
      ConsequenceCondSet = buildConditionSet(ICond->getPredicate(),
                                             isl::manage(LHS), isl::manage(RHS))
                               .release();
      break;
    }
  }

  // If no terminator was given we are only looking for parameter constraints
  // under which @p Condition is true/false.
  if (!TI)
    ConsequenceCondSet = isl_set_params(ConsequenceCondSet);
  assert(ConsequenceCondSet);
  ConsequenceCondSet = isl_set_coalesce(
      isl_set_intersect(ConsequenceCondSet, isl_set_copy(Domain)));

  isl_set *AlternativeCondSet = nullptr;
  bool TooComplex =
      isl_set_n_basic_set(ConsequenceCondSet) >= MaxDisjunctsInDomain;

  if (!TooComplex) {
    AlternativeCondSet = isl_set_subtract(isl_set_copy(Domain),
                                          isl_set_copy(ConsequenceCondSet));
    TooComplex =
        isl_set_n_basic_set(AlternativeCondSet) >= MaxDisjunctsInDomain;
  }

  if (TooComplex) {
    scop->invalidate(COMPLEXITY, TI ? TI->getDebugLoc() : DebugLoc(),
                     TI ? TI->getParent() : nullptr /* BasicBlock */);
    isl_set_free(AlternativeCondSet);
    isl_set_free(ConsequenceCondSet);
    return false;
  }

  ConditionSets.push_back(ConsequenceCondSet);
  ConditionSets.push_back(isl_set_coalesce(AlternativeCondSet));

  return true;
}

bool ScopBuilder::buildConditionSets(
    BasicBlock *BB, Instruction *TI, Loop *L, __isl_keep isl_set *Domain,
    DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
    SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
    return buildConditionSets(BB, SI, L, Domain, InvalidDomainMap,
                              ConditionSets);

  assert(isa<BranchInst>(TI) && "Terminator was neither branch nor switch.");

  if (TI->getNumSuccessors() == 1) {
    ConditionSets.push_back(isl_set_copy(Domain));
    return true;
  }

  Value *Condition = getConditionFromTerminator(TI);
  assert(Condition && "No condition for Terminator");

  return buildConditionSets(BB, Condition, TI, L, Domain, InvalidDomainMap,
                            ConditionSets);
}

bool ScopBuilder::propagateDomainConstraints(
    Region *R, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  // Iterate over the region R and propagate the domain constraints from the
  // predecessors to the current node. In contrast to the
  // buildDomainsWithBranchConstraints function, this one will pull the domain
  // information from the predecessors instead of pushing it to the successors.
  // Additionally, we assume the domains to be already present in the domain
  // map here. However, we iterate again in reverse post order so we know all
  // predecessors have been visited before a block or non-affine subregion is
  // visited.

  ReversePostOrderTraversal<Region *> RTraversal(R);
  for (auto *RN : RTraversal) {
    // Recurse for affine subregions but go on for basic blocks and non-affine
    // subregions.
    if (RN->isSubRegion()) {
      Region *SubRegion = RN->getNodeAs<Region>();
      if (!scop->isNonAffineSubRegion(SubRegion)) {
        if (!propagateDomainConstraints(SubRegion, InvalidDomainMap))
          return false;
        continue;
      }
    }

    BasicBlock *BB = getRegionNodeBasicBlock(RN);
    isl::set &Domain = scop->getOrInitEmptyDomain(BB);
    assert(Domain);

    // Under the union of all predecessor conditions we can reach this block.
    isl::set PredDom = getPredecessorDomainConstraints(BB, Domain);
    Domain = Domain.intersect(PredDom).coalesce();
    Domain = Domain.align_params(scop->getParamSpace());

    Loop *BBLoop = getRegionNodeLoop(RN, LI);
    if (BBLoop && BBLoop->getHeader() == BB && scop->contains(BBLoop))
      if (!addLoopBoundsToHeaderDomain(BBLoop, InvalidDomainMap))
        return false;
  }

  return true;
}

void ScopBuilder::propagateDomainConstraintsToRegionExit(
    BasicBlock *BB, Loop *BBLoop,
    SmallPtrSetImpl<BasicBlock *> &FinishedExitBlocks,
    DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  // Check if the block @p BB is the entry of a region. If so we propagate its
  // domain to the exit block of the region. Otherwise we are done.
  auto *RI = scop->getRegion().getRegionInfo();
  auto *BBReg = RI ? RI->getRegionFor(BB) : nullptr;
  auto *ExitBB = BBReg ? BBReg->getExit() : nullptr;
  if (!BBReg || BBReg->getEntry() != BB || !scop->contains(ExitBB))
    return;

  // Do not propagate the domain if there is a loop backedge inside the region
  // that would prevent the exit block from being executed.
  auto *L = BBLoop;
  while (L && scop->contains(L)) {
    SmallVector<BasicBlock *, 4> LatchBBs;
    L->getLoopLatches(LatchBBs);
    for (auto *LatchBB : LatchBBs)
      if (BB != LatchBB && BBReg->contains(LatchBB))
        return;
    L = L->getParentLoop();
  }

  isl::set Domain = scop->getOrInitEmptyDomain(BB);
  assert(Domain && "Cannot propagate a nullptr");

  Loop *ExitBBLoop = getFirstNonBoxedLoopFor(ExitBB, LI, scop->getBoxedLoops());

  // Since the dimensions of @p BB and @p ExitBB might be different we have to
  // adjust the domain before we can propagate it.
  isl::set AdjustedDomain = adjustDomainDimensions(Domain, BBLoop, ExitBBLoop);
  isl::set &ExitDomain = scop->getOrInitEmptyDomain(ExitBB);

  // If the exit domain is not yet created we set it, otherwise we "add" the
  // current domain.
  ExitDomain = ExitDomain ? AdjustedDomain.unite(ExitDomain) : AdjustedDomain;

  // Initialize the invalid domain.
  InvalidDomainMap[ExitBB] = ExitDomain.empty(ExitDomain.get_space());

  FinishedExitBlocks.insert(ExitBB);
}

isl::set ScopBuilder::getPredecessorDomainConstraints(BasicBlock *BB,
                                                      isl::set Domain) {
  // If @p BB is the ScopEntry we are done.
  if (scop->getRegion().getEntry() == BB)
    return isl::set::universe(Domain.get_space());

  // The region info of this function.
  auto &RI = *scop->getRegion().getRegionInfo();

  Loop *BBLoop = getFirstNonBoxedLoopFor(BB, LI, scop->getBoxedLoops());

  // A domain to collect all predecessor domains, thus all conditions under
  // which the block is executed. To this end we start with the empty domain.
  isl::set PredDom = isl::set::empty(Domain.get_space());

  // Set of regions of which the entry block domain has been propagated to BB.
  // All predecessors inside any of the regions can be skipped.
  SmallSet<Region *, 8> PropagatedRegions;

  for (auto *PredBB : predecessors(BB)) {
    // Skip backedges.
    if (DT.dominates(BB, PredBB))
      continue;

    // If the predecessor is in a region we used for propagation we can skip it.
    auto PredBBInRegion = [PredBB](Region *PR) { return PR->contains(PredBB); };
    if (std::any_of(PropagatedRegions.begin(), PropagatedRegions.end(),
                    PredBBInRegion)) {
      continue;
    }

    // Check if there is a valid region we can use for propagation, thus look
    // for a region that contains the predecessor and has @p BB as exit block.
    auto *PredR = RI.getRegionFor(PredBB);
    while (PredR->getExit() != BB && !PredR->contains(BB))
      PredR = PredR->getParent();

    // If a valid region for propagation was found use the entry of that region
    // for propagation, otherwise the PredBB directly.
    if (PredR->getExit() == BB) {
      PredBB = PredR->getEntry();
      PropagatedRegions.insert(PredR);
    }

    isl::set PredBBDom = scop->getDomainConditions(PredBB);
    Loop *PredBBLoop =
        getFirstNonBoxedLoopFor(PredBB, LI, scop->getBoxedLoops());
    PredBBDom = adjustDomainDimensions(PredBBDom, PredBBLoop, BBLoop);
    PredDom = PredDom.unite(PredBBDom);
  }

  return PredDom;
}

bool ScopBuilder::addLoopBoundsToHeaderDomain(
    Loop *L, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  int LoopDepth = scop->getRelativeLoopDepth(L);
  assert(LoopDepth >= 0 && "Loop in region should have at least depth one");

  BasicBlock *HeaderBB = L->getHeader();
  assert(scop->isDomainDefined(HeaderBB));
  isl::set &HeaderBBDom = scop->getOrInitEmptyDomain(HeaderBB);

  isl::map NextIterationMap =
      createNextIterationMap(HeaderBBDom.get_space(), LoopDepth);

  isl::set UnionBackedgeCondition = HeaderBBDom.empty(HeaderBBDom.get_space());

  SmallVector<BasicBlock *, 4> LatchBlocks;
  L->getLoopLatches(LatchBlocks);

  for (BasicBlock *LatchBB : LatchBlocks) {
    // If the latch is only reachable via error statements we skip it.
    if (!scop->isDomainDefined(LatchBB))
      continue;

    isl::set LatchBBDom = scop->getDomainConditions(LatchBB);

    isl::set BackedgeCondition = nullptr;

    Instruction *TI = LatchBB->getTerminator();
    BranchInst *BI = dyn_cast<BranchInst>(TI);
    assert(BI && "Only branch instructions allowed in loop latches");

    if (BI->isUnconditional())
      BackedgeCondition = LatchBBDom;
    else {
      SmallVector<isl_set *, 8> ConditionSets;
      int idx = BI->getSuccessor(0) != HeaderBB;
      if (!buildConditionSets(LatchBB, TI, L, LatchBBDom.get(),
                              InvalidDomainMap, ConditionSets))
        return false;

      // Free the non back edge condition set as we do not need it.
      isl_set_free(ConditionSets[1 - idx]);

      BackedgeCondition = isl::manage(ConditionSets[idx]);
    }

    int LatchLoopDepth = scop->getRelativeLoopDepth(LI.getLoopFor(LatchBB));
    assert(LatchLoopDepth >= LoopDepth);
    BackedgeCondition = BackedgeCondition.project_out(
        isl::dim::set, LoopDepth + 1, LatchLoopDepth - LoopDepth);
    UnionBackedgeCondition = UnionBackedgeCondition.unite(BackedgeCondition);
  }

  isl::map ForwardMap = isl::map::lex_le(HeaderBBDom.get_space());
  for (int i = 0; i < LoopDepth; i++)
    ForwardMap = ForwardMap.equate(isl::dim::in, i, isl::dim::out, i);

  isl::set UnionBackedgeConditionComplement =
      UnionBackedgeCondition.complement();
  UnionBackedgeConditionComplement =
      UnionBackedgeConditionComplement.lower_bound_si(isl::dim::set, LoopDepth,
                                                      0);
  UnionBackedgeConditionComplement =
      UnionBackedgeConditionComplement.apply(ForwardMap);
  HeaderBBDom = HeaderBBDom.subtract(UnionBackedgeConditionComplement);
  HeaderBBDom = HeaderBBDom.apply(NextIterationMap);

  auto Parts = partitionSetParts(HeaderBBDom, LoopDepth);
  HeaderBBDom = Parts.second;

  // Check if there is a <nsw> tagged AddRec for this loop and if so do not add
  // the bounded assumptions to the context as they are already implied by the
  // <nsw> tag.
  if (scop->hasNSWAddRecForLoop(L))
    return true;

  isl::set UnboundedCtx = Parts.first.params();
  recordAssumption(&RecordedAssumptions, INFINITELOOP, UnboundedCtx,
                   HeaderBB->getTerminator()->getDebugLoc(), AS_RESTRICTION);
  return true;
}
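
// Roughly, for a canonical loop such as "for (i = 0; i < N; i++)" the latch
// yields the backedge condition i < N; after the NextIterationMap shift and
// the subtraction of the forward-mapped complement, the header domain becomes
// the expected { [i] : 0 <= i < N }, while the unbounded part of the
// partition is recorded as an INFINITELOOP restriction.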

void ScopBuilder::buildInvariantEquivalenceClasses() {
  DenseMap<std::pair<const SCEV *, Type *>, LoadInst *> EquivClasses;

  const InvariantLoadsSetTy &RIL = scop->getRequiredInvariantLoads();
  for (LoadInst *LInst : RIL) {
    const SCEV *PointerSCEV = SE.getSCEV(LInst->getPointerOperand());

    Type *Ty = LInst->getType();
    LoadInst *&ClassRep = EquivClasses[std::make_pair(PointerSCEV, Ty)];
    if (ClassRep) {
      scop->addInvariantLoadMapping(LInst, ClassRep);
      continue;
    }

    ClassRep = LInst;
    scop->addInvariantEquivClass(
        InvariantEquivClassTy{PointerSCEV, MemoryAccessList(), nullptr, Ty});
  }
}

bool ScopBuilder::buildDomains(
    Region *R, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  bool IsOnlyNonAffineRegion = scop->isNonAffineSubRegion(R);
  auto *EntryBB = R->getEntry();
  auto *L = IsOnlyNonAffineRegion ? nullptr : LI.getLoopFor(EntryBB);
  int LD = scop->getRelativeLoopDepth(L);
  auto *S =
      isl_set_universe(isl_space_set_alloc(scop->getIslCtx().get(), 0, LD + 1));

  InvalidDomainMap[EntryBB] = isl::manage(isl_set_empty(isl_set_get_space(S)));
  isl::set Domain = isl::manage(S);
  scop->setDomain(EntryBB, Domain);

  if (IsOnlyNonAffineRegion)
    return !containsErrorBlock(R->getNode(), *R, LI, DT);

  if (!buildDomainsWithBranchConstraints(R, InvalidDomainMap))
    return false;

  if (!propagateDomainConstraints(R, InvalidDomainMap))
    return false;

  // Error blocks and blocks dominated by them have been assumed to never be
  // executed. Representing them in the Scop does not add any value. In fact,
  // it is likely to cause issues during construction of the ScopStmts. The
  // contents of error blocks have not been verified to be expressible and
  // will cause problems when building up a ScopStmt for them.
  // Furthermore, basic blocks dominated by error blocks may reference
  // instructions in the error block which, if the error block is not modeled,
  // can themselves not be constructed properly. To this end we will replace
  // the domains of error blocks and those only reachable via error blocks
  // with an empty set. Additionally, we will record for each block under which
  // parameter combination it would be reached via an error block in its
  // InvalidDomain. This information is needed during load hoisting.
  if (!propagateInvalidStmtDomains(R, InvalidDomainMap))
    return false;

  return true;
}

bool ScopBuilder::buildDomainsWithBranchConstraints(
    Region *R, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  // To create the domain for each block in R we iterate over all blocks and
  // subregions in R and propagate the conditions under which the current region
  // element is executed. To this end we iterate in reverse post order over R as
  // it ensures that we first visit all predecessors of a region node (either a
  // basic block or a subregion) before we visit the region node itself.
  // Initially, only the domain for the SCoP region entry block is set and from
  // there we propagate the current domain to all successors, however we add the
  // condition that the successor is actually executed next.
  // As we are only interested in non-loop carried constraints here we can
  // simply skip loop back edges.

  SmallPtrSet<BasicBlock *, 8> FinishedExitBlocks;
  ReversePostOrderTraversal<Region *> RTraversal(R);
  for (auto *RN : RTraversal) {
    // Recurse for affine subregions but go on for basic blocks and non-affine
    // subregions.
    if (RN->isSubRegion()) {
      Region *SubRegion = RN->getNodeAs<Region>();
      if (!scop->isNonAffineSubRegion(SubRegion)) {
        if (!buildDomainsWithBranchConstraints(SubRegion, InvalidDomainMap))
          return false;
        continue;
      }
    }

    if (containsErrorBlock(RN, scop->getRegion(), LI, DT))
      scop->notifyErrorBlock();

    BasicBlock *BB = getRegionNodeBasicBlock(RN);
    Instruction *TI = BB->getTerminator();

    if (isa<UnreachableInst>(TI))
      continue;

    if (!scop->isDomainDefined(BB))
      continue;
    isl::set Domain = scop->getDomainConditions(BB);

    scop->updateMaxLoopDepth(isl_set_n_dim(Domain.get()));

    auto *BBLoop = getRegionNodeLoop(RN, LI);
    // Propagate the domain from BB directly to blocks that have a superset
    // domain, at the moment only region exit nodes of regions that start in BB.
    propagateDomainConstraintsToRegionExit(BB, BBLoop, FinishedExitBlocks,
                                           InvalidDomainMap);

    // If all successors of BB have been set a domain through the propagation
    // above we do not need to build condition sets but can just skip this
    // block. However, it is important to note that this is a local property
    // with regards to the region @p R. To this end FinishedExitBlocks is a
    // local variable.
    auto IsFinishedRegionExit = [&FinishedExitBlocks](BasicBlock *SuccBB) {
      return FinishedExitBlocks.count(SuccBB);
    };
    if (std::all_of(succ_begin(BB), succ_end(BB), IsFinishedRegionExit))
      continue;

    // Build the condition sets for the successor nodes of the current region
    // node. If it is a non-affine subregion we will always execute the single
    // exit node, hence the single entry node domain is the condition set. For
    // basic blocks we use the helper function buildConditionSets.
    SmallVector<isl_set *, 8> ConditionSets;
    if (RN->isSubRegion())
      ConditionSets.push_back(Domain.copy());
    else if (!buildConditionSets(BB, TI, BBLoop, Domain.get(), InvalidDomainMap,
                                 ConditionSets))
      return false;

    // Now iterate over the successors and set their initial domain based on
    // their condition set. We skip back edges here and have to be careful when
    // we leave a loop not to keep constraints over a dimension that doesn't
    // exist anymore.
    assert(RN->isSubRegion() || TI->getNumSuccessors() == ConditionSets.size());
    for (unsigned u = 0, e = ConditionSets.size(); u < e; u++) {
      isl::set CondSet = isl::manage(ConditionSets[u]);
      BasicBlock *SuccBB = getRegionNodeSuccessor(RN, TI, u);

      // Skip blocks outside the region.
      if (!scop->contains(SuccBB))
        continue;

      // If we propagate the domain of some block to "SuccBB" we do not have to
      // adjust the domain.
      if (FinishedExitBlocks.count(SuccBB))
        continue;

      // Skip back edges.
      if (DT.dominates(SuccBB, BB))
        continue;

      Loop *SuccBBLoop =
          getFirstNonBoxedLoopFor(SuccBB, LI, scop->getBoxedLoops());

      CondSet = adjustDomainDimensions(CondSet, BBLoop, SuccBBLoop);

      // Set the domain for the successor or merge it with an existing domain in
      // case there are multiple paths (without loop back edges) to the
      // successor block.
      isl::set &SuccDomain = scop->getOrInitEmptyDomain(SuccBB);

      if (SuccDomain) {
        SuccDomain = SuccDomain.unite(CondSet).coalesce();
      } else {
        // Initialize the invalid domain.
        InvalidDomainMap[SuccBB] = CondSet.empty(CondSet.get_space());
        SuccDomain = CondSet;
      }

      SuccDomain = SuccDomain.detect_equalities();

      // Check if the maximal number of domain disjunctions was reached.
      // In case this happens we will clean up and bail.
      if (SuccDomain.n_basic_set() < MaxDisjunctsInDomain)
        continue;

      scop->invalidate(COMPLEXITY, DebugLoc());
      while (++u < ConditionSets.size())
        isl_set_free(ConditionSets[u]);
      return false;
    }
  }

  return true;
}

bool ScopBuilder::propagateInvalidStmtDomains(
    Region *R, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  ReversePostOrderTraversal<Region *> RTraversal(R);
  for (auto *RN : RTraversal) {

    // Recurse for affine subregions but go on for basic blocks and non-affine
    // subregions.
    if (RN->isSubRegion()) {
      Region *SubRegion = RN->getNodeAs<Region>();
      if (!scop->isNonAffineSubRegion(SubRegion)) {
        if (!propagateInvalidStmtDomains(SubRegion, InvalidDomainMap))
          return false;
        continue;
      }
    }

    bool ContainsErrorBlock = containsErrorBlock(RN, scop->getRegion(), LI, DT);
    BasicBlock *BB = getRegionNodeBasicBlock(RN);
    isl::set &Domain = scop->getOrInitEmptyDomain(BB);
    assert(Domain && "Cannot propagate a nullptr");

    isl::set InvalidDomain = InvalidDomainMap[BB];

    bool IsInvalidBlock = ContainsErrorBlock || Domain.is_subset(InvalidDomain);

    if (!IsInvalidBlock) {
      InvalidDomain = InvalidDomain.intersect(Domain);
    } else {
      InvalidDomain = Domain;
      isl::set DomPar = Domain.params();
      recordAssumption(&RecordedAssumptions, ERRORBLOCK, DomPar,
                       BB->getTerminator()->getDebugLoc(), AS_RESTRICTION);
      Domain = isl::set::empty(Domain.get_space());
    }

    if (InvalidDomain.is_empty()) {
      InvalidDomainMap[BB] = InvalidDomain;
      continue;
    }

    auto *BBLoop = getRegionNodeLoop(RN, LI);
    auto *TI = BB->getTerminator();
    unsigned NumSuccs = RN->isSubRegion() ? 1 : TI->getNumSuccessors();
    for (unsigned u = 0; u < NumSuccs; u++) {
      auto *SuccBB = getRegionNodeSuccessor(RN, TI, u);

      // Skip successors outside the SCoP.
      if (!scop->contains(SuccBB))
        continue;

      // Skip backedges.
      if (DT.dominates(SuccBB, BB))
        continue;

      Loop *SuccBBLoop =
          getFirstNonBoxedLoopFor(SuccBB, LI, scop->getBoxedLoops());

      auto AdjustedInvalidDomain =
          adjustDomainDimensions(InvalidDomain, BBLoop, SuccBBLoop);

      isl::set SuccInvalidDomain = InvalidDomainMap[SuccBB];
      SuccInvalidDomain = SuccInvalidDomain.unite(AdjustedInvalidDomain);
      SuccInvalidDomain = SuccInvalidDomain.coalesce();

      InvalidDomainMap[SuccBB] = SuccInvalidDomain;

      // Check if the maximal number of domain disjunctions was reached.
      // In case this happens we will bail.
      if (SuccInvalidDomain.n_basic_set() < MaxDisjunctsInDomain)
        continue;

      InvalidDomainMap.erase(BB);
      scop->invalidate(COMPLEXITY, TI->getDebugLoc(), TI->getParent());
      return false;
    }

    InvalidDomainMap[BB] = InvalidDomain;
  }

  return true;
}

void ScopBuilder::buildPHIAccesses(ScopStmt *PHIStmt, PHINode *PHI,
                                   Region *NonAffineSubRegion,
                                   bool IsExitBlock) {
  // PHI nodes that are in the exit block of the region, hence if IsExitBlock is
  // true, are not modeled as ordinary PHI nodes as they are not part of the
  // region. However, we model the operands in the predecessor blocks that are
  // part of the region as regular scalar accesses.

  // If we can synthesize a PHI we can skip it, however only if it is in
  // the region. If it is not it can only be in the exit block of the region.
  // In this case we model the operands but not the PHI itself.
  auto *Scope = LI.getLoopFor(PHI->getParent());
  if (!IsExitBlock && canSynthesize(PHI, *scop, &SE, Scope))
    return;

  // PHI nodes are modeled as if they had been demoted prior to the SCoP
  // detection. Hence, the PHI is a load of a new memory location in which the
  // incoming value was written at the end of the incoming basic block.
  bool OnlyNonAffineSubRegionOperands = true;
  for (unsigned u = 0; u < PHI->getNumIncomingValues(); u++) {
    Value *Op = PHI->getIncomingValue(u);
    BasicBlock *OpBB = PHI->getIncomingBlock(u);
    ScopStmt *OpStmt = scop->getIncomingStmtFor(PHI->getOperandUse(u));

    // Do not build PHI dependences inside a non-affine subregion, but make
    // sure that the necessary scalar values are still made available.
    if (NonAffineSubRegion && NonAffineSubRegion->contains(OpBB)) {
      auto *OpInst = dyn_cast<Instruction>(Op);
      if (!OpInst || !NonAffineSubRegion->contains(OpInst))
        ensureValueRead(Op, OpStmt);
      continue;
    }

    OnlyNonAffineSubRegionOperands = false;
    ensurePHIWrite(PHI, OpStmt, OpBB, Op, IsExitBlock);
  }

  if (!OnlyNonAffineSubRegionOperands && !IsExitBlock) {
    addPHIReadAccess(PHIStmt, PHI);
  }
}

void ScopBuilder::buildScalarDependences(ScopStmt *UserStmt,
                                         Instruction *Inst) {
  assert(!isa<PHINode>(Inst));

  // Pull-in required operands.
  for (Use &Op : Inst->operands())
    ensureValueRead(Op.get(), UserStmt);
}

// Create a sequence of two schedules. Either argument may be null and is
// interpreted as the empty schedule. Can also return null if both schedules are
// empty.
static isl::schedule combineInSequence(isl::schedule Prev, isl::schedule Succ) {
  if (!Prev)
    return Succ;
  if (!Succ)
    return Prev;

  return Prev.sequence(Succ);
}

// Create an isl_multi_union_aff that defines an identity mapping from the
// elements of USet to their N-th dimension (one-based).
//
// # Example:
//
//   Domain: { A[i,j]; B[i,j,k] }
//   N: 2
//
// Resulting Mapping: { A[i,j] -> [(j)]; B[i,j,k] -> [(j)] }
//
// @param USet   A union set describing the elements for which to generate a
//               mapping.
// @param N      The dimension to map to.
// @returns      A mapping from USet to its N-th dimension.
static isl::multi_union_pw_aff mapToDimension(isl::union_set USet, int N) {
  assert(N >= 0);
  assert(USet);
  assert(!USet.is_empty());

  auto Result = isl::union_pw_multi_aff::empty(USet.get_space());

  for (isl::set S : USet.get_set_list()) {
    int Dim = S.dim(isl::dim::set);
    auto PMA = isl::pw_multi_aff::project_out_map(S.get_space(), isl::dim::set,
                                                  N, Dim - N);
    if (N > 1)
      PMA = PMA.drop_dims(isl::dim::out, 0, N - 1);

    Result = Result.add_pw_multi_aff(PMA);
  }

  return isl::multi_union_pw_aff(isl::union_pw_multi_aff(Result));
}
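
// buildSchedule below uses this when a loop is completed: with Dimension
// equal to the loop's position on the LoopStack, mapToDimension extracts the
// schedule dimension of that loop from every statement domain so it can be
// inserted as a partial schedule into the schedule tree.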

void ScopBuilder::buildSchedule() {
  Loop *L = getLoopSurroundingScop(*scop, LI);
  LoopStackTy LoopStack({LoopStackElementTy(L, nullptr, 0)});
  buildSchedule(scop->getRegion().getNode(), LoopStack);
  assert(LoopStack.size() == 1 && LoopStack.back().L == L);
  scop->setScheduleTree(LoopStack[0].Schedule);
}

/// To generate a schedule for the elements in a Region we traverse the Region
/// in reverse-post-order and add the contained RegionNodes in traversal order
/// to the schedule of the loop that is currently at the top of the LoopStack.
/// For loop-free codes, this results in a correct sequential ordering.
///
/// Example:
///              bb1(0)
///             /     \.
///         bb2(1)   bb3(2)
///             \    /   \.
///             bb4(3)  bb5(4)
///                \    /
///                bb6(5)
///
/// Including loops requires additional processing. Whenever a loop header is
/// encountered, the corresponding loop is added to the @p LoopStack. Starting
/// from an empty schedule, we first process all RegionNodes that are within
/// this loop and complete the sequential schedule at this loop-level before
/// processing any other nodes. To implement this
/// loop-nodes-first-processing, the reverse post-order traversal is
/// insufficient. Hence, we additionally check if the traversal yields
/// sub-regions or blocks that are outside the last loop on the @p LoopStack.
/// These region nodes are then queued and only traversed after all nodes
/// within the current loop have been processed.
void ScopBuilder::buildSchedule(Region *R, LoopStackTy &LoopStack) {
  Loop *OuterScopLoop = getLoopSurroundingScop(*scop, LI);

  ReversePostOrderTraversal<Region *> RTraversal(R);
  std::deque<RegionNode *> WorkList(RTraversal.begin(), RTraversal.end());
  std::deque<RegionNode *> DelayList;
  bool LastRNWaiting = false;

  // Iterate over the region @p R in reverse post-order but queue
  // sub-regions/blocks iff they are not part of the last encountered but not
  // completely traversed loop. The variable LastRNWaiting is a flag to indicate
  // that we queued the last sub-region/block from the reverse post-order
  // iterator. If it is set we have to explore the next sub-region/block from
  // the iterator (if any) to guarantee progress. If it is not set we first try
  // the next queued sub-region/blocks.
  while (!WorkList.empty() || !DelayList.empty()) {
    RegionNode *RN;

    if ((LastRNWaiting && !WorkList.empty()) || DelayList.empty()) {
      RN = WorkList.front();
      WorkList.pop_front();
      LastRNWaiting = false;
    } else {
      RN = DelayList.front();
      DelayList.pop_front();
    }

    Loop *L = getRegionNodeLoop(RN, LI);
    if (!scop->contains(L))
      L = OuterScopLoop;

    Loop *LastLoop = LoopStack.back().L;
    if (LastLoop != L) {
      if (LastLoop && !LastLoop->contains(L)) {
        LastRNWaiting = true;
        DelayList.push_back(RN);
        continue;
      }
      LoopStack.push_back({L, nullptr, 0});
    }
    buildSchedule(RN, LoopStack);
  }
}

void ScopBuilder::buildSchedule(RegionNode *RN, LoopStackTy &LoopStack) {
  if (RN->isSubRegion()) {
    auto *LocalRegion = RN->getNodeAs<Region>();
    if (!scop->isNonAffineSubRegion(LocalRegion)) {
      buildSchedule(LocalRegion, LoopStack);
      return;
    }
  }

  assert(LoopStack.rbegin() != LoopStack.rend());
  auto LoopData = LoopStack.rbegin();
  LoopData->NumBlocksProcessed += getNumBlocksInRegionNode(RN);

  for (auto *Stmt : scop->getStmtListFor(RN)) {
    isl::union_set UDomain{Stmt->getDomain()};
    auto StmtSchedule = isl::schedule::from_domain(UDomain);
    LoopData->Schedule = combineInSequence(LoopData->Schedule, StmtSchedule);
  }

  // Check if we just processed the last node in this loop. If we did, finalize
  // the loop by:
  //
  //   - adding new schedule dimensions
  //   - folding the resulting schedule into the parent loop schedule
  //   - dropping the loop schedule from the LoopStack.
  //
  // Then continue to check surrounding loops, which might also have been
  // completed by this node.
  size_t Dimension = LoopStack.size();
  while (LoopData->L &&
         LoopData->NumBlocksProcessed == getNumBlocksInLoop(LoopData->L)) {
    isl::schedule Schedule = LoopData->Schedule;
    auto NumBlocksProcessed = LoopData->NumBlocksProcessed;

    assert(std::next(LoopData) != LoopStack.rend());
    ++LoopData;
    --Dimension;

    if (Schedule) {
      isl::union_set Domain = Schedule.get_domain();
      isl::multi_union_pw_aff MUPA = mapToDimension(Domain, Dimension);
      Schedule = Schedule.insert_partial_schedule(MUPA);
      LoopData->Schedule = combineInSequence(LoopData->Schedule, Schedule);
    }

    LoopData->NumBlocksProcessed += NumBlocksProcessed;
  }
  // Now pop all loops processed up there from the LoopStack.
  LoopStack.erase(LoopStack.begin() + Dimension, LoopStack.end());
}
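
// For a two-deep loop nest
//
//   for (i ...)     /* L1 */
//     for (j ...)   /* L2 */
//       Stmt(i, j);
//
// this builds the domain schedule for Stmt while L2 is on top of the stack,
// inserts the j-dimension when L2 completes, folds the result into the
// schedule of L1, and finally inserts the i-dimension when L1 completes.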

void ScopBuilder::buildEscapingDependences(Instruction *Inst) {
  // Check for uses of this instruction outside the scop. Because we do not
  // iterate over such instructions and therefore did not "ensure" the existence
  // of a write, we must determine such use here.
  if (scop->isEscaping(Inst))
    ensureValueWrite(Inst);
}

/// Check that a value is a Fortran Array descriptor.
///
/// We check if V has the following structure:
/// %"struct.array1_real(kind=8)" = type { i8*, i<zz>, i<zz>,
///                                   [<num> x %struct.descriptor_dimension] }
///
/// %struct.descriptor_dimension = type { i<zz>, i<zz>, i<zz> }
///
/// 1. V's type name starts with "struct.array"
/// 2. V's type has layout as shown.
/// 3. Final member of V's type has name "struct.descriptor_dimension",
/// 4. "struct.descriptor_dimension" has layout as shown.
/// 5. Consistent use of i<zz> where <zz> is some fixed integer number.
///
/// We are interested in such types since this is the code that dragonegg
/// generates for Fortran array descriptors.
///
/// @param V the Value to be checked.
///
/// @returns True if V is a Fortran array descriptor, False otherwise.
bool isFortranArrayDescriptor(Value *V) {
  PointerType *PTy = dyn_cast<PointerType>(V->getType());

  if (!PTy)
    return false;

  Type *Ty = PTy->getElementType();
  assert(Ty && "Ty expected to be initialized");
  auto *StructArrTy = dyn_cast<StructType>(Ty);

  if (!(StructArrTy && StructArrTy->hasName()))
    return false;

  if (!StructArrTy->getName().startswith("struct.array"))
    return false;

  if (StructArrTy->getNumElements() != 4)
    return false;

  const ArrayRef<Type *> ArrMemberTys = StructArrTy->elements();

  // i8* match
  if (ArrMemberTys[0] != Type::getInt8PtrTy(V->getContext()))
    return false;

  // Get a reference to the int type and check that all the members
  // share the same int type.
  Type *IntTy = ArrMemberTys[1];
  if (ArrMemberTys[2] != IntTy)
    return false;

  // type: [<num> x %struct.descriptor_dimension]
  ArrayType *DescriptorDimArrayTy = dyn_cast<ArrayType>(ArrMemberTys[3]);
  if (!DescriptorDimArrayTy)
    return false;

  // type: %struct.descriptor_dimension := type { ixx, ixx, ixx }
  StructType *DescriptorDimTy =
      dyn_cast<StructType>(DescriptorDimArrayTy->getElementType());

  if (!(DescriptorDimTy && DescriptorDimTy->hasName()))
    return false;

  if (DescriptorDimTy->getName() != "struct.descriptor_dimension")
    return false;

  if (DescriptorDimTy->getNumElements() != 3)
    return false;

  for (auto MemberTy : DescriptorDimTy->elements()) {
    if (MemberTy != IntTy)
      return false;
  }

  return true;
}
1387
Value *ScopBuilder::findFADAllocationVisible(MemAccInst Inst) {
  // match: 4.1 & 4.2 store/load
  if (!isa<LoadInst>(Inst) && !isa<StoreInst>(Inst))
    return nullptr;

  // match: 4
  if (Inst.getAlignment() != 8)
    return nullptr;

  Value *Address = Inst.getPointerOperand();

  const BitCastInst *Bitcast = nullptr;
  // [match: 3]
  if (auto *Slot = dyn_cast<GetElementPtrInst>(Address)) {
    Value *TypedMem = Slot->getPointerOperand();
    // match: 2
    Bitcast = dyn_cast<BitCastInst>(TypedMem);
  } else {
    // match: 2
    Bitcast = dyn_cast<BitCastInst>(Address);
  }

  if (!Bitcast)
    return nullptr;

  auto *MallocMem = Bitcast->getOperand(0);

  // match: 1
  auto *MallocCall = dyn_cast<CallInst>(MallocMem);
  if (!MallocCall)
    return nullptr;

  Function *MallocFn = MallocCall->getCalledFunction();
  if (!(MallocFn && MallocFn->hasName() && MallocFn->getName() == "malloc"))
    return nullptr;
  // Find all uses of the malloc'd memory. We are looking for a store into a
  // struct whose type is the Fortran descriptor type.
  for (auto user : MallocMem->users()) {
    // match: 5
    auto *MallocStore = dyn_cast<StoreInst>(user);
    if (!MallocStore)
      continue;

    auto *DescriptorGEP =
        dyn_cast<GEPOperator>(MallocStore->getPointerOperand());
    if (!DescriptorGEP)
      continue;

    // match: 5
    auto DescriptorType =
        dyn_cast<StructType>(DescriptorGEP->getSourceElementType());
    if (!(DescriptorType && DescriptorType->hasName()))
      continue;

    Value *Descriptor = dyn_cast<Value>(DescriptorGEP->getPointerOperand());

    if (!Descriptor)
      continue;

    if (!isFortranArrayDescriptor(Descriptor))
      continue;

    return Descriptor;
  }

  return nullptr;
}

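// findFADAllocationInvisible handles descriptors whose allocation is not
// visible in the translation unit. It matches (roughly):
//   1. %mem  = load <elemtype>*, <elemtype>** bitcast (%descriptor* ...)
//   2. [%slot = getelementptr <elemtype>, <elemtype>* %mem, ...] (optional)
//   3. a load from / store to %slot (or %mem directly)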
Value *ScopBuilder::findFADAllocationInvisible(MemAccInst Inst) {
  // match: 3
  if (!isa<LoadInst>(Inst) && !isa<StoreInst>(Inst))
    return nullptr;

  Value *Slot = Inst.getPointerOperand();

  LoadInst *MemLoad = nullptr;
  // [match: 2]
  if (auto *SlotGEP = dyn_cast<GetElementPtrInst>(Slot)) {
    // match: 1
    MemLoad = dyn_cast<LoadInst>(SlotGEP->getPointerOperand());
  } else {
    // match: 1
    MemLoad = dyn_cast<LoadInst>(Slot);
  }

  if (!MemLoad)
    return nullptr;

  auto *BitcastOperator =
      dyn_cast<BitCastOperator>(MemLoad->getPointerOperand());
  if (!BitcastOperator)
    return nullptr;

  Value *Descriptor = dyn_cast<Value>(BitcastOperator->getOperand(0));
  if (!Descriptor)
    return nullptr;

  if (!isFortranArrayDescriptor(Descriptor))
    return nullptr;

  return Descriptor;
}

void ScopBuilder::addRecordedAssumptions() {
  for (auto &AS : llvm::reverse(RecordedAssumptions)) {

    if (!AS.BB) {
      scop->addAssumption(AS.Kind, AS.Set, AS.Loc, AS.Sign,
                          nullptr /* BasicBlock */);
      continue;
    }

    // If the domain was deleted the assumptions are void.
    isl_set *Dom = scop->getDomainConditions(AS.BB).release();
    if (!Dom)
      continue;

    // If a basic block was given use its domain to simplify the assumption.
    // In case of restrictions we know they only have to hold on the domain,
    // thus we can intersect them with the domain of the block. However, for
    // assumptions the domain has to imply them, thus:
    //                     _              _____
    //   Dom => S   <==>   A v B   <==>   A - B
    //
    // To avoid the complement we will register A - B as a restriction not an
    // assumption.
    isl_set *S = AS.Set.copy();
    if (AS.Sign == AS_RESTRICTION)
      S = isl_set_params(isl_set_intersect(S, Dom));
    else /* (AS.Sign == AS_ASSUMPTION) */
      S = isl_set_params(isl_set_subtract(Dom, S));

    scop->addAssumption(AS.Kind, isl::manage(S), AS.Loc, AS_RESTRICTION, AS.BB);
  }
}

void ScopBuilder::addUserAssumptions(
    AssumptionCache &AC, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
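  // Each entry in the assumption cache is a call to llvm.assume whose single
  // i1 operand encodes a condition the user asserted to hold (e.g., via
  // __builtin_assume).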
  for (auto &Assumption : AC.assumptions()) {
    auto *CI = dyn_cast_or_null<CallInst>(Assumption);
    if (!CI || CI->getNumArgOperands() != 1)
      continue;

    bool InScop = scop->contains(CI);
    if (!InScop && !scop->isDominatedBy(DT, CI->getParent()))
      continue;

    auto *L = LI.getLoopFor(CI->getParent());
    auto *Val = CI->getArgOperand(0);
    ParameterSetTy DetectedParams;
    auto &R = scop->getRegion();
    if (!isAffineConstraint(Val, &R, L, SE, DetectedParams)) {
      ORE.emit(
          OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreUserAssumption", CI)
          << "Non-affine user assumption ignored.");
      continue;
    }

    // Collect all newly introduced parameters.
    ParameterSetTy NewParams;
    for (auto *Param : DetectedParams) {
      Param = extractConstantFactor(Param, SE).second;
      Param = scop->getRepresentingInvariantLoadSCEV(Param);
      if (scop->isParam(Param))
        continue;
      NewParams.insert(Param);
    }

    SmallVector<isl_set *, 2> ConditionSets;
    auto *TI = InScop ? CI->getParent()->getTerminator() : nullptr;
    BasicBlock *BB = InScop ? CI->getParent() : R.getEntry();
    auto *Dom = InScop ? isl_set_copy(scop->getDomainConditions(BB).get())
                       : isl_set_copy(scop->getContext().get());
    assert(Dom && "Cannot propagate a nullptr.");
    bool Valid = buildConditionSets(BB, Val, TI, L, Dom, InvalidDomainMap,
                                    ConditionSets);
    isl_set_free(Dom);

    if (!Valid)
      continue;

    isl_set *AssumptionCtx = nullptr;
    if (InScop) {
      AssumptionCtx = isl_set_complement(isl_set_params(ConditionSets[1]));
      isl_set_free(ConditionSets[0]);
    } else {
      AssumptionCtx = isl_set_complement(ConditionSets[1]);
      AssumptionCtx = isl_set_intersect(AssumptionCtx, ConditionSets[0]);
    }

    // Project out newly introduced parameters as they are not otherwise
    // useful.
    if (!NewParams.empty()) {
      for (isl_size u = 0; u < isl_set_n_param(AssumptionCtx); u++) {
        auto *Id = isl_set_get_dim_id(AssumptionCtx, isl_dim_param, u);
        auto *Param = static_cast<const SCEV *>(isl_id_get_user(Id));
        isl_id_free(Id);

        if (!NewParams.count(Param))
          continue;

        AssumptionCtx =
            isl_set_project_out(AssumptionCtx, isl_dim_param, u--, 1);
      }
    }
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "UserAssumption", CI)
             << "Use user assumption: " << stringFromIslObj(AssumptionCtx));
    isl::set newContext =
        scop->getContext().intersect(isl::manage(AssumptionCtx));
    scop->setContext(newContext);
  }
}

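// Try to model the access as a multi-dimensional array access whose dimension
// sizes are compile-time constants recovered from the GEP that computes the
// address. Returns true if such an access could be built.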
bool ScopBuilder::buildAccessMultiDimFixed(MemAccInst Inst, ScopStmt *Stmt) {
  Value *Val = Inst.getValueOperand();
  Type *ElementType = Val->getType();
  Value *Address = Inst.getPointerOperand();
  const SCEV *AccessFunction =
      SE.getSCEVAtScope(Address, LI.getLoopFor(Inst->getParent()));
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));
  enum MemoryAccess::AccessType AccType =
      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  if (auto *BitCast = dyn_cast<BitCastInst>(Address)) {
    auto *Src = BitCast->getOperand(0);
    auto *SrcTy = Src->getType();
    auto *DstTy = BitCast->getType();
    // Do not try to delinearize non-sized (opaque) pointers.
    if ((SrcTy->isPointerTy() && !SrcTy->getPointerElementType()->isSized()) ||
        (DstTy->isPointerTy() && !DstTy->getPointerElementType()->isSized())) {
      return false;
    }
    if (SrcTy->isPointerTy() && DstTy->isPointerTy() &&
        DL.getTypeAllocSize(SrcTy->getPointerElementType()) ==
            DL.getTypeAllocSize(DstTy->getPointerElementType()))
      Address = Src;
  }

  auto *GEP = dyn_cast<GetElementPtrInst>(Address);
  if (!GEP)
    return false;

  SmallVector<const SCEV *, 4> Subscripts;
  SmallVector<int, 4> Sizes;
  SE.getIndexExpressionsFromGEP(GEP, Subscripts, Sizes);
  auto *BasePtr = GEP->getOperand(0);

  if (auto *BasePtrCast = dyn_cast<BitCastInst>(BasePtr))
    BasePtr = BasePtrCast->getOperand(0);

  // Check for identical base pointers to ensure that we do not miss index
  // offsets that have been added before this GEP is applied.
  if (BasePtr != BasePointer->getValue())
    return false;

  std::vector<const SCEV *> SizesSCEV;

  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();

  Loop *SurroundingLoop = Stmt->getSurroundingLoop();
  for (auto *Subscript : Subscripts) {
    InvariantLoadsSetTy AccessILS;
    if (!isAffineExpr(&scop->getRegion(), SurroundingLoop, Subscript, SE,
                      &AccessILS))
      return false;

    for (LoadInst *LInst : AccessILS)
      if (!ScopRIL.count(LInst))
        return false;
  }

  if (Sizes.empty())
    return false;

  SizesSCEV.push_back(nullptr);

  for (auto V : Sizes)
    SizesSCEV.push_back(SE.getSCEV(
        ConstantInt::get(IntegerType::getInt64Ty(BasePtr->getContext()), V)));

  addArrayAccess(Stmt, Inst, AccType, BasePointer->getValue(), ElementType,
                 true, Subscripts, SizesSCEV, Val);
  return true;
}

bool ScopBuilder::buildAccessMultiDimParam(MemAccInst Inst, ScopStmt *Stmt) {
  if (!PollyDelinearize)
    return false;

  Value *Address = Inst.getPointerOperand();
  Value *Val = Inst.getValueOperand();
  Type *ElementType = Val->getType();
  unsigned ElementSize = DL.getTypeAllocSize(ElementType);
  enum MemoryAccess::AccessType AccType =
      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  const SCEV *AccessFunction =
      SE.getSCEVAtScope(Address, LI.getLoopFor(Inst->getParent()));
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));

  assert(BasePointer && "Could not find base pointer");

  auto &InsnToMemAcc = scop->getInsnToMemAccMap();
  auto AccItr = InsnToMemAcc.find(Inst);
  if (AccItr == InsnToMemAcc.end())
    return false;

  std::vector<const SCEV *> Sizes = {nullptr};

  Sizes.insert(Sizes.end(), AccItr->second.Shape->DelinearizedSizes.begin(),
               AccItr->second.Shape->DelinearizedSizes.end());

  // In case only the element size is contained in the 'Sizes' array, the
  // access does not access a real multi-dimensional array. Hence, we allow
  // the normal single-dimensional access construction to handle this.
  if (Sizes.size() == 1)
    return false;

  // Remove the element size. This information is already provided by the
  // ElementSize parameter. In case the element size of this access and the
  // element size used for delinearization differ, the delinearization is
  // incorrect. Hence, we invalidate the scop.
  //
  // TODO: Handle delinearization with differing element sizes.
  auto DelinearizedSize =
      cast<SCEVConstant>(Sizes.back())->getAPInt().getSExtValue();
  Sizes.pop_back();
  if (ElementSize != DelinearizedSize)
    scop->invalidate(DELINEARIZATION, Inst->getDebugLoc(), Inst->getParent());

  addArrayAccess(Stmt, Inst, AccType, BasePointer->getValue(), ElementType,
                 true, AccItr->second.DelinearizedSubscripts, Sizes, Val);
  return true;
}

bool ScopBuilder::buildAccessMemIntrinsic(MemAccInst Inst, ScopStmt *Stmt) {
  auto *MemIntr = dyn_cast_or_null<MemIntrinsic>(Inst);

  if (MemIntr == nullptr)
    return false;

  auto *L = LI.getLoopFor(Inst->getParent());
  auto *LengthVal = SE.getSCEVAtScope(MemIntr->getLength(), L);
  assert(LengthVal);

  // Check if the length val is actually affine or if we overapproximate it.
  InvariantLoadsSetTy AccessILS;
  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();

  Loop *SurroundingLoop = Stmt->getSurroundingLoop();
  bool LengthIsAffine = isAffineExpr(&scop->getRegion(), SurroundingLoop,
                                     LengthVal, SE, &AccessILS);
  for (LoadInst *LInst : AccessILS)
    if (!ScopRIL.count(LInst))
      LengthIsAffine = false;
  if (!LengthIsAffine)
    LengthVal = nullptr;

  auto *DestPtrVal = MemIntr->getDest();
  assert(DestPtrVal);

  auto *DestAccFunc = SE.getSCEVAtScope(DestPtrVal, L);
  assert(DestAccFunc);
  // Ignore accesses to "NULL".
  // TODO: We could use this to optimize the region further, e.g., intersect
  //       the context with
  //         isl_set_complement(isl_set_params(getDomain()))
  //       as we know it would be undefined to execute this instruction anyway.
  if (DestAccFunc->isZero())
    return true;

  auto *DestPtrSCEV = dyn_cast<SCEVUnknown>(SE.getPointerBase(DestAccFunc));
  assert(DestPtrSCEV);
  DestAccFunc = SE.getMinusSCEV(DestAccFunc, DestPtrSCEV);
  addArrayAccess(Stmt, Inst, MemoryAccess::MUST_WRITE, DestPtrSCEV->getValue(),
                 IntegerType::getInt8Ty(DestPtrVal->getContext()),
                 LengthIsAffine, {DestAccFunc, LengthVal}, {nullptr},
                 Inst.getValueOperand());

  auto *MemTrans = dyn_cast<MemTransferInst>(MemIntr);
  if (!MemTrans)
    return true;

  auto *SrcPtrVal = MemTrans->getSource();
  assert(SrcPtrVal);

  auto *SrcAccFunc = SE.getSCEVAtScope(SrcPtrVal, L);
  assert(SrcAccFunc);
  // Ignore accesses to "NULL".
  // TODO: See above TODO.
  if (SrcAccFunc->isZero())
    return true;

  auto *SrcPtrSCEV = dyn_cast<SCEVUnknown>(SE.getPointerBase(SrcAccFunc));
  assert(SrcPtrSCEV);
  SrcAccFunc = SE.getMinusSCEV(SrcAccFunc, SrcPtrSCEV);
  addArrayAccess(Stmt, Inst, MemoryAccess::READ, SrcPtrSCEV->getValue(),
                 IntegerType::getInt8Ty(SrcPtrVal->getContext()),
                 LengthIsAffine, {SrcAccFunc, LengthVal}, {nullptr},
                 Inst.getValueOperand());

  return true;
}

bool ScopBuilder::buildAccessCallInst(MemAccInst Inst, ScopStmt *Stmt) {
  auto *CI = dyn_cast_or_null<CallInst>(Inst);

  if (CI == nullptr)
    return false;

  if (CI->doesNotAccessMemory() || isIgnoredIntrinsic(CI) || isDebugCall(CI))
    return true;

  bool ReadOnly = false;
  auto *AF = SE.getConstant(IntegerType::getInt64Ty(CI->getContext()), 0);
  auto *CalledFunction = CI->getCalledFunction();
  switch (AA.getModRefBehavior(CalledFunction)) {
  case FMRB_UnknownModRefBehavior:
    llvm_unreachable("Unknown mod ref behaviour cannot be represented.");
  case FMRB_DoesNotAccessMemory:
    return true;
  case FMRB_OnlyWritesMemory:
  case FMRB_OnlyWritesInaccessibleMem:
  case FMRB_OnlyWritesInaccessibleOrArgMem:
  case FMRB_OnlyAccessesInaccessibleMem:
  case FMRB_OnlyAccessesInaccessibleOrArgMem:
    return false;
  case FMRB_OnlyReadsMemory:
  case FMRB_OnlyReadsInaccessibleMem:
  case FMRB_OnlyReadsInaccessibleOrArgMem:
    GlobalReads.emplace_back(Stmt, CI);
    return true;
  case FMRB_OnlyReadsArgumentPointees:
    ReadOnly = true;
    LLVM_FALLTHROUGH;
  case FMRB_OnlyWritesArgumentPointees:
  case FMRB_OnlyAccessesArgumentPointees: {
    auto AccType = ReadOnly ? MemoryAccess::READ : MemoryAccess::MAY_WRITE;
    Loop *L = LI.getLoopFor(Inst->getParent());
    for (const auto &Arg : CI->arg_operands()) {
      if (!Arg->getType()->isPointerTy())
        continue;

      auto *ArgSCEV = SE.getSCEVAtScope(Arg, L);
      if (ArgSCEV->isZero())
        continue;

      auto *ArgBasePtr = cast<SCEVUnknown>(SE.getPointerBase(ArgSCEV));
      addArrayAccess(Stmt, Inst, AccType, ArgBasePtr->getValue(),
                     ArgBasePtr->getType(), false, {AF}, {nullptr}, CI);
    }
    return true;
  }
  }

  return true;
}

void ScopBuilder::buildAccessSingleDim(MemAccInst Inst, ScopStmt *Stmt) {
  Value *Address = Inst.getPointerOperand();
  Value *Val = Inst.getValueOperand();
  Type *ElementType = Val->getType();
  enum MemoryAccess::AccessType AccType =
      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  const SCEV *AccessFunction =
      SE.getSCEVAtScope(Address, LI.getLoopFor(Inst->getParent()));
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));

  assert(BasePointer && "Could not find base pointer");
  AccessFunction = SE.getMinusSCEV(AccessFunction, BasePointer);

  // Check if the access depends on a loop contained in a non-affine subregion.
  bool isVariantInNonAffineLoop = false;
  SetVector<const Loop *> Loops;
  findLoops(AccessFunction, Loops);
  for (const Loop *L : Loops)
    if (Stmt->contains(L)) {
      isVariantInNonAffineLoop = true;
      break;
    }

  InvariantLoadsSetTy AccessILS;

  Loop *SurroundingLoop = Stmt->getSurroundingLoop();
  bool IsAffine = !isVariantInNonAffineLoop &&
                  isAffineExpr(&scop->getRegion(), SurroundingLoop,
                               AccessFunction, SE, &AccessILS);

  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();
  for (LoadInst *LInst : AccessILS)
    if (!ScopRIL.count(LInst))
      IsAffine = false;

  if (!IsAffine && AccType == MemoryAccess::MUST_WRITE)
    AccType = MemoryAccess::MAY_WRITE;

  addArrayAccess(Stmt, Inst, AccType, BasePointer->getValue(), ElementType,
                 IsAffine, {AccessFunction}, {nullptr}, Val);
}

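// Dispatch to the most precise modeling that applies: memory intrinsics
// first, then call instructions, then multi-dimensional accesses (fixed-size,
// then parametric), and finally the single-dimensional fallback.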
void ScopBuilder::buildMemoryAccess(MemAccInst Inst, ScopStmt *Stmt) {
  if (buildAccessMemIntrinsic(Inst, Stmt))
    return;

  if (buildAccessCallInst(Inst, Stmt))
    return;

  if (buildAccessMultiDimFixed(Inst, Stmt))
    return;

  if (buildAccessMultiDimParam(Inst, Stmt))
    return;

  buildAccessSingleDim(Inst, Stmt);
}

void ScopBuilder::buildAccessFunctions() {
  for (auto &Stmt : *scop) {
    if (Stmt.isBlockStmt()) {
      buildAccessFunctions(&Stmt, *Stmt.getBasicBlock());
      continue;
    }

    Region *R = Stmt.getRegion();
    for (BasicBlock *BB : R->blocks())
      buildAccessFunctions(&Stmt, *BB, R);
  }

  // Build write accesses for values that are used after the SCoP.
  // The instructions defining them might be synthesizable and therefore not
  // contained in any statement, hence we iterate over the original
  // instructions to identify all escaping values.
  for (BasicBlock *BB : scop->getRegion().blocks()) {
    for (Instruction &Inst : *BB)
      buildEscapingDependences(&Inst);
  }
}

bool ScopBuilder::shouldModelInst(Instruction *Inst, Loop *L) {
  return !Inst->isTerminator() && !isIgnoredIntrinsic(Inst) &&
         !canSynthesize(Inst, *scop, &SE, L);
}

/// Generate a name for a statement.
///
/// @param BB The basic block the statement will represent.
/// @param BBIdx The index of the @p BB relative to other BBs/regions.
/// @param Count The index of the created statement in @p BB.
/// @param IsMain Whether this is the main statement for @p BB. If true, no
///               suffix will be added.
/// @param IsLast Uses a special indicator for the last statement of a BB.
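///
/// For example, with instruction names enabled, the statements of a block
/// %bb3 may be named Stmt_bb3, Stmt_bb3_b, Stmt_bb3_c, ..., Stmt_bb3_last.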
static std::string makeStmtName(BasicBlock *BB, long BBIdx, int Count,
                                bool IsMain, bool IsLast = false) {
  std::string Suffix;
  if (!IsMain) {
    if (UseInstructionNames)
      Suffix = '_';
    if (IsLast)
      Suffix += "last";
    else if (Count < 26)
      Suffix += 'a' + Count;
    else
      Suffix += std::to_string(Count);
  }
  return getIslCompatibleName("Stmt", BB, BBIdx, Suffix, UseInstructionNames);
}

/// Generate a name for a statement that represents a non-affine subregion.
///
/// @param R The region the statement will represent.
/// @param RIdx The index of the @p R relative to other BBs/regions.
static std::string makeStmtName(Region *R, long RIdx) {
  return getIslCompatibleName("Stmt", R->getNameStr(), RIdx, "",
                              UseInstructionNames);
}

void ScopBuilder::buildSequentialBlockStmts(BasicBlock *BB, bool SplitOnStore) {
  Loop *SurroundingLoop = LI.getLoopFor(BB);

  int Count = 0;
  long BBIdx = scop->getNextStmtIdx();
  std::vector<Instruction *> Instructions;
  for (Instruction &Inst : *BB) {
    if (shouldModelInst(&Inst, SurroundingLoop))
      Instructions.push_back(&Inst);
    if (Inst.getMetadata("polly_split_after") ||
        (SplitOnStore && isa<StoreInst>(Inst))) {
      std::string Name = makeStmtName(BB, BBIdx, Count, Count == 0);
      scop->addScopStmt(BB, Name, SurroundingLoop, Instructions);
      Count++;
      Instructions.clear();
    }
  }

  std::string Name = makeStmtName(BB, BBIdx, Count, Count == 0);
  scop->addScopStmt(BB, Name, SurroundingLoop, Instructions);
}

/// Is @p Inst an ordered instruction?
///
/// An unordered instruction is an instruction such that a sequence of
/// unordered instructions can be permuted without changing semantics. Any
/// instruction for which this is not always the case is ordered.
static bool isOrderedInstruction(Instruction *Inst) {
  return Inst->mayHaveSideEffects() || Inst->mayReadOrWriteMemory();
}

/// Join instructions to the same statement if one uses the scalar result of
/// the other.
static void joinOperandTree(EquivalenceClasses<Instruction *> &UnionFind,
                            ArrayRef<Instruction *> ModeledInsts) {
  for (Instruction *Inst : ModeledInsts) {
    if (isa<PHINode>(Inst))
      continue;

    for (Use &Op : Inst->operands()) {
      Instruction *OpInst = dyn_cast<Instruction>(Op.get());
      if (!OpInst)
        continue;

      // Check if OpInst is in the BB and is a modeled instruction.
      auto OpVal = UnionFind.findValue(OpInst);
      if (OpVal == UnionFind.end())
        continue;

      UnionFind.unionSets(Inst, OpInst);
    }
  }
}

/// Ensure that the order of ordered instructions does not change.
///
/// If we encounter an ordered instruction enclosed in instructions belonging
/// to a different statement (which might as well contain ordered instructions,
/// but this is not tested here), join them.
static void
joinOrderedInstructions(EquivalenceClasses<Instruction *> &UnionFind,
                        ArrayRef<Instruction *> ModeledInsts) {
  SetVector<Instruction *> SeenLeaders;
  for (Instruction *Inst : ModeledInsts) {
    if (!isOrderedInstruction(Inst))
      continue;

    Instruction *Leader = UnionFind.getLeaderValue(Inst);
    // Since previous iterations might have merged sets, some items in
    // SeenLeaders are not leaders anymore. However, the new leader of
    // previously merged instructions must be one of the former leaders of
    // these merged instructions.
    bool Inserted = SeenLeaders.insert(Leader);
    if (Inserted)
      continue;

    // Merge statements to close holes. Say, we have already seen statements A
    // and B, in this order. Then we see an instruction of A again and we would
    // see the pattern "A B A". This function joins all statements until the
    // only seen occurrence of A.
    for (Instruction *Prev : reverse(SeenLeaders)) {
      // We are backtracking from the last element until we see Inst's leader
      // in SeenLeaders and merge all into one set. Although leaders of
      // instructions change during the execution of this loop, it's irrelevant
      // as we are just searching for the element that we already confirmed is
      // in the list.
      if (Prev == Leader)
        break;
      UnionFind.unionSets(Prev, Leader);
    }
  }
}

/// If the BasicBlock has an edge from itself, ensure that the PHI WRITEs for
/// the incoming values from this block are executed after the PHI READ.
///
/// Otherwise it could overwrite the incoming value from before the BB with the
/// value for the next execution. This can happen if the PHI WRITE is added to
/// the statement with the instruction that defines the incoming value (instead
/// of the last statement of the same BB). To ensure that the PHI READ and
/// WRITE are in order, we put both into the statement. PHI WRITEs are always
/// executed after PHI READs when they are in the same statement.
///
/// TODO: This is an overpessimization. We only have to ensure that the PHI
/// WRITE is not put into a statement containing the PHI itself. That could
/// also be done by
/// - having all (strongly connected) PHIs in a single statement,
/// - unite only the PHIs in the operand tree of the PHI WRITE (because it only
///   has a chance of being lifted before a PHI by being in a statement with a
///   PHI that comes before in the basic block), or
/// - when uniting statements, ensure that no (relevant) PHIs are overtaken.
static void joinOrderedPHIs(EquivalenceClasses<Instruction *> &UnionFind,
                            ArrayRef<Instruction *> ModeledInsts) {
  for (Instruction *Inst : ModeledInsts) {
    PHINode *PHI = dyn_cast<PHINode>(Inst);
    if (!PHI)
      continue;

    int Idx = PHI->getBasicBlockIndex(PHI->getParent());
    if (Idx < 0)
      continue;

    Instruction *IncomingVal =
        dyn_cast<Instruction>(PHI->getIncomingValue(Idx));
    if (!IncomingVal)
      continue;

    UnionFind.unionSets(PHI, IncomingVal);
  }
}

void ScopBuilder::buildEqivClassBlockStmts(BasicBlock *BB) {
  Loop *L = LI.getLoopFor(BB);

  // Extracting out modeled instructions saves us from checking
  // shouldModelInst() repeatedly.
  SmallVector<Instruction *, 32> ModeledInsts;
  EquivalenceClasses<Instruction *> UnionFind;
  Instruction *MainInst = nullptr, *MainLeader = nullptr;
  for (Instruction &Inst : *BB) {
    if (!shouldModelInst(&Inst, L))
      continue;
    ModeledInsts.push_back(&Inst);
    UnionFind.insert(&Inst);

    // When a BB is split into multiple statements, the main statement is the
    // one containing the 'main' instruction. We select the first instruction
    // that is unlikely to be removed (because it has side-effects) as the main
    // one. It is used to ensure that at least one statement from the BB has
    // the same name as with -polly-stmt-granularity=bb.
    if (!MainInst && (isa<StoreInst>(Inst) ||
                      (isa<CallInst>(Inst) && !isa<IntrinsicInst>(Inst))))
      MainInst = &Inst;
  }

  joinOperandTree(UnionFind, ModeledInsts);
  joinOrderedInstructions(UnionFind, ModeledInsts);
  joinOrderedPHIs(UnionFind, ModeledInsts);

  // The list of instructions for each statement, keyed by the leader
  // instruction that represents the statement.
  MapVector<Instruction *, std::vector<Instruction *>> LeaderToInstList;

  // The order of statements must be preserved w.r.t. their ordered
  // instructions. Without this explicit scan, we would also use non-ordered
  // instructions (whose order is arbitrary) to determine statement order.
  for (Instruction *Inst : ModeledInsts) {
    if (!isOrderedInstruction(Inst))
      continue;

    auto LeaderIt = UnionFind.findLeader(Inst);
    if (LeaderIt == UnionFind.member_end())
      continue;

    // Insert element for the leader instruction.
    (void)LeaderToInstList[*LeaderIt];
  }

  // Collect the instructions of all leaders. UnionFind's member iterators are
  // unfortunately not in any specific order.
  for (Instruction *Inst : ModeledInsts) {
    auto LeaderIt = UnionFind.findLeader(Inst);
    if (LeaderIt == UnionFind.member_end())
      continue;

    if (Inst == MainInst)
      MainLeader = *LeaderIt;
    std::vector<Instruction *> &InstList = LeaderToInstList[*LeaderIt];
    InstList.push_back(Inst);
  }

  // Finally build the statements.
  int Count = 0;
  long BBIdx = scop->getNextStmtIdx();
  for (auto &Instructions : LeaderToInstList) {
    std::vector<Instruction *> &InstList = Instructions.second;

    // If there is no main instruction, make the first statement the main one.
    bool IsMain = (MainInst ? MainLeader == Instructions.first : Count == 0);

    std::string Name = makeStmtName(BB, BBIdx, Count, IsMain);
    scop->addScopStmt(BB, Name, L, std::move(InstList));
    Count += 1;
  }

  // Unconditionally add an epilogue (last statement). It contains no
  // instructions, but holds the PHI write accesses for successor basic blocks,
  // if the incoming value is not defined in another statement of the same BB.
  // The epilogue becomes the main statement only if there is no other
  // statement that could become main.
  // The epilogue will be removed if no PHIWrite is added to it.
  std::string EpilogueName = makeStmtName(BB, BBIdx, Count, Count == 0, true);
  scop->addScopStmt(BB, EpilogueName, L, {});
}

void ScopBuilder::buildStmts(Region &SR) {
  if (scop->isNonAffineSubRegion(&SR)) {
    std::vector<Instruction *> Instructions;
    Loop *SurroundingLoop =
        getFirstNonBoxedLoopFor(SR.getEntry(), LI, scop->getBoxedLoops());
    for (Instruction &Inst : *SR.getEntry())
      if (shouldModelInst(&Inst, SurroundingLoop))
        Instructions.push_back(&Inst);
    long RIdx = scop->getNextStmtIdx();
    std::string Name = makeStmtName(&SR, RIdx);
    scop->addScopStmt(&SR, Name, SurroundingLoop, Instructions);
    return;
  }

  for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
    if (I->isSubRegion())
      buildStmts(*I->getNodeAs<Region>());
    else {
      BasicBlock *BB = I->getNodeAs<BasicBlock>();
      switch (StmtGranularity) {
      case GranularityChoice::BasicBlocks:
        buildSequentialBlockStmts(BB);
        break;
      case GranularityChoice::ScalarIndependence:
        buildEqivClassBlockStmts(BB);
        break;
      case GranularityChoice::Stores:
        buildSequentialBlockStmts(BB, true);
        break;
      }
    }
}

void ScopBuilder::buildAccessFunctions(ScopStmt *Stmt, BasicBlock &BB,
                                       Region *NonAffineSubRegion) {
  assert(
      Stmt &&
      "The exit BB is the only one that cannot be represented by a statement");
  assert(Stmt->represents(&BB));

  // We do not build access functions for error blocks, as they may contain
  // instructions we can not model.
  if (isErrorBlock(BB, scop->getRegion(), LI, DT))
    return;

  auto BuildAccessesForInst = [this, Stmt,
                               NonAffineSubRegion](Instruction *Inst) {
    PHINode *PHI = dyn_cast<PHINode>(Inst);
    if (PHI)
      buildPHIAccesses(Stmt, PHI, NonAffineSubRegion, false);

    if (auto MemInst = MemAccInst::dyn_cast(*Inst)) {
      assert(Stmt && "Cannot build access function in non-existing statement");
      buildMemoryAccess(MemInst, Stmt);
    }

    // PHI nodes have already been modeled above and terminators that are
    // not part of a non-affine subregion are fully modeled and regenerated
    // from the polyhedral domains. Hence, they do not need to be modeled as
    // explicit data dependences.
    if (!PHI)
      buildScalarDependences(Stmt, Inst);
  };

  const InvariantLoadsSetTy &RIL = scop->getRequiredInvariantLoads();
  bool IsEntryBlock = (Stmt->getEntryBlock() == &BB);
  if (IsEntryBlock) {
    for (Instruction *Inst : Stmt->getInstructions())
      BuildAccessesForInst(Inst);
    if (Stmt->isRegionStmt())
      BuildAccessesForInst(BB.getTerminator());
  } else {
    for (Instruction &Inst : BB) {
      if (isIgnoredIntrinsic(&Inst))
        continue;

      // Invariant loads already have been processed.
      if (isa<LoadInst>(Inst) && RIL.count(cast<LoadInst>(&Inst)))
        continue;

      BuildAccessesForInst(&Inst);
    }
  }
}

MemoryAccess *ScopBuilder::addMemoryAccess(
    ScopStmt *Stmt, Instruction *Inst, MemoryAccess::AccessType AccType,
    Value *BaseAddress, Type *ElementType, bool Affine, Value *AccessValue,
    ArrayRef<const SCEV *> Subscripts, ArrayRef<const SCEV *> Sizes,
    MemoryKind Kind) {
  bool isKnownMustAccess = false;

  // Accesses in single-basic-block statements are always executed.
  if (Stmt->isBlockStmt())
    isKnownMustAccess = true;

  if (Stmt->isRegionStmt()) {
    // Accesses that dominate the exit block of a non-affine region are always
    // executed. In non-affine regions there may exist accesses that do not
    // dominate the exit. MemoryKind::Value writes always dominate the exit,
    // whereas MemoryKind::PHI writes do so only if there is at most one
    // PHI_WRITE in the non-affine region.
    if (Inst && DT.dominates(Inst->getParent(), Stmt->getRegion()->getExit()))
      isKnownMustAccess = true;
  }

  // Non-affine PHI writes do not "happen" at a particular instruction, but
  // after exiting the statement. Therefore they are guaranteed to execute and
  // overwrite the old value.
  if (Kind == MemoryKind::PHI || Kind == MemoryKind::ExitPHI)
    isKnownMustAccess = true;

  if (!isKnownMustAccess && AccType == MemoryAccess::MUST_WRITE)
    AccType = MemoryAccess::MAY_WRITE;

  auto *Access = new MemoryAccess(Stmt, Inst, AccType, BaseAddress, ElementType,
                                  Affine, Subscripts, Sizes, AccessValue, Kind);

  scop->addAccessFunction(Access);
  Stmt->addAccess(Access);
  return Access;
}

void ScopBuilder::addArrayAccess(ScopStmt *Stmt, MemAccInst MemAccInst,
                                 MemoryAccess::AccessType AccType,
                                 Value *BaseAddress, Type *ElementType,
                                 bool IsAffine,
                                 ArrayRef<const SCEV *> Subscripts,
                                 ArrayRef<const SCEV *> Sizes,
                                 Value *AccessValue) {
  ArrayBasePointers.insert(BaseAddress);
  auto *MemAccess = addMemoryAccess(Stmt, MemAccInst, AccType, BaseAddress,
                                    ElementType, IsAffine, AccessValue,
                                    Subscripts, Sizes, MemoryKind::Array);

  if (!DetectFortranArrays)
    return;

  if (Value *FAD = findFADAllocationInvisible(MemAccInst))
    MemAccess->setFortranArrayDescriptor(FAD);
  else if (Value *FAD = findFADAllocationVisible(MemAccInst))
    MemAccess->setFortranArrayDescriptor(FAD);
}

/// Check if @p Expr is divisible by @p Size.
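///
/// For example, (4 * %i + 8 * %j) is divisible by 4, whereas (4 * %i + 2)
/// is not.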
static bool isDivisible(const SCEV *Expr, unsigned Size, ScalarEvolution &SE) {
  assert(Size != 0);
  if (Size == 1)
    return true;

  // Only one factor needs to be divisible.
  if (auto *MulExpr = dyn_cast<SCEVMulExpr>(Expr)) {
    for (auto *FactorExpr : MulExpr->operands())
      if (isDivisible(FactorExpr, Size, SE))
        return true;
    return false;
  }

  // For other n-ary expressions (Add, AddRec, Max,...) all operands need
  // to be divisible.
  if (auto *NAryExpr = dyn_cast<SCEVNAryExpr>(Expr)) {
    for (auto *OpExpr : NAryExpr->operands())
      if (!isDivisible(OpExpr, Size, SE))
        return false;
    return true;
  }

  auto *SizeSCEV = SE.getConstant(Expr->getType(), Size);
  auto *UDivSCEV = SE.getUDivExpr(Expr, SizeSCEV);
  auto *MulSCEV = SE.getMulExpr(UDivSCEV, SizeSCEV);
  return MulSCEV == Expr;
}

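// If every access to an outer array dimension touches only multiples of a
// constant stride, drop the stride from the subscripts and multiply the size
// of the innermost dimension by it instead. As a sketch: accesses of the form
// A[2i][j] into an array of shape N x M can be rewritten as A[i][j] into an
// array of shape N x 2M.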
void ScopBuilder::foldSizeConstantsToRight() {
  isl::union_set Accessed = scop->getAccesses().range();

  for (auto Array : scop->arrays()) {
    if (Array->getNumberOfDimensions() <= 1)
      continue;

    isl::space Space = Array->getSpace();
    Space = Space.align_params(Accessed.get_space());

    if (!Accessed.contains(Space))
      continue;

    isl::set Elements = Accessed.extract_set(Space);
    isl::map Transform = isl::map::universe(Array->getSpace().map_from_set());

    std::vector<int> Int;
    int Dims = Elements.dim(isl::dim::set);
    for (int i = 0; i < Dims; i++) {
      isl::set DimOnly = isl::set(Elements).project_out(isl::dim::set, 0, i);
      DimOnly = DimOnly.project_out(isl::dim::set, 1, Dims - i - 1);
      DimOnly = DimOnly.lower_bound_si(isl::dim::set, 0, 0);

      isl::basic_set DimHull = DimOnly.affine_hull();

      if (i == Dims - 1) {
        Int.push_back(1);
        Transform = Transform.equate(isl::dim::in, i, isl::dim::out, i);
        continue;
      }

      if (DimHull.dim(isl::dim::div) == 1) {
        isl::aff Diff = DimHull.get_div(0);
        isl::val Val = Diff.get_denominator_val();

        int ValInt = 1;
        if (Val.is_int()) {
          auto ValAPInt = APIntFromVal(Val);
          if (ValAPInt.isSignedIntN(32))
            ValInt = ValAPInt.getSExtValue();
        }

        Int.push_back(ValInt);
        isl::constraint C = isl::constraint::alloc_equality(
            isl::local_space(Transform.get_space()));
        C = C.set_coefficient_si(isl::dim::out, i, ValInt);
        C = C.set_coefficient_si(isl::dim::in, i, -1);
        Transform = Transform.add_constraint(C);
        continue;
      }

      isl::basic_set ZeroSet = isl::basic_set(DimHull);
      ZeroSet = ZeroSet.fix_si(isl::dim::set, 0, 0);

      int ValInt = 1;
      if (ZeroSet.is_equal(DimHull)) {
        ValInt = 0;
      }

      Int.push_back(ValInt);
      Transform = Transform.equate(isl::dim::in, i, isl::dim::out, i);
    }

    isl::set MappedElements = isl::map(Transform).domain();
    if (!Elements.is_subset(MappedElements))
      continue;

    bool CanFold = true;
    if (Int[0] <= 1)
      CanFold = false;

    unsigned NumDims = Array->getNumberOfDimensions();
    for (unsigned i = 1; i < NumDims - 1; i++)
      if (Int[0] != Int[i] && Int[i])
        CanFold = false;

    if (!CanFold)
      continue;

    for (auto &Access : scop->access_functions())
      if (Access->getScopArrayInfo() == Array)
        Access->setAccessRelation(
            Access->getAccessRelation().apply_range(Transform));

    std::vector<const SCEV *> Sizes;
    for (unsigned i = 0; i < NumDims; i++) {
      auto Size = Array->getDimensionSize(i);

      if (i == NumDims - 1)
        Size = SE.getMulExpr(Size, SE.getConstant(Size->getType(), Int[0]));
      Sizes.push_back(Size);
    }

    Array->updateSizes(Sizes, false /* CheckConsistency */);
  }
}

void ScopBuilder::markFortranArrays() {
  for (ScopStmt &Stmt : *scop) {
    for (MemoryAccess *MemAcc : Stmt) {
      Value *FAD = MemAcc->getFortranArrayDescriptor();
      if (!FAD)
        continue;

      // TODO: const_cast-ing to edit
      ScopArrayInfo *SAI =
          const_cast<ScopArrayInfo *>(MemAcc->getLatestScopArrayInfo());
      assert(SAI && "memory access into a Fortran array does not "
                    "have an associated ScopArrayInfo");
      SAI->applyAndSetFAD(FAD);
    }
  }
}

void ScopBuilder::finalizeAccesses() {
  updateAccessDimensionality();
  foldSizeConstantsToRight();
  foldAccessRelations();
  assumeNoOutOfBounds();
  markFortranArrays();
}

void ScopBuilder::updateAccessDimensionality() {
  // Check all array accesses for each base pointer and find a (virtual)
  // element size for the base pointer that divides all access functions.
  for (ScopStmt &Stmt : *scop)
    for (MemoryAccess *Access : Stmt) {
      if (!Access->isArrayKind())
        continue;
      ScopArrayInfo *Array =
          const_cast<ScopArrayInfo *>(Access->getScopArrayInfo());

      if (Array->getNumberOfDimensions() != 1)
        continue;
      unsigned DivisibleSize = Array->getElemSizeInBytes();
      const SCEV *Subscript = Access->getSubscript(0);
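      // Halve the candidate element size until it divides every subscript;
      // this terminates because every expression is divisible by 1.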
      while (!isDivisible(Subscript, DivisibleSize, SE))
        DivisibleSize /= 2;
      auto *Ty = IntegerType::get(SE.getContext(), DivisibleSize * 8);
      Array->updateElementType(Ty);
    }

  for (auto &Stmt : *scop)
    for (auto &Access : Stmt)
      Access->updateDimensionality();
}

void ScopBuilder::foldAccessRelations() {
  for (auto &Stmt : *scop)
    for (auto &Access : Stmt)
      Access->foldAccessRelation();
}

void ScopBuilder::assumeNoOutOfBounds() {
  if (PollyIgnoreInbounds)
    return;
  for (auto &Stmt : *scop)
    for (auto &Access : Stmt) {
      isl::set Outside = Access->assumeNoOutOfBound();
      const auto &Loc = Access->getAccessInstruction()
                            ? Access->getAccessInstruction()->getDebugLoc()
                            : DebugLoc();
      recordAssumption(&RecordedAssumptions, INBOUNDS, Outside, Loc,
                       AS_ASSUMPTION);
    }
}

void ScopBuilder::ensureValueWrite(Instruction *Inst) {
  // Find the statement that defines the value of Inst. That statement has to
  // write the value to make it available to those statements that read it.
  ScopStmt *Stmt = scop->getStmtFor(Inst);

  // It is possible that the value is synthesizable within a loop (such that it
  // is not part of any statement), but not after the loop (where you need the
  // number of loop round-trips to synthesize it). In LCSSA-form a PHI node
  // will avoid this. In case the IR has no such PHI, use the last statement
  // (where the value is synthesizable) to write the value.
  if (!Stmt)
    Stmt = scop->getLastStmtFor(Inst->getParent());

  // Inst not defined within this SCoP.
  if (!Stmt)
    return;

  // Do not process further if the instruction is already written.
  if (Stmt->lookupValueWriteOf(Inst))
    return;

  addMemoryAccess(Stmt, Inst, MemoryAccess::MUST_WRITE, Inst, Inst->getType(),
                  true, Inst, ArrayRef<const SCEV *>(),
                  ArrayRef<const SCEV *>(), MemoryKind::Value);
}

void ScopBuilder::ensureValueRead(Value *V, ScopStmt *UserStmt) {
  // TODO: Make ScopStmt::ensureValueRead(Value*) offer the same functionality
  // to be able to replace this one. Currently, there is a split
  // responsibility. In a first step, the MemoryAccess is created, but without
  // the AccessRelation. In the second step by
  // ScopStmt::buildAccessRelations(), the AccessRelation is created. At least
  // for scalar accesses, there is no new information available at
  // ScopStmt::buildAccessRelations(), so we could create the AccessRelation
  // right away. This is what ScopStmt::ensureValueRead(Value*) does.

  auto *Scope = UserStmt->getSurroundingLoop();
  auto VUse = VirtualUse::create(scop.get(), UserStmt, Scope, V, false);
  switch (VUse.getKind()) {
  case VirtualUse::Constant:
  case VirtualUse::Block:
  case VirtualUse::Synthesizable:
  case VirtualUse::Hoisted:
  case VirtualUse::Intra:
    // Uses of these kinds do not need a MemoryAccess.
    break;

  case VirtualUse::ReadOnly:
    // Add MemoryAccess for invariant values only if requested.
    if (!ModelReadOnlyScalars)
      break;

    LLVM_FALLTHROUGH;
  case VirtualUse::Inter:

    // Do not create another MemoryAccess for reloading the value if one
    // already exists.
    if (UserStmt->lookupValueReadOf(V))
      break;

    addMemoryAccess(UserStmt, nullptr, MemoryAccess::READ, V, V->getType(),
                    true, V, ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(),
                    MemoryKind::Value);

    // Inter-statement uses need to write the value in their defining
    // statement.
    if (VUse.isInter())
      ensureValueWrite(cast<Instruction>(V));
    break;
  }
}

void ScopBuilder::ensurePHIWrite(PHINode *PHI, ScopStmt *IncomingStmt,
                                 BasicBlock *IncomingBlock,
                                 Value *IncomingValue, bool IsExitBlock) {
  // As the incoming block might turn out to be an error statement, ensure we
  // will create an exit PHI SAI object. It is needed during code generation
  // and would be created later anyway.
  if (IsExitBlock)
    scop->getOrCreateScopArrayInfo(PHI, PHI->getType(), {},
                                   MemoryKind::ExitPHI);

  // This is possible if PHI is in the SCoP's entry block. The incoming blocks
  // from outside the SCoP's region have no statement representation.
  if (!IncomingStmt)
    return;

  // Make sure the incoming value is available in the incoming block. This
  // must be done before the check for multiple PHI writes because multiple
  // exiting edges from a subregion can each be the effective written value of
  // the subregion. As such, all of them must be made available in the
  // subregion statement.
  ensureValueRead(IncomingValue, IncomingStmt);

  // Do not add more than one MemoryAccess per PHINode and ScopStmt.
  if (MemoryAccess *Acc = IncomingStmt->lookupPHIWriteOf(PHI)) {
    assert(Acc->getAccessInstruction() == PHI);
    Acc->addIncoming(IncomingBlock, IncomingValue);
    return;
  }

  MemoryAccess *Acc = addMemoryAccess(
      IncomingStmt, PHI, MemoryAccess::MUST_WRITE, PHI, PHI->getType(), true,
      PHI, ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(),
      IsExitBlock ? MemoryKind::ExitPHI : MemoryKind::PHI);
  assert(Acc);
  Acc->addIncoming(IncomingBlock, IncomingValue);
}

void ScopBuilder::addPHIReadAccess(ScopStmt *PHIStmt, PHINode *PHI) {
  addMemoryAccess(PHIStmt, PHI, MemoryAccess::READ, PHI, PHI->getType(), true,
                  PHI, ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(),
                  MemoryKind::PHI);
}

void ScopBuilder::buildDomain(ScopStmt &Stmt) {
  isl::id Id = isl::id::alloc(scop->getIslCtx(), Stmt.getBaseName(), &Stmt);

  Stmt.Domain = scop->getDomainConditions(&Stmt);
  Stmt.Domain = Stmt.Domain.set_tuple_id(Id);
}

void ScopBuilder::collectSurroundingLoops(ScopStmt &Stmt) {
  isl::set Domain = Stmt.getDomain();
  BasicBlock *BB = Stmt.getEntryBlock();

  Loop *L = LI.getLoopFor(BB);

  while (L && Stmt.isRegionStmt() && Stmt.getRegion()->contains(L))
    L = L->getParentLoop();

  SmallVector<llvm::Loop *, 8> Loops;

  while (L && Stmt.getParent()->getRegion().contains(L)) {
    Loops.push_back(L);
    L = L->getParentLoop();
  }

  Stmt.NestLoops.insert(Stmt.NestLoops.begin(), Loops.rbegin(), Loops.rend());
}

/// Return the reduction type for a given binary operator.
static MemoryAccess::ReductionType getReductionType(const BinaryOperator *BinOp,
                                                    const Instruction *Load) {
  if (!BinOp)
    return MemoryAccess::RT_NONE;
  switch (BinOp->getOpcode()) {
  case Instruction::FAdd:
    if (!BinOp->isFast())
      return MemoryAccess::RT_NONE;
    LLVM_FALLTHROUGH;
  case Instruction::Add:
    return MemoryAccess::RT_ADD;
  case Instruction::Or:
    return MemoryAccess::RT_BOR;
  case Instruction::Xor:
    return MemoryAccess::RT_BXOR;
  case Instruction::And:
    return MemoryAccess::RT_BAND;
  case Instruction::FMul:
    if (!BinOp->isFast())
      return MemoryAccess::RT_NONE;
    LLVM_FALLTHROUGH;
  case Instruction::Mul:
    if (DisableMultiplicativeReductions)
      return MemoryAccess::RT_NONE;
    return MemoryAccess::RT_MUL;
  default:
    return MemoryAccess::RT_NONE;
  }
}

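// A typical reduction candidate is the load/store pair that a statement like
// 'sum += A[i]' produces: the load of 'sum' feeds a binary operator whose
// result is stored back to 'sum', and no other access in the statement may
// overlap with that memory location.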
void ScopBuilder::checkForReductions(ScopStmt &Stmt) {
  SmallVector<MemoryAccess *, 2> Loads;
  SmallVector<std::pair<MemoryAccess *, MemoryAccess *>, 4> Candidates;

  // First collect candidate load-store reduction chains by iterating over all
  // stores and collecting possible reduction loads.
  for (MemoryAccess *StoreMA : Stmt) {
    if (StoreMA->isRead())
      continue;

    Loads.clear();
    collectCandidateReductionLoads(StoreMA, Loads);
    for (MemoryAccess *LoadMA : Loads)
      Candidates.push_back(std::make_pair(LoadMA, StoreMA));
  }

  // Then check each possible candidate pair.
  for (const auto &CandidatePair : Candidates) {
    bool Valid = true;
    isl::map LoadAccs = CandidatePair.first->getAccessRelation();
    isl::map StoreAccs = CandidatePair.second->getAccessRelation();

    // Skip those with obviously unequal base addresses.
    if (!LoadAccs.has_equal_space(StoreAccs)) {
      continue;
    }

    // And check the remaining candidates for overlap with other memory
    // accesses.
    isl::map AllAccsRel = LoadAccs.unite(StoreAccs);
    AllAccsRel = AllAccsRel.intersect_domain(Stmt.getDomain());
    isl::set AllAccs = AllAccsRel.range();

    for (MemoryAccess *MA : Stmt) {
      if (MA == CandidatePair.first || MA == CandidatePair.second)
        continue;

      isl::map AccRel =
          MA->getAccessRelation().intersect_domain(Stmt.getDomain());
      isl::set Accs = AccRel.range();

      if (AllAccs.has_equal_space(Accs)) {
        isl::set OverlapAccs = Accs.intersect(AllAccs);
        Valid = Valid && OverlapAccs.is_empty();
      }
    }

    if (!Valid)
      continue;

    const LoadInst *Load =
        dyn_cast<const LoadInst>(CandidatePair.first->getAccessInstruction());
    MemoryAccess::ReductionType RT =
        getReductionType(dyn_cast<BinaryOperator>(Load->user_back()), Load);

    // If no overlapping access was found we mark the load and store as
    // reduction like.
    CandidatePair.first->markAsReductionLike(RT);
    CandidatePair.second->markAsReductionLike(RT);
  }
}

void ScopBuilder::verifyInvariantLoads() {
  auto &RIL = scop->getRequiredInvariantLoads();
  for (LoadInst *LI : RIL) {
    assert(LI && scop->contains(LI));
    // If there exists a statement in the scop which has a memory access for
    // @p LI, then mark this scop as infeasible for optimization.
    for (ScopStmt &Stmt : *scop)
      if (Stmt.getArrayAccessOrNULLFor(LI)) {
        scop->invalidate(INVARIANTLOAD, LI->getDebugLoc(), LI->getParent());
        return;
      }
  }
}

void ScopBuilder::hoistInvariantLoads() {
  if (!PollyInvariantLoadHoisting)
    return;

  isl::union_map Writes = scop->getWrites();
  for (ScopStmt &Stmt : *scop) {
    InvariantAccessesTy InvariantAccesses;

    for (MemoryAccess *Access : Stmt)
      if (isl::set NHCtx = getNonHoistableCtx(Access, Writes))
        InvariantAccesses.push_back({Access, NHCtx});

    // Transfer the memory access from the statement to the SCoP.
    for (auto InvMA : InvariantAccesses)
      Stmt.removeMemoryAccess(InvMA.MA);
    addInvariantLoads(Stmt, InvariantAccesses);
  }
}

/// Check if an access range is too complex.
///
/// An access range is too complex if it contains either many disjuncts or
/// very complex expressions. As a simple heuristic, we assume a set to be
/// too complex if the sum of its existentially quantified dimensions and
/// set dimensions is larger than a threshold. This reliably detects both
/// sets with many disjuncts as well as sets with many divisions, as they
/// arise in h264.
///
/// @param AccessRange The range to check for complexity.
///
/// @returns True if the access range is too complex.
static bool isAccessRangeTooComplex(isl::set AccessRange) {
  int NumTotalDims = 0;

  for (isl::basic_set BSet : AccessRange.get_basic_set_list()) {
    NumTotalDims += BSet.dim(isl::dim::div);
    NumTotalDims += BSet.dim(isl::dim::set);
  }

  if (NumTotalDims > MaxDimensionsInAccessRange)
    return true;

  return false;
}

bool ScopBuilder::hasNonHoistableBasePtrInScop(MemoryAccess *MA,
                                               isl::union_map Writes) {
  if (auto *BasePtrMA = scop->lookupBasePtrAccess(MA)) {
    return getNonHoistableCtx(BasePtrMA, Writes).is_null();
  }

  Value *BaseAddr = MA->getOriginalBaseAddr();
  if (auto *BasePtrInst = dyn_cast<Instruction>(BaseAddr))
    if (!isa<LoadInst>(BasePtrInst))
      return scop->contains(BasePtrInst);

  return false;
}

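// For example (a sketch, assuming the computed context has the single
// parameter n), passing -polly-context="[n] -> { : n > 0 }" would add the
// constraint n > 0 to the assumed context.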
void ScopBuilder::addUserContext() {
  if (UserContextStr.empty())
    return;

  isl::set UserContext = isl::set(scop->getIslCtx(), UserContextStr.c_str());
  isl::space Space = scop->getParamSpace();
  if (Space.dim(isl::dim::param) != UserContext.dim(isl::dim::param)) {
    std::string SpaceStr = Space.to_str();
2838 errs() << "Error: the context provided in -polly-context has not the same "
2839 << "number of dimensions than the computed context. Due to this "
2840 << "mismatch, the -polly-context option is ignored. Please provide "
2841 << "the context in the parameter space: " << SpaceStr << ".\n";
    return;
  }

  for (unsigned i = 0; i < Space.dim(isl::dim::param); i++) {
    std::string NameContext =
        scop->getContext().get_dim_name(isl::dim::param, i);
    std::string NameUserContext = UserContext.get_dim_name(isl::dim::param, i);

    if (NameContext != NameUserContext) {
      std::string SpaceStr = Space.to_str();
      errs() << "Error: the name of dimension " << i
             << " provided in -polly-context "
             << "is '" << NameUserContext << "', but the name in the computed "
             << "context is '" << NameContext
             << "'. Due to this name mismatch, "
             << "the -polly-context option is ignored. Please provide "
             << "the context in the parameter space: " << SpaceStr << ".\n";
      return;
    }

    UserContext = UserContext.set_dim_id(isl::dim::param, i,
                                         Space.get_dim_id(isl::dim::param, i));
  }
  isl::set newContext = scop->getContext().intersect(UserContext);
  scop->setContext(newContext);
}

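// Returns the context under which @p Access may be written and thus is not
// invariant: an empty set means the load is invariant and can be hoisted,
// while a null return means it must not be hoisted at all.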
isl::set ScopBuilder::getNonHoistableCtx(MemoryAccess *Access,
                                         isl::union_map Writes) {
  // TODO: Loads that are not loop carried, hence are in a statement with
  //       zero iterators, are by construction invariant, though we
  //       currently "hoist" them anyway. This is necessary because we allow
  //       them to be treated as parameters (e.g., in conditions) and our code
  //       generation would otherwise use the old value.

  auto &Stmt = *Access->getStatement();
  BasicBlock *BB = Stmt.getEntryBlock();

  if (Access->isScalarKind() || Access->isWrite() || !Access->isAffine() ||
      Access->isMemoryIntrinsic())
    return nullptr;

  // Skip accesses that have an invariant base pointer which is defined but
  // not loaded inside the SCoP. This can happen, e.g., if a readnone call
  // returns a pointer that is used as a base address. However, as we want
  // to hoist indirect pointers, we allow the base pointer to be defined in
  // the region if it is also a memory access. Each ScopArrayInfo object
  // that has a base pointer origin has a base pointer that is loaded and
  // invariant, thus it will be hoisted too. However, if there is no base
  // pointer origin we check that the base pointer is defined outside the
  // region.
2893 auto *LI = cast<LoadInst>(Access->getAccessInstruction());
2894 if (hasNonHoistableBasePtrInScop(Access, Writes))
2895 return nullptr;
2896
2897 isl::map AccessRelation = Access->getAccessRelation();
2898 assert(!AccessRelation.is_empty());
2899
2900 if (AccessRelation.involves_dims(isl::dim::in, 0, Stmt.getNumIterators()))
2901 return nullptr;
2902
2903 AccessRelation = AccessRelation.intersect_domain(Stmt.getDomain());
2904 isl::set SafeToLoad;
2905
2906 auto &DL = scop->getFunction().getParent()->getDataLayout();
2907 if (isSafeToLoadUnconditionally(LI->getPointerOperand(), LI->getType(),
2908 LI->getAlign(), DL)) {
2909 SafeToLoad = isl::set::universe(AccessRelation.get_space().range());
2910 } else if (BB != LI->getParent()) {
2911 // Skip accesses in non-affine subregions as they might not be executed
2912 // under the same condition as the entry of the non-affine subregion.
2913 return nullptr;
2914 } else {
2915 SafeToLoad = AccessRelation.range();
2916 }
2917
2918 if (isAccessRangeTooComplex(AccessRelation.range()))
2919 return nullptr;
2920
2921 isl::union_map Written = Writes.intersect_range(SafeToLoad);
2922 isl::set WrittenCtx = Written.params();
2923 bool IsWritten = !WrittenCtx.is_empty();
2924
2925 if (!IsWritten)
2926 return WrittenCtx;
2927
2928 WrittenCtx = WrittenCtx.remove_divs();
2929 bool TooComplex = WrittenCtx.n_basic_set() >= MaxDisjunctsInDomain;
2930 if (TooComplex || !isRequiredInvariantLoad(LI))
2931 return nullptr;
2932
2933 scop->addAssumption(INVARIANTLOAD, WrittenCtx, LI->getDebugLoc(),
2934 AS_RESTRICTION, LI->getParent());
2935 return WrittenCtx;
2936 }
2937
static bool isAParameter(llvm::Value *maybeParam, const Function &F) {
  for (const llvm::Argument &Arg : F.args())
    if (&Arg == maybeParam)
      return true;

  return false;
}

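/// Check whether @p MA can be hoisted under all circumstances, i.e., without
/// a guarding execution context: the pointer is known dereferenceable (or is
/// a function parameter and PollyAllowDereferenceOfAllFunctionParams is set)
/// and the loaded location cannot be overwritten during SCoP execution.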
bool ScopBuilder::canAlwaysBeHoisted(MemoryAccess *MA,
                                     bool StmtInvalidCtxIsEmpty,
                                     bool MAInvalidCtxIsEmpty,
                                     bool NonHoistableCtxIsEmpty) {
  LoadInst *LInst = cast<LoadInst>(MA->getAccessInstruction());
  const DataLayout &DL = LInst->getParent()->getModule()->getDataLayout();
  if (PollyAllowDereferenceOfAllFunctionParams &&
      isAParameter(LInst->getPointerOperand(), scop->getFunction()))
    return true;

  // TODO: We can provide more information for better but more expensive
  //       results.
  if (!isDereferenceableAndAlignedPointer(
          LInst->getPointerOperand(), LInst->getType(), LInst->getAlign(), DL))
    return false;

  // If the location might be overwritten we do not hoist it unconditionally.
  //
  // TODO: This is probably too conservative.
  if (!NonHoistableCtxIsEmpty)
    return false;

  // If a dereferenceable load is in a statement that is modeled precisely we
  // can hoist it.
  if (StmtInvalidCtxIsEmpty && MAInvalidCtxIsEmpty)
    return true;

  // Even if the statement is not modeled precisely we can hoist the load if
  // it does not involve any parameters that might have been specialized by
  // the statement domain.
  for (const SCEV *Subscript : MA->subscripts())
    if (!isa<SCEVConstant>(Subscript))
      return false;
  return true;
}

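/// Add the invariant memory accesses in @p InvMAs, computed for statement
/// @p Stmt, to the SCoP's invariant-load equivalence classes, together with
/// the execution context under which each hoisted load is valid.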
void ScopBuilder::addInvariantLoads(ScopStmt &Stmt,
                                    InvariantAccessesTy &InvMAs) {
  if (InvMAs.empty())
    return;

  isl::set StmtInvalidCtx = Stmt.getInvalidContext();
  bool StmtInvalidCtxIsEmpty = StmtInvalidCtx.is_empty();

  // Get the context under which the statement is executed but remove the
  // error context under which this statement is reached.
  isl::set DomainCtx = Stmt.getDomain().params();
  DomainCtx = DomainCtx.subtract(StmtInvalidCtx);

  if (DomainCtx.n_basic_set() >= MaxDisjunctsInDomain) {
    auto *AccInst = InvMAs.front().MA->getAccessInstruction();
    scop->invalidate(COMPLEXITY, AccInst->getDebugLoc(), AccInst->getParent());
    return;
  }

  // Project out all parameters that relate to loads in the statement.
  // Otherwise we could have cyclic dependences on the constraints under which
  // the hoisted loads are executed and we could not determine an order in
  // which to pre-load them. This happens because not only lower bounds are
  // part of the domain but also upper bounds.
  for (auto &InvMA : InvMAs) {
    auto *MA = InvMA.MA;
    Instruction *AccInst = MA->getAccessInstruction();
    if (SE.isSCEVable(AccInst->getType())) {
      SetVector<Value *> Values;
      for (const SCEV *Parameter : scop->parameters()) {
        Values.clear();
        findValues(Parameter, SE, Values);
        if (!Values.count(AccInst))
          continue;

        if (isl::id ParamId = scop->getIdForParam(Parameter)) {
          int Dim = DomainCtx.find_dim_by_id(isl::dim::param, ParamId);
          if (Dim >= 0)
            DomainCtx = DomainCtx.eliminate(isl::dim::param, Dim, 1);
        }
      }
    }
  }

  for (auto &InvMA : InvMAs) {
    auto *MA = InvMA.MA;
    isl::set NHCtx = InvMA.NonHoistableCtx;

    // Check for another invariant access that accesses the same location as
    // MA and if found consolidate them. Otherwise create a new equivalence
    // class at the end of InvariantEquivClasses.
    LoadInst *LInst = cast<LoadInst>(MA->getAccessInstruction());
    Type *Ty = LInst->getType();
    const SCEV *PointerSCEV = SE.getSCEV(LInst->getPointerOperand());

    isl::set MAInvalidCtx = MA->getInvalidContext();
    bool NonHoistableCtxIsEmpty = NHCtx.is_empty();
    bool MAInvalidCtxIsEmpty = MAInvalidCtx.is_empty();

    isl::set MACtx;
    // Check if we know that this pointer can be speculatively accessed.
    if (canAlwaysBeHoisted(MA, StmtInvalidCtxIsEmpty, MAInvalidCtxIsEmpty,
                           NonHoistableCtxIsEmpty)) {
      MACtx = isl::set::universe(DomainCtx.get_space());
    } else {
      MACtx = DomainCtx;
      MACtx = MACtx.subtract(MAInvalidCtx.unite(NHCtx));
      MACtx = MACtx.gist_params(scop->getContext());
    }

    bool Consolidated = false;
    for (auto &IAClass : scop->invariantEquivClasses()) {
      if (PointerSCEV != IAClass.IdentifyingPointer || Ty != IAClass.AccessType)
        continue;

      // If the pointer and the type are equal, check if the access function
      // w.r.t. the domain is equal too. It can happen that the domain fixes
      // parameter values and these can be different for distinct parts of the
      // SCoP. If this happens we cannot consolidate the loads but need to
      // create a new invariant load equivalence class.
      auto &MAs = IAClass.InvariantAccesses;
      if (!MAs.empty()) {
        auto *LastMA = MAs.front();

        isl::set AR = MA->getAccessRelation().range();
        isl::set LastAR = LastMA->getAccessRelation().range();
        bool SameAR = AR.is_equal(LastAR);

        if (!SameAR)
          continue;
      }

      // Add MA to the list of accesses that are in this class.
      MAs.push_front(MA);

      Consolidated = true;

      // Unify the execution context of the class and this statement.
      isl::set IAClassDomainCtx = IAClass.ExecutionContext;
      if (IAClassDomainCtx)
        IAClassDomainCtx = IAClassDomainCtx.unite(MACtx).coalesce();
      else
        IAClassDomainCtx = MACtx;
      IAClass.ExecutionContext = IAClassDomainCtx;
      break;
    }

    if (Consolidated)
      continue;

    MACtx = MACtx.coalesce();

    // If we did not consolidate MA, thus did not find an equivalence class
    // for it, we create a new one.
    scop->addInvariantEquivClass(
        InvariantEquivClassTy{PointerSCEV, MemoryAccessList{MA}, MACtx, Ty});
  }
}

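/// Collect loads which might form a reduction chain with @p StoreMA.
///
/// A typical candidate (IR names purely illustrative) is the pattern
/// generated for 'sum += A[i]':
///
///   %val = load i32, i32* %arrayidx   ; candidate load, single use
///   %add = add i32 %sum, %val         ; single-use, commutative, associative
///   store i32 %add, i32* %sumptr      ; store fed directly by the binop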
void ScopBuilder::collectCandidateReductionLoads(
    MemoryAccess *StoreMA, SmallVectorImpl<MemoryAccess *> &Loads) {
  ScopStmt *Stmt = StoreMA->getStatement();

  auto *Store = dyn_cast<StoreInst>(StoreMA->getAccessInstruction());
  if (!Store)
    return;

  // Skip if there is not exactly one binary operator between the load and
  // the store.
  auto *BinOp = dyn_cast<BinaryOperator>(Store->getValueOperand());
  if (!BinOp)
    return;

  // Skip if the binary operator has multiple uses.
  if (BinOp->getNumUses() != 1)
    return;

  // Skip if the opcode of the binary operator is not commutative/associative.
  if (!BinOp->isCommutative() || !BinOp->isAssociative())
    return;

  // Skip if the binary operator is not in the same basic block as the store.
  if (BinOp->getParent() != Store->getParent())
    return;

  // Skip if it is a multiplicative reduction and we disabled them.
  if (DisableMultiplicativeReductions &&
      (BinOp->getOpcode() == Instruction::Mul ||
       BinOp->getOpcode() == Instruction::FMul))
    return;

  // Check the binary operator operands for a candidate load.
  auto *PossibleLoad0 = dyn_cast<LoadInst>(BinOp->getOperand(0));
  auto *PossibleLoad1 = dyn_cast<LoadInst>(BinOp->getOperand(1));
  if (!PossibleLoad0 && !PossibleLoad1)
    return;

  // A load is only a candidate if it cannot escape (thus has only this use).
  if (PossibleLoad0 && PossibleLoad0->getNumUses() == 1)
    if (PossibleLoad0->getParent() == Store->getParent())
      Loads.push_back(&Stmt->getArrayAccessFor(PossibleLoad0));
  if (PossibleLoad1 && PossibleLoad1->getNumUses() == 1)
    if (PossibleLoad1->getParent() == Store->getParent())
      Loads.push_back(&Stmt->getArrayAccessFor(PossibleLoad1));
}

/// Find the canonical scop array info object for a set of hoisted invariant
/// loads. The canonical array is the one that corresponds to the first load
/// in the list of accesses that is used as the base pointer of a scop array.
static const ScopArrayInfo *findCanonicalArray(Scop &S,
                                               MemoryAccessList &Accesses) {
  for (MemoryAccess *Access : Accesses) {
    const ScopArrayInfo *CanonicalArray = S.getScopArrayInfoOrNull(
        Access->getAccessInstruction(), MemoryKind::Array);
    if (CanonicalArray)
      return CanonicalArray;
  }
  return nullptr;
}

/// Check if @p Array serves as the base array in an invariant load.
static bool isUsedForIndirectHoistedLoad(Scop &S, const ScopArrayInfo *Array) {
  for (InvariantEquivClassTy &EqClass2 : S.getInvariantAccesses())
    for (MemoryAccess *Access2 : EqClass2.InvariantAccesses)
      if (Access2->getScopArrayInfo() == Array)
        return true;
  return false;
}

/// Replace the base pointer arrays in all memory accesses referencing @p Old
/// with a reference to @p New.
static void replaceBasePtrArrays(Scop &S, const ScopArrayInfo *Old,
                                 const ScopArrayInfo *New) {
  for (ScopStmt &Stmt : S)
    for (MemoryAccess *Access : Stmt) {
      if (Access->getLatestScopArrayInfo() != Old)
        continue;

      isl::id Id = New->getBasePtrId();
      isl::map Map = Access->getAccessRelation();
      Map = Map.set_tuple_id(isl::dim::out, Id);
      Access->setAccessRelation(Map);
    }
}

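/// Redirect accesses through dynamically equivalent base pointers to a single
/// canonical array, so that later passes see one ScopArrayInfo object per
/// invariant-load equivalence class instead of several compatible copies.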
void ScopBuilder::canonicalizeDynamicBasePtrs() {
  for (InvariantEquivClassTy &EqClass : scop->InvariantEquivClasses) {
    MemoryAccessList &BasePtrAccesses = EqClass.InvariantAccesses;

    const ScopArrayInfo *CanonicalBasePtrSAI =
        findCanonicalArray(*scop, BasePtrAccesses);

    if (!CanonicalBasePtrSAI)
      continue;

    for (MemoryAccess *BasePtrAccess : BasePtrAccesses) {
      const ScopArrayInfo *BasePtrSAI = scop->getScopArrayInfoOrNull(
          BasePtrAccess->getAccessInstruction(), MemoryKind::Array);
      if (!BasePtrSAI || BasePtrSAI == CanonicalBasePtrSAI ||
          !BasePtrSAI->isCompatibleWith(CanonicalBasePtrSAI))
        continue;

      // We currently do not canonicalize arrays where some accesses are
      // hoisted as invariant loads. If we did, we would also need to update
      // the access function of the invariant loads. However, as this is not
      // a very common situation, we leave this for now to avoid further
      // complexity increases.
      if (isUsedForIndirectHoistedLoad(*scop, BasePtrSAI))
        continue;

      replaceBasePtrArrays(*scop, BasePtrSAI, CanonicalBasePtrSAI);
    }
  }
}

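/// Build the access relations of all memory accesses in @p Stmt and register
/// the underlying arrays with the SCoP.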
void ScopBuilder::buildAccessRelations(ScopStmt &Stmt) {
  for (MemoryAccess *Access : Stmt.MemAccs) {
    Type *ElementType = Access->getElementType();

    MemoryKind Ty;
    if (Access->isPHIKind())
      Ty = MemoryKind::PHI;
    else if (Access->isExitPHIKind())
      Ty = MemoryKind::ExitPHI;
    else if (Access->isValueKind())
      Ty = MemoryKind::Value;
    else
      Ty = MemoryKind::Array;

    // Create isl::pw_aff for SCEVs which describe sizes. Collect all
    // assumptions which are taken. isl::pw_aff objects are cached internally
    // and they are used later by the SCoP.
    for (const SCEV *Size : Access->Sizes) {
      if (!Size)
        continue;
      scop->getPwAff(Size, nullptr, false, &RecordedAssumptions);
    }
    auto *SAI = scop->getOrCreateScopArrayInfo(Access->getOriginalBaseAddr(),
                                               ElementType, Access->Sizes, Ty);

    // Create isl::pw_aff for SCEVs which describe subscripts. Collect all
    // assumptions which are taken. isl::pw_aff objects are cached internally
    // and they are used later by the SCoP.
    for (const SCEV *Subscript : Access->subscripts()) {
      if (!Access->isAffine() || !Subscript)
        continue;
      scop->getPwAff(Subscript, Stmt.getEntryBlock(), false,
                     &RecordedAssumptions);
    }
    Access->buildAccessRelation(SAI);
    scop->addAccessData(Access);
  }
}

/// Add the minimal/maximal access in @p Set to @p MinMaxAccesses.
///
/// @return True if more accesses should be added, false if we reached the
///         maximal number of run-time checks to be generated.
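///
/// For example (an illustrative case), if @p Set describes the accessed
/// elements A[0], ..., A[n-1], the minimal access is A[0] and the maximal
/// access is adjusted to A[n], so the interval spanned by the two encloses
/// every accessed element even though A[n] itself is never dereferenced.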
static bool buildMinMaxAccess(isl::set Set,
                              Scop::MinMaxVectorTy &MinMaxAccesses, Scop &S) {
  isl::pw_multi_aff MinPMA, MaxPMA;
  isl::pw_aff LastDimAff;
  isl::aff OneAff;
  unsigned Pos;

  Set = Set.remove_divs();
  polly::simplify(Set);

  if (Set.n_basic_set() > RunTimeChecksMaxAccessDisjuncts)
    Set = Set.simple_hull();

  // Restrict the number of parameters involved in the access as the lexmin/
  // lexmax computation will take too long if this number is high.
  //
  // Experiments with a simple test case using an i7 4800MQ:
  //
  //  #Parameters involved | Time (in sec)
  //            6          |     0.01
  //            7          |     0.04
  //            8          |     0.12
  //            9          |     0.40
  //           10          |     1.54
  //           11          |     6.78
  //           12          |    30.38
  //
  if (isl_set_n_param(Set.get()) >
      static_cast<isl_size>(RunTimeChecksMaxParameters)) {
    unsigned InvolvedParams = 0;
    for (unsigned u = 0, e = isl_set_n_param(Set.get()); u < e; u++)
      if (Set.involves_dims(isl::dim::param, u, 1))
        InvolvedParams++;

    if (InvolvedParams > RunTimeChecksMaxParameters)
      return false;
  }

  MinPMA = Set.lexmin_pw_multi_aff();
  MaxPMA = Set.lexmax_pw_multi_aff();

  MinPMA = MinPMA.coalesce();
  MaxPMA = MaxPMA.coalesce();

  // Bail out if the lexmin/lexmax computation did not succeed, e.g., because
  // it ran into the operations limit.
  if (!MinPMA || !MaxPMA)
    return false;

  // Adjust the last dimension of the maximal access by one as we want to
  // enclose the accessed memory region by MinPMA and MaxPMA. The pointer
  // we test during code generation might now point after the end of the
  // allocated array but we will never dereference it anyway.
  assert(MaxPMA.dim(isl::dim::out) && "Assumed at least one output dimension");

  Pos = MaxPMA.dim(isl::dim::out) - 1;
  LastDimAff = MaxPMA.get_pw_aff(Pos);
  OneAff = isl::aff(isl::local_space(LastDimAff.get_domain_space()));
  OneAff = OneAff.add_constant_si(1);
  LastDimAff = LastDimAff.add(OneAff);
  MaxPMA = MaxPMA.set_pw_aff(Pos, LastDimAff);

  MinMaxAccesses.push_back(std::make_pair(MinPMA, MaxPMA));

  return true;
}

/// Wrapper function to calculate the minimal/maximal accesses to each array.
bool ScopBuilder::calculateMinMaxAccess(AliasGroupTy AliasGroup,
                                        Scop::MinMaxVectorTy &MinMaxAccesses) {
  MinMaxAccesses.reserve(AliasGroup.size());

  isl::union_set Domains = scop->getDomains();
  isl::union_map Accesses = isl::union_map::empty(scop->getParamSpace());

  for (MemoryAccess *MA : AliasGroup)
    Accesses = Accesses.add_map(MA->getAccessRelation());

  Accesses = Accesses.intersect_domain(Domains);
  isl::union_set Locations = Accesses.range();

  bool LimitReached = false;
  for (isl::set Set : Locations.get_set_list()) {
    LimitReached |= !buildMinMaxAccess(Set, MinMaxAccesses, *scop);
    if (LimitReached)
      break;
  }

  return !LimitReached;
}

static isl::set getAccessDomain(MemoryAccess *MA) {
  isl::set Domain = MA->getStatement()->getDomain();
  Domain = Domain.project_out(isl::dim::set, 0, Domain.n_dim());
  return Domain.reset_tuple_id();
}

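/// Build the run-time alias checks for this SCoP if they are enabled.
///
/// @returns True if the SCoP remains valid, false if it has to be dismissed
///          because sound alias checks could not be constructed.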
bool ScopBuilder::buildAliasChecks() {
  if (!PollyUseRuntimeAliasChecks)
    return true;

  if (buildAliasGroups()) {
    // Aliasing assumptions do not go through addAssumption but we still want
    // to collect statistics so we do it here explicitly.
    if (scop->getAliasGroups().size())
      Scop::incrementNumberOfAliasingAssumptions(1);
    return true;
  }

  // If a problem occurs while building the alias groups we need to delete
  // this SCoP and pretend it wasn't valid in the first place. To this end
  // we make the assumed context infeasible.
  scop->invalidate(ALIASING, DebugLoc());

  LLVM_DEBUG(
      dbgs() << "\n\nNOTE: Run time checks for " << scop->getNameStr()
             << " could not be created as the number of parameters involved "
                "is too high. The SCoP will be "
                "dismissed.\nUse:\n\t--polly-rtc-max-parameters=X\nto adjust "
                "the maximal number of parameters but be advised that the "
                "compile time might increase exponentially.\n\n");
  return false;
}

std::tuple<ScopBuilder::AliasGroupVectorTy, DenseSet<const ScopArrayInfo *>>
ScopBuilder::buildAliasGroupsForAccesses() {
  AliasSetTracker AST(AA);

  DenseMap<Value *, MemoryAccess *> PtrToAcc;
  DenseSet<const ScopArrayInfo *> HasWriteAccess;
  for (ScopStmt &Stmt : *scop) {

    isl::set StmtDomain = Stmt.getDomain();
    bool StmtDomainEmpty = StmtDomain.is_empty();

    // Statements with an empty domain will never be executed.
    if (StmtDomainEmpty)
      continue;

    for (MemoryAccess *MA : Stmt) {
      if (MA->isScalarKind())
        continue;
      if (!MA->isRead())
        HasWriteAccess.insert(MA->getScopArrayInfo());
      MemAccInst Acc(MA->getAccessInstruction());
      if (MA->isRead() && isa<MemTransferInst>(Acc))
        PtrToAcc[cast<MemTransferInst>(Acc)->getRawSource()] = MA;
      else
        PtrToAcc[Acc.getPointerOperand()] = MA;
      AST.add(Acc);
    }
  }

  AliasGroupVectorTy AliasGroups;
  for (AliasSet &AS : AST) {
    if (AS.isMustAlias() || AS.isForwardingAliasSet())
      continue;
    AliasGroupTy AG;
    for (auto &PR : AS)
      AG.push_back(PtrToAcc[PR.getValue()]);
    if (AG.size() < 2)
      continue;
    AliasGroups.push_back(std::move(AG));
  }

  return std::make_tuple(AliasGroups, HasWriteAccess);
}

bool ScopBuilder::buildAliasGroups() {
  // To create sound alias checks we perform the following steps:
  //   o) We partition each group into read only and non read only accesses.
  //   o) For each group with more than one base pointer we then compute
  //      minimal and maximal accesses to each array of a group in read only
  //      and non read only partitions separately.
  AliasGroupVectorTy AliasGroups;
  DenseSet<const ScopArrayInfo *> HasWriteAccess;

  std::tie(AliasGroups, HasWriteAccess) = buildAliasGroupsForAccesses();

  splitAliasGroupsByDomain(AliasGroups);

  for (AliasGroupTy &AG : AliasGroups) {
    if (!scop->hasFeasibleRuntimeContext())
      return false;

    {
      IslMaxOperationsGuard MaxOpGuard(scop->getIslCtx().get(), OptComputeOut);
      bool Valid = buildAliasGroup(AG, HasWriteAccess);
      if (!Valid)
        return false;
    }
    if (isl_ctx_last_error(scop->getIslCtx().get()) == isl_error_quota) {
      scop->invalidate(COMPLEXITY, DebugLoc());
      return false;
    }
  }

  return true;
}

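/// Build the minimal/maximal access pairs for one alias group.
///
/// The group is split into read-only and read-write accesses; a run-time
/// check is emitted only if at least one read-write array may alias another
/// array of the group.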
bool ScopBuilder::buildAliasGroup(
    AliasGroupTy &AliasGroup, DenseSet<const ScopArrayInfo *> HasWriteAccess) {
  AliasGroupTy ReadOnlyAccesses;
  AliasGroupTy ReadWriteAccesses;
  SmallPtrSet<const ScopArrayInfo *, 4> ReadWriteArrays;
  SmallPtrSet<const ScopArrayInfo *, 4> ReadOnlyArrays;

  if (AliasGroup.size() < 2)
    return true;

  for (MemoryAccess *Access : AliasGroup) {
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "PossibleAlias",
                                        Access->getAccessInstruction())
             << "Possibly aliasing pointer, use restrict keyword.");
    const ScopArrayInfo *Array = Access->getScopArrayInfo();
    if (HasWriteAccess.count(Array)) {
      ReadWriteArrays.insert(Array);
      ReadWriteAccesses.push_back(Access);
    } else {
      ReadOnlyArrays.insert(Array);
      ReadOnlyAccesses.push_back(Access);
    }
  }

  // If there are no read-only pointers, and fewer than two read-write
  // pointers, no alias check is needed.
  if (ReadOnlyAccesses.empty() && ReadWriteArrays.size() <= 1)
    return true;

  // If there is no read-write pointer, no alias check is needed.
  if (ReadWriteArrays.empty())
    return true;

  // For non-affine accesses, no alias check can be generated as we cannot
  // compute a sufficiently tight lower and upper bound: bail out.
  for (MemoryAccess *MA : AliasGroup) {
    if (!MA->isAffine()) {
      scop->invalidate(ALIASING, MA->getAccessInstruction()->getDebugLoc(),
                       MA->getAccessInstruction()->getParent());
      return false;
    }
  }

  // Ensure that for all memory accesses for which we generate alias checks,
  // their base pointers are available.
  for (MemoryAccess *MA : AliasGroup) {
    if (MemoryAccess *BasePtrMA = scop->lookupBasePtrAccess(MA))
      scop->addRequiredInvariantLoad(
          cast<LoadInst>(BasePtrMA->getAccessInstruction()));
  }

  Scop::MinMaxVectorTy MinMaxAccessesReadWrite;
  Scop::MinMaxVectorTy MinMaxAccessesReadOnly;

  bool Valid;

  Valid = calculateMinMaxAccess(ReadWriteAccesses, MinMaxAccessesReadWrite);

  if (!Valid)
    return false;

  // Bail out if the number of values we need to compare is too large.
  // This is important as the number of comparisons grows quadratically with
  // the number of values we need to compare.
  if (MinMaxAccessesReadWrite.size() + ReadOnlyArrays.size() >
      RunTimeChecksMaxArraysPerGroup)
    return false;

  Valid = calculateMinMaxAccess(ReadOnlyAccesses, MinMaxAccessesReadOnly);

  scop->addAliasGroup(MinMaxAccessesReadWrite, MinMaxAccessesReadOnly);
  if (!Valid)
    return false;

  return true;
}

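/// Split alias groups by their domain: accesses whose (parametric) execution
/// domains are disjoint can never execute together, so no alias check between
/// them is needed and they are moved into separate groups. For example (an
/// illustrative case), accesses guarded by 'if (n > 0)' and 'if (n <= 0)'
/// respectively have disjoint domains and end up in different groups.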
void ScopBuilder::splitAliasGroupsByDomain(AliasGroupVectorTy &AliasGroups) {
  for (unsigned u = 0; u < AliasGroups.size(); u++) {
    AliasGroupTy NewAG;
    AliasGroupTy &AG = AliasGroups[u];
    AliasGroupTy::iterator AGI = AG.begin();
    isl::set AGDomain = getAccessDomain(*AGI);
    while (AGI != AG.end()) {
      MemoryAccess *MA = *AGI;
      isl::set MADomain = getAccessDomain(MA);
      if (AGDomain.is_disjoint(MADomain)) {
        NewAG.push_back(MA);
        AGI = AG.erase(AGI);
      } else {
        AGDomain = AGDomain.unite(MADomain);
        AGI++;
      }
    }
    if (NewAG.size() > 1)
      AliasGroups.push_back(std::move(NewAG));
  }
}

#ifndef NDEBUG
static void verifyUse(Scop *S, Use &Op, LoopInfo &LI) {
  auto PhysUse = VirtualUse::create(S, Op, &LI, false);
  auto VirtUse = VirtualUse::create(S, Op, &LI, true);
  assert(PhysUse.getKind() == VirtUse.getKind());
}

/// Check the consistency of every statement's MemoryAccesses.
///
/// The check is carried out by expecting the "physical" kind of use (derived
/// from the BasicBlock the instruction resides in) to be the same as the
/// "virtual" kind of use (derived from a statement's MemoryAccess).
///
/// The "physical" uses are taken by ensureValueRead to determine whether to
/// create MemoryAccesses. When done, the kind of scalar access should be the
/// same no matter which way it was derived.
///
/// The MemoryAccesses might be changed by later SCoP-modifying passes and
/// hence can intentionally influence the kind of uses (no longer
/// corresponding to the "physical" ones, hence called "virtual"). The
/// CodeGenerator therefore has to pick up the virtual uses. At this point,
/// however, no such pass has run yet, such that virtual and physical uses
/// are equivalent.
static void verifyUses(Scop *S, LoopInfo &LI, DominatorTree &DT) {
  for (auto *BB : S->getRegion().blocks()) {
    for (auto &Inst : *BB) {
      auto *Stmt = S->getStmtFor(&Inst);
      if (!Stmt)
        continue;

      if (isIgnoredIntrinsic(&Inst))
        continue;

      // Branch conditions are encoded in the statement domains.
      if (Inst.isTerminator() && Stmt->isBlockStmt())
        continue;

      // Verify all uses.
      for (auto &Op : Inst.operands())
        verifyUse(S, Op, LI);

      // Stores do not produce values used by other statements.
      if (isa<StoreInst>(Inst))
        continue;

      // For every value defined in the block, also check that a use of that
      // value in the same statement would not be an inter-statement use. It
      // can still be synthesizable or load-hoisted, but these kinds of
      // instructions are not directly copied in code-generation.
      auto VirtDef =
          VirtualUse::create(S, Stmt, Stmt->getSurroundingLoop(), &Inst, true);
      assert(VirtDef.getKind() == VirtualUse::Synthesizable ||
             VirtDef.getKind() == VirtualUse::Intra ||
             VirtDef.getKind() == VirtualUse::Hoisted);
    }
  }

  if (S->hasSingleExitEdge())
    return;

  // PHINodes in the SCoP region's exit block are also uses to be checked.
  if (!S->getRegion().isTopLevelRegion()) {
    for (auto &Inst : *S->getRegion().getExit()) {
      if (!isa<PHINode>(Inst))
        break;

      for (auto &Op : Inst.operands())
        verifyUse(S, Op, LI);
    }
  }
}
#endif

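/// Drive the construction of the polyhedral description: create statements,
/// model required invariant loads, build domains, access relations and the
/// schedule, and finally add alias checks and hoist invariant loads.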
void ScopBuilder::buildScop(Region &R, AssumptionCache &AC) {
  scop.reset(new Scop(R, SE, LI, DT, *SD.getDetectionContext(&R), ORE,
                      SD.getNextID()));

  buildStmts(R);

  // Create all invariant load instructions first. These are categorized as
  // 'synthesizable', therefore are not part of any ScopStmt but need to be
  // created somewhere.
  const InvariantLoadsSetTy &RIL = scop->getRequiredInvariantLoads();
  for (BasicBlock *BB : scop->getRegion().blocks()) {
    if (isErrorBlock(*BB, scop->getRegion(), LI, DT))
      continue;

    for (Instruction &Inst : *BB) {
      LoadInst *Load = dyn_cast<LoadInst>(&Inst);
      if (!Load)
        continue;

      if (!RIL.count(Load))
        continue;

      // Invariant loads require a MemoryAccess to be created in some
      // statement. It is not important to which statement the MemoryAccess
      // is added because it will later be removed from the ScopStmt again.
      // We choose the first statement of the basic block the LoadInst is in.
      ArrayRef<ScopStmt *> List = scop->getStmtListFor(BB);
      assert(!List.empty());
      ScopStmt *RILStmt = List.front();
      buildMemoryAccess(Load, RILStmt);
    }
  }
  buildAccessFunctions();

  // In case the region does not have an exiting block we will later (during
  // code generation) split the exit block. This will move potential PHI nodes
  // from the current exit block into the new region exiting block. Hence,
  // PHI nodes that currently are not part of the region will become part of
  // it. To handle these PHI nodes later we will now model their operands as
  // scalar accesses. Note that we do not model anything in the exit block if
  // we have an exiting block in the region, as there will not be any
  // splitting later.
  if (!R.isTopLevelRegion() && !scop->hasSingleExitEdge()) {
    for (Instruction &Inst : *R.getExit()) {
      PHINode *PHI = dyn_cast<PHINode>(&Inst);
      if (!PHI)
        break;

      buildPHIAccesses(nullptr, PHI, nullptr, true);
    }
  }

  // Create memory accesses for global reads since all arrays are now known.
  auto *AF = SE.getConstant(IntegerType::getInt64Ty(SE.getContext()), 0);
  for (auto GlobalReadPair : GlobalReads) {
    ScopStmt *GlobalReadStmt = GlobalReadPair.first;
    Instruction *GlobalRead = GlobalReadPair.second;
    for (auto *BP : ArrayBasePointers)
      addArrayAccess(GlobalReadStmt, MemAccInst(GlobalRead),
                     MemoryAccess::READ, BP, BP->getType(), false, {AF},
                     {nullptr}, GlobalRead);
  }

  buildInvariantEquivalenceClasses();

  // A map from basic blocks to their invalid domains.
  DenseMap<BasicBlock *, isl::set> InvalidDomainMap;

  if (!buildDomains(&R, InvalidDomainMap)) {
    LLVM_DEBUG(
        dbgs() << "Bailing-out because buildDomains encountered problems\n");
    return;
  }

  addUserAssumptions(AC, InvalidDomainMap);

  // Initialize the invalid domain.
  for (ScopStmt &Stmt : scop->Stmts)
    if (Stmt.isBlockStmt())
      Stmt.setInvalidDomain(InvalidDomainMap[Stmt.getEntryBlock()]);
    else
      Stmt.setInvalidDomain(InvalidDomainMap[getRegionNodeBasicBlock(
          Stmt.getRegion()->getNode())]);

  // Remove empty statements.
  // Exit early in case there are no executable statements left in this scop.
  scop->removeStmtNotInDomainMap();
  scop->simplifySCoP(false);
  if (scop->isEmpty()) {
    LLVM_DEBUG(dbgs() << "Bailing-out because SCoP is empty\n");
    return;
  }

  // The ScopStmts now have enough information to initialize themselves.
  for (ScopStmt &Stmt : *scop) {
    collectSurroundingLoops(Stmt);

    buildDomain(Stmt);
    buildAccessRelations(Stmt);

    if (DetectReductions)
      checkForReductions(Stmt);
  }

  // Check early for a feasible runtime context.
  if (!scop->hasFeasibleRuntimeContext()) {
    LLVM_DEBUG(
        dbgs() << "Bailing-out because of unfeasible context (early)\n");
    return;
  }

  // Check early for profitability. Afterwards it cannot change anymore,
  // only the runtime context could become infeasible.
  if (!scop->isProfitable(UnprofitableScalarAccs)) {
    scop->invalidate(PROFITABLE, DebugLoc());
    LLVM_DEBUG(
        dbgs() << "Bailing-out because SCoP is not considered profitable\n");
    return;
  }

  buildSchedule();

  finalizeAccesses();

  scop->realignParams();
  addUserContext();

  // After the context has been fully constructed, i.e., all our knowledge
  // about the parameters is in it, we add all recorded assumptions to the
  // assumed/invalid context.
  addRecordedAssumptions();

  scop->simplifyContexts();
  if (!buildAliasChecks()) {
    LLVM_DEBUG(dbgs() << "Bailing-out because could not build alias checks\n");
    return;
  }

  hoistInvariantLoads();
  canonicalizeDynamicBasePtrs();
  verifyInvariantLoads();
  scop->simplifySCoP(true);

  // Check late for a feasible runtime context because profitability did not
  // change.
  if (!scop->hasFeasibleRuntimeContext()) {
    LLVM_DEBUG(dbgs() << "Bailing-out because of unfeasible context (late)\n");
    return;
  }

#ifndef NDEBUG
  verifyUses(scop.get(), LI, DT);
#endif
}

ScopBuilder::ScopBuilder(Region *R, AssumptionCache &AC, AliasAnalysis &AA,
                         const DataLayout &DL, DominatorTree &DT, LoopInfo &LI,
                         ScopDetection &SD, ScalarEvolution &SE,
                         OptimizationRemarkEmitter &ORE)
    : AA(AA), DL(DL), DT(DT), LI(LI), SD(SD), SE(SE), ORE(ORE) {
  DebugLoc Beg, End;
  auto P = getBBPairForRegion(R);
  getDebugLocations(P, Beg, End);

  std::string Msg = "SCoP begins here.";
  ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "ScopEntry", Beg, P.first)
           << Msg);

  buildScop(*R, AC);

  LLVM_DEBUG(dbgs() << *scop);

  if (!scop->hasFeasibleRuntimeContext()) {
    InfeasibleScops++;
    Msg = "SCoP ends here but was dismissed.";
    LLVM_DEBUG(dbgs() << "SCoP detected but dismissed\n");
    RecordedAssumptions.clear();
    scop.reset();
  } else {
    Msg = "SCoP ends here.";
    ++ScopFound;
    if (scop->getMaxLoopDepth() > 0)
      ++RichScopFound;
  }

  if (R->isTopLevelRegion())
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "ScopEnd", End, P.first)
             << Msg);
  else
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "ScopEnd", End, P.second)
             << Msg);
}