1 //===- LoopFusionUtils.cpp ---- Utilities for loop fusion ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements loop fusion transformation utility functions.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "mlir/Transforms/LoopFusionUtils.h"
14
15 #include "mlir/Analysis/AffineAnalysis.h"
16 #include "mlir/Analysis/AffineStructures.h"
17 #include "mlir/Analysis/LoopAnalysis.h"
18 #include "mlir/Analysis/Utils.h"
19 #include "mlir/Dialect/Affine/IR/AffineOps.h"
20 #include "mlir/IR/AffineExpr.h"
21 #include "mlir/IR/AffineMap.h"
22 #include "mlir/IR/BlockAndValueMapping.h"
23 #include "mlir/IR/Builders.h"
24 #include "mlir/IR/BuiltinOps.h"
25 #include "mlir/IR/Operation.h"
26 #include "mlir/Transforms/LoopUtils.h"
27 #include "llvm/ADT/DenseMap.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/Support/Debug.h"
30 #include "llvm/Support/raw_ostream.h"
31
32 #define DEBUG_TYPE "loop-fusion-utils"
33
34 using namespace mlir;
35
36 // Gathers all load and store memref accesses in 'opA' into 'values', where
37 // 'values[memref] == true' for each store operation.
getLoadAndStoreMemRefAccesses(Operation * opA,DenseMap<Value,bool> & values)38 static void getLoadAndStoreMemRefAccesses(Operation *opA,
39 DenseMap<Value, bool> &values) {
40 opA->walk([&](Operation *op) {
41 if (auto loadOp = dyn_cast<AffineReadOpInterface>(op)) {
42 if (values.count(loadOp.getMemRef()) == 0)
43 values[loadOp.getMemRef()] = false;
44 } else if (auto storeOp = dyn_cast<AffineWriteOpInterface>(op)) {
45 values[storeOp.getMemRef()] = true;
46 }
47 });
48 }
49
50 /// Returns true if 'op' is a load or store operation which access a memref
51 /// accessed 'values' and at least one of the access is a store operation.
52 /// Returns false otherwise.
isDependentLoadOrStoreOp(Operation * op,DenseMap<Value,bool> & values)53 static bool isDependentLoadOrStoreOp(Operation *op,
54 DenseMap<Value, bool> &values) {
55 if (auto loadOp = dyn_cast<AffineReadOpInterface>(op)) {
56 return values.count(loadOp.getMemRef()) > 0 &&
57 values[loadOp.getMemRef()] == true;
58 } else if (auto storeOp = dyn_cast<AffineWriteOpInterface>(op)) {
59 return values.count(storeOp.getMemRef()) > 0;
60 }
61 return false;
62 }
63
64 // Returns the first operation in range ('opA', 'opB') which has a data
65 // dependence on 'opA'. Returns 'nullptr' of no dependence exists.
getFirstDependentOpInRange(Operation * opA,Operation * opB)66 static Operation *getFirstDependentOpInRange(Operation *opA, Operation *opB) {
67 // Record memref values from all loads/store in loop nest rooted at 'opA'.
68 // Map from memref value to bool which is true if store, false otherwise.
69 DenseMap<Value, bool> values;
70 getLoadAndStoreMemRefAccesses(opA, values);
71
72 // For each 'opX' in block in range ('opA', 'opB'), check if there is a data
73 // dependence from 'opA' to 'opX' ('opA' and 'opX' access the same memref
74 // and at least one of the accesses is a store).
75 Operation *firstDepOp = nullptr;
76 for (Block::iterator it = std::next(Block::iterator(opA));
77 it != Block::iterator(opB); ++it) {
78 Operation *opX = &(*it);
79 opX->walk([&](Operation *op) {
80 if (!firstDepOp && isDependentLoadOrStoreOp(op, values))
81 firstDepOp = opX;
82 });
83 if (firstDepOp)
84 break;
85 }
86 return firstDepOp;
87 }
88
// Returns the last operation 'opX' in range ('opA', 'opB'), for which there
// exists a data dependence from 'opX' to 'opB'.
// Returns 'nullptr' if no dependence exists.
static Operation *getLastDependentOpInRange(Operation *opA, Operation *opB) {
  // Record memref values from all loads/stores in loop nest rooted at 'opB'.
  // Map from memref value to bool which is true if store, false otherwise.
  DenseMap<Value, bool> values;
  getLoadAndStoreMemRefAccesses(opB, values);

  // For each 'opX' in block in range ('opA', 'opB') in reverse order,
  // check if there is a data dependence from 'opX' to 'opB':
  // *) 'opX' and 'opB' access the same memref and at least one of the
  //    accesses is a store.
  // *) 'opX' produces an SSA Value which is used by 'opB'.
  Operation *lastDepOp = nullptr;
  for (Block::reverse_iterator it = std::next(Block::reverse_iterator(opB));
       it != Block::reverse_iterator(opA); ++it) {
    Operation *opX = &(*it);
    // The callback returns WalkResult, so 'interrupt' stops the traversal of
    // 'opX's nest as soon as a dependence is found; the surrounding loop then
    // exits via the 'lastDepOp' check below.
    opX->walk([&](Operation *op) {
      // Memref-based dependence: check load/store conflicts against 'opB's
      // accesses recorded in 'values'.
      if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) {
        if (isDependentLoadOrStoreOp(op, values)) {
          lastDepOp = opX;
          return WalkResult::interrupt();
        }
        return WalkResult::advance();
      }
      // SSA-based dependence: a result of 'op' is used by an operation nested
      // inside the loop nest rooted at 'opB'.
      for (auto value : op->getResults()) {
        for (Operation *user : value.getUsers()) {
          SmallVector<AffineForOp, 4> loops;
          // Check if any loop in loop nest surrounding 'user' is 'opB'.
          getLoopIVs(*user, &loops);
          if (llvm::is_contained(loops, cast<AffineForOp>(opB))) {
            lastDepOp = opX;
            return WalkResult::interrupt();
          }
        }
      }
      return WalkResult::advance();
    });
    // Since we iterate in reverse, the first hit is the *last* dependent op.
    if (lastDepOp)
      break;
  }
  return lastDepOp;
}
133
134 // Computes and returns an insertion point operation, before which the
135 // the fused <srcForOp, dstForOp> loop nest can be inserted while preserving
136 // dependences. Returns nullptr if no such insertion point is found.
getFusedLoopNestInsertionPoint(AffineForOp srcForOp,AffineForOp dstForOp)137 static Operation *getFusedLoopNestInsertionPoint(AffineForOp srcForOp,
138 AffineForOp dstForOp) {
139 bool isSrcForOpBeforeDstForOp =
140 srcForOp->isBeforeInBlock(dstForOp.getOperation());
141 auto forOpA = isSrcForOpBeforeDstForOp ? srcForOp : dstForOp;
142 auto forOpB = isSrcForOpBeforeDstForOp ? dstForOp : srcForOp;
143
144 auto *firstDepOpA =
145 getFirstDependentOpInRange(forOpA.getOperation(), forOpB.getOperation());
146 auto *lastDepOpB =
147 getLastDependentOpInRange(forOpA.getOperation(), forOpB.getOperation());
148 // Block:
149 // ...
150 // |-- opA
151 // | ...
152 // | lastDepOpB --|
153 // | ... |
154 // |-> firstDepOpA |
155 // ... |
156 // opB <---------
157 //
158 // Valid insertion point range: (lastDepOpB, firstDepOpA)
159 //
160 if (firstDepOpA != nullptr) {
161 if (lastDepOpB != nullptr) {
162 if (firstDepOpA->isBeforeInBlock(lastDepOpB) || firstDepOpA == lastDepOpB)
163 // No valid insertion point exists which preserves dependences.
164 return nullptr;
165 }
166 // Return insertion point in valid range closest to 'opB'.
167 // TODO: Consider other insertion points in valid range.
168 return firstDepOpA;
169 }
170 // No dependences from 'opA' to operation in range ('opA', 'opB'), return
171 // 'opB' insertion point.
172 return forOpB.getOperation();
173 }
174
175 // Gathers all load and store ops in loop nest rooted at 'forOp' into
176 // 'loadAndStoreOps'.
177 static bool
gatherLoadsAndStores(AffineForOp forOp,SmallVectorImpl<Operation * > & loadAndStoreOps)178 gatherLoadsAndStores(AffineForOp forOp,
179 SmallVectorImpl<Operation *> &loadAndStoreOps) {
180 bool hasIfOp = false;
181 forOp.walk([&](Operation *op) {
182 if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op))
183 loadAndStoreOps.push_back(op);
184 else if (isa<AffineIfOp>(op))
185 hasIfOp = true;
186 });
187 return !hasIfOp;
188 }
189
/// Returns the maximum loop depth at which we could fuse producer loop
/// 'srcForOp' into consumer loop 'dstForOp' without violating data dependences.
// TODO: Generalize this check for sibling and more generic fusion scenarios.
// TODO: Support forward slice fusion.
static unsigned getMaxLoopDepth(ArrayRef<Operation *> dstOps,
                                FusionStrategy fusionStrategy) {
  assert(fusionStrategy.strategy == FusionStrategy::ProducerConsumer &&
         "Fusion strategy not supported");

  if (dstOps.empty())
    // Expected at least one memory operation.
    // TODO: Revisit this case with a specific example.
    return 0;

  // Filter out ops in 'dstOps' that do not use the producer-consumer memref so
  // that they are not considered for analysis.
  // TODO: Currently, we pass the producer-consumer memref through
  // fusionStrategy. We will retrieve the memrefs from 'srcOps' once we
  // generalize the algorithm.
  SmallVector<Operation *, 4> targetDstOps;
  for (Operation *dstOp : dstOps) {
    // Each op is either a load or a store; non-loads must be stores.
    auto loadOp = dyn_cast<AffineReadOpInterface>(dstOp);
    Value memref = loadOp ? loadOp.getMemRef()
                          : cast<AffineWriteOpInterface>(dstOp).getMemRef();
    if (memref == fusionStrategy.memref)
      targetDstOps.push_back(dstOp);
  }

  assert(!targetDstOps.empty() &&
         "No dependences between 'srcForOp' and 'dstForOp'?");

  // Compute the innermost common loop depth for loads and stores.
  unsigned loopDepth = getInnermostCommonLoopDepth(targetDstOps);

  // Return common loop depth for loads if there are no store ops.
  if (all_of(targetDstOps,
             [&](Operation *op) { return isa<AffineReadOpInterface>(op); }))
    return loopDepth;

  // Check dependences on all pairs of ops in 'targetDstOps' and store the
  // minimum loop depth at which a dependence is satisfied.
  for (unsigned i = 0, e = targetDstOps.size(); i < e; ++i) {
    auto *srcOpInst = targetDstOps[i];
    MemRefAccess srcAccess(srcOpInst);
    for (unsigned j = 0; j < e; ++j) {
      auto *dstOpInst = targetDstOps[j];
      MemRefAccess dstAccess(dstOpInst);

      unsigned numCommonLoops =
          getNumCommonSurroundingLoops(*srcOpInst, *dstOpInst);
      // Probe dependence depths from 1 up to numCommonLoops + 1; the first
      // depth 'd' with a dependence caps the fusion depth at 'd - 1'.
      for (unsigned d = 1; d <= numCommonLoops + 1; ++d) {
        FlatAffineConstraints dependenceConstraints;
        // TODO: Cache dependence analysis results, check cache here.
        DependenceResult result = checkMemrefAccessDependence(
            srcAccess, dstAccess, d, &dependenceConstraints,
            /*dependenceComponents=*/nullptr);
        if (hasDependence(result)) {
          // Store minimum loop depth and break because we want the min 'd' at
          // which there is a dependence.
          loopDepth = std::min(loopDepth, d - 1);
          break;
        }
      }
    }
  }

  return loopDepth;
}
258
// TODO: Prevent fusion of loop nests with side-effecting operations.
// TODO: This pass performs some computation that is the same for all the depths
// (e.g., getMaxLoopDepth). Implement a version of this utility that processes
// all the depths at once or only the legal maximal depth for maximal fusion.
/// Checks whether 'srcForOp' can be fused into 'dstForOp' at 'dstLoopDepth'
/// under the given 'fusionStrategy'. On success, the computed source slice
/// bounds are stored in 'srcSlice'.
FusionResult mlir::canFuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
                                unsigned dstLoopDepth,
                                ComputationSliceState *srcSlice,
                                FusionStrategy fusionStrategy) {
  // Return 'failure' if 'dstLoopDepth == 0'.
  if (dstLoopDepth == 0) {
    LLVM_DEBUG(llvm::dbgs() << "Cannot fuse loop nests at depth 0\n");
    return FusionResult::FailPrecondition;
  }
  // Return 'failure' if 'srcForOp' and 'dstForOp' are not in the same block.
  auto *block = srcForOp->getBlock();
  if (block != dstForOp->getBlock()) {
    LLVM_DEBUG(llvm::dbgs() << "Cannot fuse loop nests in different blocks\n");
    return FusionResult::FailPrecondition;
  }

  // Return 'failure' if no valid insertion point for fused loop nest in 'block'
  // exists which would preserve dependences.
  if (!getFusedLoopNestInsertionPoint(srcForOp, dstForOp)) {
    LLVM_DEBUG(llvm::dbgs() << "Fusion would violate dependences in block\n");
    return FusionResult::FailBlockDependence;
  }

  // Check if 'srcForOp' precedes 'dstForOp' in 'block'.
  bool isSrcForOpBeforeDstForOp =
      srcForOp->isBeforeInBlock(dstForOp.getOperation());
  // 'forOpA' executes before 'forOpB' in 'block'.
  auto forOpA = isSrcForOpBeforeDstForOp ? srcForOp : dstForOp;
  auto forOpB = isSrcForOpBeforeDstForOp ? dstForOp : srcForOp;

  // Gather all load and store from 'forOpA' which precedes 'forOpB' in 'block'.
  // Bail out on nests containing affine.if (unsupported by slice analysis).
  SmallVector<Operation *, 4> opsA;
  if (!gatherLoadsAndStores(forOpA, opsA)) {
    LLVM_DEBUG(llvm::dbgs() << "Fusing loops with affine.if unsupported\n");
    return FusionResult::FailPrecondition;
  }

  // Gather all load and store from 'forOpB' which succeeds 'forOpA' in 'block'.
  SmallVector<Operation *, 4> opsB;
  if (!gatherLoadsAndStores(forOpB, opsB)) {
    LLVM_DEBUG(llvm::dbgs() << "Fusing loops with affine.if unsupported\n");
    return FusionResult::FailPrecondition;
  }

  // Return 'failure' if fusing loops at depth 'dstLoopDepth' wouldn't preserve
  // loop dependences.
  // TODO: Enable this check for sibling and more generic loop fusion
  // strategies.
  if (fusionStrategy.strategy == FusionStrategy::ProducerConsumer) {
    // TODO: 'getMaxLoopDepth' does not support forward slice fusion.
    assert(isSrcForOpBeforeDstForOp && "Unexpected forward slice fusion");
    if (getMaxLoopDepth(opsB, fusionStrategy) < dstLoopDepth) {
      LLVM_DEBUG(llvm::dbgs() << "Fusion would violate loop dependences\n");
      return FusionResult::FailFusionDependence;
    }
  }

  // Calculate the number of common loops surrounding 'srcForOp' and 'dstForOp'.
  unsigned numCommonLoops = mlir::getNumCommonSurroundingLoops(
      *srcForOp.getOperation(), *dstForOp.getOperation());

  // Filter out ops in 'opsA' to compute the slice union based on the
  // assumptions made by the fusion strategy.
  SmallVector<Operation *, 4> strategyOpsA;
  switch (fusionStrategy.strategy) {
  case FusionStrategy::Generic:
    // Generic fusion. Take into account all the memory operations to compute
    // the slice union.
    strategyOpsA.append(opsA.begin(), opsA.end());
    break;
  case FusionStrategy::ProducerConsumer:
    // Producer-consumer fusion (AffineLoopFusion pass) only takes into
    // account stores to 'memref' in 'srcForOp' to compute the slice union.
    for (Operation *op : opsA) {
      auto store = dyn_cast<AffineWriteOpInterface>(op);
      if (store && store.getMemRef() == fusionStrategy.memref)
        strategyOpsA.push_back(op);
    }
    break;
  case FusionStrategy::Sibling:
    // Sibling fusion (AffineLoopFusion pass) only takes into account the loads
    // to 'memref' in 'srcForOp' to compute the slice union.
    for (Operation *op : opsA) {
      auto load = dyn_cast<AffineReadOpInterface>(op);
      if (load && load.getMemRef() == fusionStrategy.memref)
        strategyOpsA.push_back(op);
    }
    break;
  }

  // Compute union of computation slices computed between all pairs of ops
  // from 'forOpA' and 'forOpB'.
  if (failed(mlir::computeSliceUnion(strategyOpsA, opsB, dstLoopDepth,
                                     numCommonLoops, isSrcForOpBeforeDstForOp,
                                     srcSlice))) {
    LLVM_DEBUG(llvm::dbgs() << "computeSliceUnion failed\n");
    return FusionResult::FailPrecondition;
  }

  return FusionResult::Success;
}
364
365 /// Fuses 'srcForOp' into 'dstForOp' with destination loop block insertion point
366 /// and source slice loop bounds specified in 'srcSlice'.
fuseLoops(AffineForOp srcForOp,AffineForOp dstForOp,const ComputationSliceState & srcSlice)367 void mlir::fuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
368 const ComputationSliceState &srcSlice) {
369 // Clone 'srcForOp' into 'dstForOp' at 'srcSlice->insertPoint'.
370 OpBuilder b(srcSlice.insertPoint->getBlock(), srcSlice.insertPoint);
371 BlockAndValueMapping mapper;
372 b.clone(*srcForOp, mapper);
373
374 // Update 'sliceLoopNest' upper and lower bounds from computed 'srcSlice'.
375 SmallVector<AffineForOp, 4> sliceLoops;
376 for (unsigned i = 0, e = srcSlice.ivs.size(); i < e; ++i) {
377 auto loopIV = mapper.lookupOrNull(srcSlice.ivs[i]);
378 if (!loopIV)
379 continue;
380 auto forOp = getForInductionVarOwner(loopIV);
381 sliceLoops.push_back(forOp);
382 if (AffineMap lbMap = srcSlice.lbs[i]) {
383 auto lbOperands = srcSlice.lbOperands[i];
384 canonicalizeMapAndOperands(&lbMap, &lbOperands);
385 forOp.setLowerBound(lbOperands, lbMap);
386 }
387 if (AffineMap ubMap = srcSlice.ubs[i]) {
388 auto ubOperands = srcSlice.ubOperands[i];
389 canonicalizeMapAndOperands(&ubMap, &ubOperands);
390 forOp.setUpperBound(ubOperands, ubMap);
391 }
392 }
393
394 // Promote any single iteration slice loops.
395 for (AffineForOp forOp : sliceLoops)
396 promoteIfSingleIteration(forOp);
397 }
398
399 /// Collect loop nest statistics (eg. loop trip count and operation count)
400 /// in 'stats' for loop nest rooted at 'forOp'. Returns true on success,
401 /// returns false otherwise.
getLoopNestStats(AffineForOp forOpRoot,LoopNestStats * stats)402 bool mlir::getLoopNestStats(AffineForOp forOpRoot, LoopNestStats *stats) {
403 auto walkResult = forOpRoot.walk([&](AffineForOp forOp) {
404 auto *childForOp = forOp.getOperation();
405 auto *parentForOp = forOp->getParentOp();
406 if (!llvm::isa<FuncOp>(parentForOp)) {
407 if (!isa<AffineForOp>(parentForOp)) {
408 LLVM_DEBUG(llvm::dbgs() << "Expected parent AffineForOp");
409 return WalkResult::interrupt();
410 }
411 // Add mapping to 'forOp' from its parent AffineForOp.
412 stats->loopMap[parentForOp].push_back(forOp);
413 }
414
415 // Record the number of op operations in the body of 'forOp'.
416 unsigned count = 0;
417 stats->opCountMap[childForOp] = 0;
418 for (auto &op : *forOp.getBody()) {
419 if (!isa<AffineForOp, AffineIfOp>(op))
420 ++count;
421 }
422 stats->opCountMap[childForOp] = count;
423
424 // Record trip count for 'forOp'. Set flag if trip count is not
425 // constant.
426 Optional<uint64_t> maybeConstTripCount = getConstantTripCount(forOp);
427 if (!maybeConstTripCount.hasValue()) {
428 // Currently only constant trip count loop nests are supported.
429 LLVM_DEBUG(llvm::dbgs() << "Non-constant trip count unsupported");
430 return WalkResult::interrupt();
431 }
432
433 stats->tripCountMap[childForOp] = maybeConstTripCount.getValue();
434 return WalkResult::advance();
435 });
436 return !walkResult.wasInterrupted();
437 }
438
// Computes the total cost of the loop nest rooted at 'forOp'.
// Currently, the total cost is computed by counting the total operation
// instance count (i.e. total number of operations in the loop body * loop
// operation count * loop trip count) for the entire loop nest.
// If 'tripCountOverrideMap' is non-null, overrides the trip count for loops
// specified in the map when computing the total op instance count.
// NOTEs: 1) This is used to compute the cost of computation slices, which are
// sliced along the iteration dimension, and thus reduce the trip count.
// If 'computeCostMap' is non-null, the total op count for forOps specified
// in the map is increased (not overridden) by adding the op count from the
// map to the existing op count for the for loop. This is done before
// multiplying by the loop's trip count, and is used to model the cost of
// inserting a sliced loop nest of known cost into the loop's body.
// 2) This is also used to compute the cost of fusing a slice of some loop nest
// within another loop.
static int64_t getComputeCostHelper(
    Operation *forOp, LoopNestStats &stats,
    llvm::SmallDenseMap<Operation *, uint64_t, 8> *tripCountOverrideMap,
    DenseMap<Operation *, int64_t> *computeCostMap) {
  // 'opCount' is the total number operations in one iteration of 'forOp' body,
  // minus terminator op which is a no-op.
  int64_t opCount = stats.opCountMap[forOp] - 1;
  // Recurse into child loops (recorded in 'stats.loopMap'); each child
  // contributes its own total dynamic op count to this loop's body cost.
  if (stats.loopMap.count(forOp) > 0) {
    for (auto childForOp : stats.loopMap[forOp]) {
      opCount += getComputeCostHelper(childForOp.getOperation(), stats,
                                      tripCountOverrideMap, computeCostMap);
    }
  }
  // Add in additional op instances from slice (if specified in map).
  if (computeCostMap != nullptr) {
    auto it = computeCostMap->find(forOp);
    if (it != computeCostMap->end()) {
      opCount += it->second;
    }
  }
  // Override trip count (if specified in map).
  int64_t tripCount = stats.tripCountMap[forOp];
  if (tripCountOverrideMap != nullptr) {
    auto it = tripCountOverrideMap->find(forOp);
    if (it != tripCountOverrideMap->end()) {
      tripCount = it->second;
    }
  }
  // Returns the total number of dynamic instances of operations in loop body.
  return tripCount * opCount;
}
485
486 // TODO: extend this to handle multiple result maps.
getConstDifference(AffineMap lbMap,AffineMap ubMap)487 static Optional<uint64_t> getConstDifference(AffineMap lbMap, AffineMap ubMap) {
488 assert(lbMap.getNumResults() == 1 && "expected single result bound map");
489 assert(ubMap.getNumResults() == 1 && "expected single result bound map");
490 assert(lbMap.getNumDims() == ubMap.getNumDims());
491 assert(lbMap.getNumSymbols() == ubMap.getNumSymbols());
492 AffineExpr lbExpr(lbMap.getResult(0));
493 AffineExpr ubExpr(ubMap.getResult(0));
494 auto loopSpanExpr = simplifyAffineExpr(ubExpr - lbExpr, lbMap.getNumDims(),
495 lbMap.getNumSymbols());
496 auto cExpr = loopSpanExpr.dyn_cast<AffineConstantExpr>();
497 if (!cExpr)
498 return None;
499 return cExpr.getValue();
500 }
501
502 // Return the number of iterations in the given slice.
getSliceIterationCount(const llvm::SmallDenseMap<Operation *,uint64_t,8> & sliceTripCountMap)503 static uint64_t getSliceIterationCount(
504 const llvm::SmallDenseMap<Operation *, uint64_t, 8> &sliceTripCountMap) {
505 uint64_t iterCount = 1;
506 for (const auto &count : sliceTripCountMap) {
507 iterCount *= count.second;
508 }
509 return iterCount;
510 }
511
// Builds a map 'tripCountMap' from AffineForOp to constant trip count for loop
// nest surrounding represented by slice loop bounds in 'slice'.
// Returns true on success, false otherwise (if a non-constant trip count
// was encountered).
// TODO: Make this work with non-unit step loops.
static bool buildSliceTripCountMap(
    const ComputationSliceState &slice,
    llvm::SmallDenseMap<Operation *, uint64_t, 8> *tripCountMap) {
  unsigned numSrcLoopIVs = slice.ivs.size();
  // Populate map from AffineForOp -> trip count
  for (unsigned i = 0; i < numSrcLoopIVs; ++i) {
    AffineForOp forOp = getForInductionVarOwner(slice.ivs[i]);
    auto *op = forOp.getOperation();
    AffineMap lbMap = slice.lbs[i];
    AffineMap ubMap = slice.ubs[i];
    // An empty (default-constructed) map means this IV was not sliced.
    if (lbMap == AffineMap() || ubMap == AffineMap()) {
      // The iteration of src loop IV 'i' was not sliced. Use full loop bounds.
      if (forOp.hasConstantLowerBound() && forOp.hasConstantUpperBound()) {
        (*tripCountMap)[op] =
            forOp.getConstantUpperBound() - forOp.getConstantLowerBound();
        continue;
      }
      // Fall back to trip count analysis for non-constant bounds.
      Optional<uint64_t> maybeConstTripCount = getConstantTripCount(forOp);
      if (maybeConstTripCount.hasValue()) {
        (*tripCountMap)[op] = maybeConstTripCount.getValue();
        continue;
      }
      // Neither constant bounds nor a constant trip count: give up.
      return false;
    }
    Optional<uint64_t> tripCount = getConstDifference(lbMap, ubMap);
    // Slice bounds are created with a constant ub - lb difference.
    if (!tripCount.hasValue())
      return false;
    (*tripCountMap)[op] = tripCount.getValue();
  }
  return true;
}
549
550 /// Computes the total cost of the loop nest rooted at 'forOp' using 'stats'.
551 /// Currently, the total cost is computed by counting the total operation
552 /// instance count (i.e. total number of operations in the loop body * loop
553 /// trip count) for the entire loop nest.
getComputeCost(AffineForOp forOp,LoopNestStats & stats)554 int64_t mlir::getComputeCost(AffineForOp forOp, LoopNestStats &stats) {
555 return getComputeCostHelper(forOp.getOperation(), stats,
556 /*tripCountOverrideMap=*/nullptr,
557 /*computeCostMap=*/nullptr);
558 }
559
/// Computes and returns in 'computeCost', the total compute cost of fusing the
/// 'slice' of the loop nest rooted at 'srcForOp' into 'dstForOp'. Currently,
/// the total cost is computed by counting the total operation instance count
/// (i.e. total number of operations in the loop body * loop trip count) for
/// the entire loop nest.
/// Returns false if the slice's trip counts could not be determined, true
/// otherwise.
bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats,
                                AffineForOp dstForOp, LoopNestStats &dstStats,
                                const ComputationSliceState &slice,
                                int64_t *computeCost) {
  llvm::SmallDenseMap<Operation *, uint64_t, 8> sliceTripCountMap;
  DenseMap<Operation *, int64_t> computeCostMap;

  // Build trip count map for computation slice.
  if (!buildSliceTripCountMap(slice, &sliceTripCountMap))
    return false;
  // Checks whether a store to load forwarding will happen.
  int64_t sliceIterationCount = getSliceIterationCount(sliceTripCountMap);
  assert(sliceIterationCount > 0);
  // A single-iteration slice is expected to be removed by store-to-load
  // forwarding, so its loads/stores are discounted below.
  bool storeLoadFwdGuaranteed = (sliceIterationCount == 1);
  auto *insertPointParent = slice.insertPoint->getParentOp();

  // The store and loads to this memref will disappear.
  // TODO: Add load coalescing to memref data flow opt pass.
  if (storeLoadFwdGuaranteed) {
    // Subtract from operation count the loads/store we expect load/store
    // forwarding to remove.
    unsigned storeCount = 0;
    llvm::SmallDenseSet<Value, 4> storeMemrefs;
    srcForOp.walk([&](Operation *op) {
      if (auto storeOp = dyn_cast<AffineWriteOpInterface>(op)) {
        storeMemrefs.insert(storeOp.getMemRef());
        ++storeCount;
      }
    });
    // Subtract out any store ops in single-iteration src slice loop nest.
    if (storeCount > 0)
      computeCostMap[insertPointParent] = -storeCount;
    // Subtract out any load users of 'storeMemrefs' nested below
    // 'insertPointParent'.
    for (auto value : storeMemrefs) {
      for (auto *user : value.getUsers()) {
        if (auto loadOp = dyn_cast<AffineReadOpInterface>(user)) {
          SmallVector<AffineForOp, 4> loops;
          // Check if any loop in loop nest surrounding 'user' is
          // 'insertPointParent'.
          getLoopIVs(*user, &loops);
          if (llvm::is_contained(loops, cast<AffineForOp>(insertPointParent))) {
            // Discount one load from the op count of the load's parent loop.
            if (auto forOp =
                    dyn_cast_or_null<AffineForOp>(user->getParentOp())) {
              if (computeCostMap.count(forOp) == 0)
                computeCostMap[forOp] = 0;
              computeCostMap[forOp] -= 1;
            }
          }
        }
      }
    }
  }

  // Compute op instance count for the src loop nest with iteration slicing.
  int64_t sliceComputeCost = getComputeCostHelper(
      srcForOp.getOperation(), srcStats, &sliceTripCountMap, &computeCostMap);

  // Compute cost of fusion for this depth: the slice's cost is modeled as
  // extra op count inserted at the insertion point's parent loop.
  computeCostMap[insertPointParent] = sliceComputeCost;

  *computeCost =
      getComputeCostHelper(dstForOp.getOperation(), dstStats,
                           /*tripCountOverrideMap=*/nullptr, &computeCostMap);
  return true;
}
631