1 //===- Utils.h - General transformation utilities ---------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This header file defines prototypes for various transformation utilities for
10 // memref's and non-loop IR structures. These are not passes by themselves but
11 // are used either by passes, optimization sequences, or in turn by other
12 // transformation utilities.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #ifndef MLIR_TRANSFORMS_UTILS_H
17 #define MLIR_TRANSFORMS_UTILS_H
18 
19 #include "mlir/Dialect/StandardOps/IR/Ops.h"
20 #include "mlir/IR/AffineMap.h"
21 #include "llvm/ADT/ArrayRef.h"
22 #include "llvm/ADT/DenseMap.h"
23 
24 namespace mlir {
25 
26 class AffineApplyOp;
27 class AffineForOp;
28 class Location;
29 class OpBuilder;
30 
/// Replaces all "dereferencing" uses of `oldMemRef` with `newMemRef` while
/// optionally remapping the old memref's indices using the supplied affine map,
/// `indexRemap`. The new memref could be of a different shape or rank.
/// `extraIndices` provides any additional access indices to be added to the
/// start.
///
/// `indexRemap` remaps indices of the old memref access to a new set of indices
/// that are used to index the memref. Additional input operands to indexRemap
/// can be optionally provided in `extraOperands`, and they occupy the start
/// of its input list. `indexRemap`'s dimensional inputs are expected to
/// correspond to memref's indices, and its symbolic inputs if any should be
/// provided in `symbolOperands`.
///
/// `domInstFilter`, if non-null, restricts the replacement to only those
/// operations that are dominated by `domInstFilter`; similarly,
/// `postDomInstFilter`, if non-null, restricts replacement to only those
/// operations that are postdominated by `postDomInstFilter`.
///
/// `allowNonDereferencingOps`, if set, allows replacement of non-dereferencing
/// uses of a memref without any requirement for access index rewrites. The
/// default value of this flag variable is false.
///
/// `replaceInDeallocOp`, if set, lets DeallocOp, a non-dereferencing user, to
/// also be a candidate for replacement. The default value of this flag is
/// false.
///
/// Returns success on success and failure if the replacement is not possible,
/// i.e. whenever a memref is used as an operand in a non-dereferencing context
/// and `allowNonDereferencingOps` is false, except for dealloc's on the memref
/// which are left untouched. See comments at function definition for an
/// example.
//
//  Ex: to replace load %A[%i, %j] with load %Abuf[%t mod 2, %ii - %i, %j]:
//  The SSA value corresponding to '%t mod 2' should be in 'extraIndices', and
//  index remap will perform (%i, %j) -> (%ii - %i, %j), i.e., indexRemap = (d0,
//  d1, d2) -> (d0 - d1, d2), and %ii will be the extra operand. Without any
//  extra operands, note that 'indexRemap' would just be applied to existing
//  indices (%i, %j).
//  TODO: allow extraIndices to be added at any position.
LogicalResult replaceAllMemRefUsesWith(
    Value oldMemRef, Value newMemRef, ArrayRef<Value> extraIndices = {},
    AffineMap indexRemap = AffineMap(), ArrayRef<Value> extraOperands = {},
    ArrayRef<Value> symbolOperands = {}, Operation *domInstFilter = nullptr,
    Operation *postDomInstFilter = nullptr,
    bool allowNonDereferencingOps = false, bool replaceInDeallocOp = false);
75 
/// Performs the same replacement as the other version above, but only for the
/// dereferencing uses of `oldMemRef` in `op`. When `allowNonDereferencingOps`
/// is set to true, the non-dereferencing uses in `op` are replaced as well.
LogicalResult replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef,
                                       Operation *op,
                                       ArrayRef<Value> extraIndices = {},
                                       AffineMap indexRemap = AffineMap(),
                                       ArrayRef<Value> extraOperands = {},
                                       ArrayRef<Value> symbolOperands = {},
                                       bool allowNonDereferencingOps = false);
87 
/// Rewrites the memref defined by this alloc op to have an identity layout map
/// and updates all its indexing uses. Returns failure if any of its uses
/// escape (while leaving the IR in a valid state); on failure, no replacement
/// is performed.
LogicalResult normalizeMemRef(AllocOp op);
92 
/// Uses the old memref type's layout map to compute a new memref type with a
/// new shape and an identity layout map (i.e., the old, possibly non-trivial
/// layout has been normalized away into the shape). `numSymbolicOperands` is
/// the number of symbolic operands of the old layout map. Returns the old
/// memref type unchanged in case no normalization was needed or a failure
/// occurs while transforming the old map layout to an identity layout map.
MemRefType normalizeMemRefType(MemRefType memrefType, OpBuilder builder,
                               unsigned numSymbolicOperands);
100 
/// Creates and inserts into `builder` a new AffineApplyOp, with the number of
/// its results equal to the number of operands, as a composition
/// of all other AffineApplyOps reachable from input parameter `operands`. If
/// different operands were drawing results from multiple affine apply ops,
/// these will also be collected into a single (multi-result) affine apply op.
/// The new op is created at `loc`; `affineApplyOps` is the set of reachable
/// affine apply ops to compose. The final results of the composed
/// AffineApplyOp are returned in output parameter `results`. Returns the
/// affine apply op created.
Operation *createComposedAffineApplyOp(OpBuilder &builder, Location loc,
                                       ArrayRef<Value> operands,
                                       ArrayRef<Operation *> affineApplyOps,
                                       SmallVectorImpl<Value> *results);
112 
/// Given an operation, inserts one or more single result affine apply
/// operations, results of which are exclusively used by this operation.
/// The operands of these newly created affine apply ops are
/// guaranteed to be loop iterators or terminal symbols of a function.
///
/// Before
///
/// affine.for %i = 0 to #map(%N)
///   %idx = affine.apply (d0) -> (d0 mod 2) (%i)
///   send %A[%idx], ...
///   %v = "compute"(%idx, ...)
///
/// After
///
/// affine.for %i = 0 to #map(%N)
///   %idx = affine.apply (d0) -> (d0 mod 2) (%i)
///   send %A[%idx], ...
///   %idx_ = affine.apply (d0) -> (d0 mod 2) (%i)
///   %v = "compute"(%idx_, ...)
///
/// This allows the application of different transformations on send and
/// compute (for eg. different shifts/delays)
///
/// Fills `sliceOps` with the list of affine.apply operations.
/// In the following cases, `sliceOps` remains empty:
///   1. If none of opInst's operands were the result of an affine.apply
///      (i.e., there was no affine computation slice to create).
///   2. If all the affine.apply op's supplying operands to this opInst did not
///      have any uses other than those in this opInst.
void createAffineComputationSlice(Operation *opInst,
                                  SmallVectorImpl<AffineApplyOp> *sliceOps);
144 
145 } // end namespace mlir
146 
147 #endif // MLIR_TRANSFORMS_UTILS_H
148