/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_
#define ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_

#include "base/value_object.h"
#include "utils/growable_array.h"
#include "locations.h"
#include "primitive.h"

namespace art {

class HParallelMove;
class MoveOperands;

// Helper classes to resolve a set of parallel moves. Each architecture-dependent code generator
// must have its own subclass that implements the corresponding virtual functions.
class ParallelMoveResolver : public ValueObject {
 public:
  explicit ParallelMoveResolver(ArenaAllocator* allocator) : moves_(allocator, 32) {}
  virtual ~ParallelMoveResolver() {}

  // Resolve a set of parallel moves, emitting assembler instructions.
  virtual void EmitNativeCode(HParallelMove* parallel_move) = 0;

 protected:
  // Build the initial list of moves.
  void BuildInitialMoveList(HParallelMove* parallel_move);

  GrowableArray<MoveOperands*> moves_;

 private:
  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolver);
};
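// A backend plugs in by subclassing one of the two resolvers below and implementing the pure
// virtual emit hooks. A minimal sketch of the swap-based variant (illustrative only; the class
// name is hypothetical, and the concrete subclasses live in the per-architecture code
// generators):
//
//   class MyParallelMoveResolver : public ParallelMoveResolverWithSwap {
//    public:
//     explicit MyParallelMoveResolver(ArenaAllocator* allocator)
//         : ParallelMoveResolverWithSwap(allocator) {}
//
//     void EmitMove(size_t index) OVERRIDE;    // Lower moves_[index] to assembler code.
//     void EmitSwap(size_t index) OVERRIDE;    // Exchange the two operands of moves_[index].
//     void SpillScratch(int reg) OVERRIDE;     // Save `reg` so it can be used as a scratch.
//     void RestoreScratch(int reg) OVERRIDE;   // Undo the matching SpillScratch().
//   };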
// This helper class uses swaps to resolve dependency cycles, and may therefore emit swap
// instructions.
class ParallelMoveResolverWithSwap : public ParallelMoveResolver {
 public:
  explicit ParallelMoveResolverWithSwap(ArenaAllocator* allocator)
      : ParallelMoveResolver(allocator) {}
  virtual ~ParallelMoveResolverWithSwap() {}

  // Resolve a set of parallel moves, emitting assembler instructions.
  void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;

 protected:
  class ScratchRegisterScope : public ValueObject {
   public:
    ScratchRegisterScope(ParallelMoveResolverWithSwap* resolver,
                         int blocked,
                         int if_scratch,
                         int number_of_registers);
    ~ScratchRegisterScope();

    int GetRegister() const { return reg_; }
    bool IsSpilled() const { return spilled_; }

   private:
    ParallelMoveResolverWithSwap* resolver_;
    int reg_;
    bool spilled_;
  };

  // Return true if the location can be scratched.
  bool IsScratchLocation(Location loc);

  // Allocate a scratch register for performing a move. The method will try to use
  // a register that is the destination of a move, but that move has not been emitted yet.
  int AllocateScratchRegister(int blocked, int if_scratch, int register_count, bool* spilled);

  // Emit a move.
  virtual void EmitMove(size_t index) = 0;

  // Execute a move by emitting a swap of two operands.
  virtual void EmitSwap(size_t index) = 0;

  virtual void SpillScratch(int reg) = 0;
  virtual void RestoreScratch(int reg) = 0;

  static constexpr int kNoRegister = -1;

 private:
  // Perform the move at the given index in `moves_` (possibly requiring
  // other moves to satisfy dependencies).
  //
  // Return whether another move in the dependency cycle needs to swap. This
  // is used to handle 64-bit swaps:
  // 1) In the case of register pairs, we want the pair to swap first, to avoid
  //    building pairs that are unexpected by the code generator. For example, if
  //    we were to swap R1 with R2, we would need to update all locations using
  //    R2 to R1. So a (R2,R3) pair register could become (R1,R3). We could make
  //    the code generator understand such pairs, but it's easier and cleaner to
  //    just not create such pairs and exchange pairs first.
  // 2) Even when the architecture does not have pairs, we must handle 64-bit swaps
  //    first. Consider the case: (R0->R1) (R1->S) (S->R0), where 'S' is a single
  //    stack slot. If we end up swapping S and R0, S will only contain the low bits
  //    of R0. If (R0->R1) is a 64-bit move, R1 will therefore not contain the
  //    right value.
  MoveOperands* PerformMove(size_t index);

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverWithSwap);
};
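// Worked example (illustrative): the parallel move
//   (R0 -> R1) (R1 -> R2) (R2 -> R0)
// forms a dependency cycle: whichever move is emitted first clobbers the source of another.
// The swap-based resolver above breaks an N-cycle with N-1 swaps, e.g.:
//   swap R2, R0   // R0 now holds the old R2; the remaining moves become (R2 -> R1) (R1 -> R2).
//   swap R1, R2   // Resolves the remaining two-cycle; every value is now in place.
// The scratch-based resolver below instead allocates a scratch location T and emits plain
// moves, for instance:
//   (R0 -> T) (R2 -> R0) (R1 -> R2) (T -> R1)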
// This helper class uses additional scratch registers to resolve dependencies. It supports all
// kinds of dependency cycles and does not care about the register layout.
class ParallelMoveResolverNoSwap : public ParallelMoveResolver {
 public:
  explicit ParallelMoveResolverNoSwap(ArenaAllocator* allocator)
      : ParallelMoveResolver(allocator), scratches_(allocator, 32),
        pending_moves_(allocator, 8), allocator_(allocator) {}
  virtual ~ParallelMoveResolverNoSwap() {}

  // Resolve a set of parallel moves, emitting assembler instructions.
  void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;

 protected:
  // Called at the beginning of EmitNativeCode(). A subclass may put some architecture-dependent
  // initialization here.
  virtual void PrepareForEmitNativeCode() = 0;

  // Called at the end of EmitNativeCode(). A subclass may put some architecture-dependent cleanup
  // here. All scratch locations will be removed after this call.
  virtual void FinishEmitNativeCode() = 0;

  // Allocate a scratch location to perform a move from a location of the given kind. A subclass
  // should implement this to return the best-fitting location. If there is no suitable physical
  // register, it can also return a stack slot.
  virtual Location AllocateScratchLocationFor(Location::Kind kind) = 0;

  // Called after a move which takes a scratch location as source. A subclass can defer the
  // cleanup to FinishEmitNativeCode().
  virtual void FreeScratchLocation(Location loc) = 0;

  // Emit a move.
  virtual void EmitMove(size_t index) = 0;

  // Return a scratch location that exactly matches the given kind.
  // Return Location::NoLocation() if no matching scratch location can be found.
  Location GetScratchLocation(Location::Kind kind);

  // Add a location to the scratch list which can be returned from GetScratchLocation() to resolve
  // dependency cycles.
  void AddScratchLocation(Location loc);

  // Remove a location from the scratch list.
  void RemoveScratchLocation(Location loc);

  // List of scratch locations.
  GrowableArray<Location> scratches_;

 private:
  // Perform the move at the given index in `moves_` (possibly requiring other moves to satisfy
  // dependencies).
  void PerformMove(size_t index);

  void UpdateMoveSource(Location from, Location to);

  void AddPendingMove(Location source, Location destination, Primitive::Type type);

  void DeletePendingMove(MoveOperands* move);

  // Find a move that may be unblocked after (loc -> XXX) is performed.
  MoveOperands* GetUnblockedPendingMove(Location loc);

  // Return true if the location is blocked by outstanding moves.
  bool IsBlockedByMoves(Location loc);

  // Return the number of pending moves.
  size_t GetNumberOfPendingMoves();

  // Additional pending moves which might be added to resolve dependency cycles.
  GrowableArray<MoveOperands*> pending_moves_;

  // Used to allocate pending MoveOperands.
  ArenaAllocator* const allocator_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverNoSwap);
};

}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_