/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_
#define ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_

#include "base/arena_containers.h"
#include "base/value_object.h"
#include "data_type.h"
#include "locations.h"

namespace art {

class HParallelMove;
class MoveOperands;

// Helper classes to resolve a set of parallel moves. Architecture-dependent code generators must
// have their own subclass that implements the corresponding virtual functions.
class ParallelMoveResolver : public ValueObject {
 public:
  explicit ParallelMoveResolver(ArenaAllocator* allocator)
      : moves_(allocator->Adapter(kArenaAllocParallelMoveResolver)) {
    moves_.reserve(32);
  }
  virtual ~ParallelMoveResolver() {}

  // Resolve a set of parallel moves, emitting assembler instructions.
  virtual void EmitNativeCode(HParallelMove* parallel_move) = 0;

 protected:
  // Build the initial list of moves.
  void BuildInitialMoveList(HParallelMove* parallel_move);

  ArenaVector<MoveOperands*> moves_;

 private:
  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolver);
};

// This helper class uses swaps to resolve dependency cycles and may therefore emit swap
// instructions.
class ParallelMoveResolverWithSwap : public ParallelMoveResolver {
 public:
  explicit ParallelMoveResolverWithSwap(ArenaAllocator* allocator)
      : ParallelMoveResolver(allocator) {}
  virtual ~ParallelMoveResolverWithSwap() {}

  // Resolve a set of parallel moves, emitting assembler instructions.
  void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;

 protected:
  class ScratchRegisterScope : public ValueObject {
   public:
    ScratchRegisterScope(ParallelMoveResolverWithSwap* resolver,
                         int blocked,
                         int if_scratch,
                         int number_of_registers);
    ~ScratchRegisterScope();

    int GetRegister() const { return reg_; }
    bool IsSpilled() const { return spilled_; }

   private:
    ParallelMoveResolverWithSwap* resolver_;
    int reg_;
    bool spilled_;
  };

  // Return true if the location can be scratched.
  bool IsScratchLocation(Location loc);

  // Allocate a scratch register for performing a move. The method will try to use
  // a register that is the destination of a move, but that move has not been emitted yet.
  int AllocateScratchRegister(int blocked, int if_scratch, int register_count, bool* spilled);

  // Emit a move.
  virtual void EmitMove(size_t index) = 0;
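  // Illustrative note (not from the original header): given the dependency cycle
  //   (R0 -> R1) (R1 -> R0)
  // neither move can be emitted first without clobbering the other's source, so
  // PerformMove() below breaks the cycle with a single EmitSwap() instead of
  // routing one of the values through a temporary.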
  // Execute a move by emitting a swap of two operands.
  virtual void EmitSwap(size_t index) = 0;

  virtual void SpillScratch(int reg) = 0;
  virtual void RestoreScratch(int reg) = 0;

  static constexpr int kNoRegister = -1;

 private:
  // Perform the move at the given index in `moves_` (possibly requiring
  // other moves to satisfy dependencies).
  //
  // Return whether another move in the dependency cycle needs to swap. This
  // is to handle 64-bit swaps:
  // 1) In the case of register pairs, where we want the pair to swap first to avoid
  //    building pairs that are unexpected by the code generator. For example, if
  //    we were to swap R1 with R2, we would need to update all locations using
  //    R2 to R1. So a (R2,R3) pair register could become (R1,R3). We could make
  //    the code generator understand such pairs, but it's easier and cleaner to
  //    just not create such pairs and exchange pairs in priority.
  // 2) Even when the architecture does not have pairs, we must handle 64-bit swaps
  //    first. Consider the case: (R0->R1) (R1->S) (S->R0), where 'S' is a single
  //    stack slot. If we end up swapping S and R0, S will only contain the low bits
  //    of R0. If R0->R1 is for a 64-bit instruction, R1 will therefore not contain
  //    the right value.
  MoveOperands* PerformMove(size_t index);

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverWithSwap);
};

// This helper class uses additional scratch registers to resolve dependencies. It supports all
// kinds of dependency cycles and does not care about the register layout.
class ParallelMoveResolverNoSwap : public ParallelMoveResolver {
 public:
  explicit ParallelMoveResolverNoSwap(ArenaAllocator* allocator)
      : ParallelMoveResolver(allocator),
        scratches_(allocator->Adapter(kArenaAllocParallelMoveResolver)),
        pending_moves_(allocator->Adapter(kArenaAllocParallelMoveResolver)),
        allocator_(allocator) {
    scratches_.reserve(32);
    pending_moves_.reserve(8);
  }
  virtual ~ParallelMoveResolverNoSwap() {}

  // Resolve a set of parallel moves, emitting assembler instructions.
  void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;

 protected:
  // Called at the beginning of EmitNativeCode(). A subclass may put architecture-dependent
  // initialization here.
  virtual void PrepareForEmitNativeCode() = 0;

  // Called at the end of EmitNativeCode(). A subclass may put architecture-dependent cleanup
  // here. All scratch locations will be removed after this call.
  virtual void FinishEmitNativeCode() = 0;

  // Allocate a scratch location to perform a move from the given kind of location. A subclass
  // should implement this to return the best-fitting location. If there is no suitable physical
  // register, it can also return a stack slot.
  virtual Location AllocateScratchLocationFor(Location::Kind kind) = 0;

  // Called after a move which takes a scratch location as source. A subclass can defer the cleanup
  // to FinishEmitNativeCode().
  virtual void FreeScratchLocation(Location loc) = 0;

  // Emit a move.
  virtual void EmitMove(size_t index) = 0;
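  // Illustrative note (not from the original header): for the same cycle
  //   (R0 -> R1) (R1 -> R0)
  // this resolver never swaps. It obtains a scratch location T through
  // AllocateScratchLocationFor() and resolves the cycle as three plain moves:
  //   (R1 -> T) (R0 -> R1) (T -> R0)
  // releasing T via FreeScratchLocation() once the move sourcing T is emitted.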
  // Return a scratch location from the moves which exactly matches the kind.
  // Return Location::NoLocation() if no matching scratch location can be found.
  Location GetScratchLocation(Location::Kind kind);

  // Add a location to the scratch list which can be returned from GetScratchLocation() to resolve
  // dependency cycles.
  void AddScratchLocation(Location loc);

  // Remove a location from the scratch list.
  void RemoveScratchLocation(Location loc);

  // List of scratch locations.
  ArenaVector<Location> scratches_;

 private:
  // Perform the move at the given index in `moves_` (possibly requiring other moves to satisfy
  // dependencies).
  void PerformMove(size_t index);

  void UpdateMoveSource(Location from, Location to);

  void AddPendingMove(Location source, Location destination, DataType::Type type);

  void DeletePendingMove(MoveOperands* move);

  // Find a move that may be unblocked after (loc -> XXX) is performed.
  MoveOperands* GetUnblockedPendingMove(Location loc);

  // Return true if the location is blocked by outstanding moves.
  bool IsBlockedByMoves(Location loc);

  // Return the number of pending moves.
  size_t GetNumberOfPendingMoves();

  // Additional pending moves which might be added to resolve dependency cycles.
  ArenaVector<MoveOperands*> pending_moves_;

  // Used to allocate pending MoveOperands.
  ArenaAllocator* const allocator_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverNoSwap);
};

}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_
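// A minimal sketch of the subclassing contract described above, kept in a comment
// because it is illustrative and not part of ART: the class name and hook comments
// are hypothetical, but real backends (e.g. the x86 code generator) implement the
// same four virtual functions against their assembler.
//
//   class ParallelMoveResolverSketch : public ParallelMoveResolverWithSwap {
//    public:
//     explicit ParallelMoveResolverSketch(ArenaAllocator* allocator)
//         : ParallelMoveResolverWithSwap(allocator) {}
//
//    protected:
//     void EmitMove(size_t index) OVERRIDE;   // Lower moves_[index] to a move/load/store.
//     void EmitSwap(size_t index) OVERRIDE;   // Lower moves_[index] to an exchange.
//     void SpillScratch(int reg) OVERRIDE;    // Save `reg` so it can be used as a scratch.
//     void RestoreScratch(int reg) OVERRIDE;  // Reload `reg` after scratch use.
//   };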