/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_

#include "base/bit_field.h"
#include "globals.h"
#include "instruction_set.h"
#include "locations.h"
#include "memory_region.h"
#include "nodes.h"
#include "utils/assembler.h"

namespace art {

static constexpr size_t kVRegSize = 4;
static constexpr size_t kUninitializedFrameSize = 0;

class CodeGenerator;
class DexCompilationUnit;

class CodeAllocator {
 public:
  CodeAllocator() {}
  virtual ~CodeAllocator() {}

  virtual uint8_t* Allocate(size_t size) = 0;

 private:
  DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};
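
// A minimal sketch of a concrete allocator (the name VectorCodeAllocator is
// illustrative, not part of this header), assuming std::vector is available
// and the caller keeps the backing storage alive while the code is used:
//
//   class VectorCodeAllocator : public CodeAllocator {
//    public:
//     VectorCodeAllocator() {}
//     virtual uint8_t* Allocate(size_t size) {
//       memory_.resize(size);
//       return &memory_[0];
//     }
//     const std::vector<uint8_t>& GetMemory() const { return memory_; }
//
//    private:
//     std::vector<uint8_t> memory_;
//     DISALLOW_COPY_AND_ASSIGN(VectorCodeAllocator);
//   };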

struct PcInfo {
  uint32_t dex_pc;
  uintptr_t native_pc;
};

class SlowPathCode : public ArenaObject {
 public:
  SlowPathCode() : entry_label_(), exit_label_() {}
  virtual ~SlowPathCode() {}

  Label* GetEntryLabel() { return &entry_label_; }
  Label* GetExitLabel() { return &exit_label_; }

  virtual void EmitNativeCode(CodeGenerator* codegen) = 0;

 private:
  Label entry_label_;
  Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
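
// A concrete slow path (hypothetical sketch; real ones live in the backend
// specific code generators) typically binds entry_label_ first, emits its
// out-of-line code, then branches back to exit_label_ so the fast path can
// resume:
//
//   virtual void EmitNativeCode(CodeGenerator* codegen) {
//     codegen->Bind(GetEntryLabel());
//     // ... out-of-line work, e.g. a runtime call ...
//     // ... branch back to GetExitLabel() with the target assembler ...
//   }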

class CodeGenerator : public ArenaObject {
 public:
  // Compiles the graph to executable instructions.
  void CompileBaseline(CodeAllocator* allocator, bool is_leaf = false);
  void CompileOptimized(CodeAllocator* allocator);
  static CodeGenerator* Create(ArenaAllocator* allocator,
                               HGraph* graph,
                               InstructionSet instruction_set);

  HGraph* GetGraph() const { return graph_; }

  Label* GetLabelOf(HBasicBlock* block) const;
  bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;

  size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
    // Note that this follows the current calling convention.
    return GetFrameSize()
        + kVRegSize  // Art method
        + parameter->GetIndex() * kVRegSize;
  }
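
  // Worked example, using the arithmetic above: with a 64-byte frame, the
  // Art method slot is accounted for at [sp + 64], so parameter 0 lands at
  // [sp + 68], parameter 1 at [sp + 72], and so on in kVRegSize steps.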

  virtual void GenerateFrameEntry() = 0;
  virtual void GenerateFrameExit() = 0;
  virtual void Bind(Label* label) = 0;
  virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
  virtual HGraphVisitor* GetLocationBuilder() = 0;
  virtual HGraphVisitor* GetInstructionVisitor() = 0;
  virtual Assembler* GetAssembler() = 0;
  virtual size_t GetWordSize() const = 0;
  void ComputeFrameSize(size_t number_of_spill_slots);
  virtual size_t FrameEntrySpillSize() const = 0;
  int32_t GetStackSlot(HLocal* local) const;
  Location GetTemporaryLocation(HTemporary* temp) const;

  uint32_t GetFrameSize() const { return frame_size_; }
  void SetFrameSize(uint32_t size) { frame_size_ = size; }
  uint32_t GetCoreSpillMask() const { return core_spill_mask_; }

  virtual size_t GetNumberOfCoreRegisters() const = 0;
  virtual size_t GetNumberOfFloatingPointRegisters() const = 0;
  virtual size_t GetNumberOfRegisters() const = 0;
  virtual void SetupBlockedRegisters(bool* blocked_registers) const = 0;
  virtual void DumpCoreRegister(std::ostream& stream, int reg) const = 0;
  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0;
  virtual InstructionSet GetInstructionSet() const = 0;

  void RecordPcInfo(uint32_t dex_pc) {
    PcInfo pc_info;
    pc_info.dex_pc = dex_pc;
    pc_info.native_pc = GetAssembler()->CodeSize();
    pc_infos_.Add(pc_info);
  }
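
  // The recorded native pc is simply the number of bytes emitted so far
  // (GetAssembler()->CodeSize()), so this is meant to be called right after
  // emitting the instruction whose pc needs a dex pc mapping.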

  void AddSlowPath(SlowPathCode* slow_path) {
    slow_paths_.Add(slow_path);
  }

  void GenerateSlowPaths();

  void BuildMappingTable(std::vector<uint8_t>* vector) const;
  void BuildVMapTable(std::vector<uint8_t>* vector) const;
  void BuildNativeGCMap(
      std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;

  bool IsLeafMethod() const {
    return is_leaf_;
  }

  void MarkNotLeaf() {
    is_leaf_ = false;
  }

 protected:
  CodeGenerator(HGraph* graph, size_t number_of_registers)
      : frame_size_(kUninitializedFrameSize),
        graph_(graph),
        block_labels_(graph->GetArena(), 0),
        pc_infos_(graph->GetArena(), 32),
        slow_paths_(graph->GetArena(), 8),
        blocked_registers_(graph->GetArena()->AllocArray<bool>(number_of_registers)),
        is_leaf_(true) {}
  ~CodeGenerator() {}

  // Register allocation logic.
  void AllocateRegistersLocally(HInstruction* instruction) const;

  // Backend specific implementation for allocating a register.
  virtual ManagedRegister AllocateFreeRegister(Primitive::Type type,
                                               bool* blocked_registers) const = 0;

  // Raw implementation of allocating a register: loops over blocked_registers to find
  // the first available register.
  size_t AllocateFreeRegisterInternal(bool* blocked_registers, size_t number_of_registers) const;
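
  // A sketch of that loop, assuming the definition also marks the returned
  // register as blocked:
  //
  //   for (size_t i = 0; i < number_of_registers; ++i) {
  //     if (!blocked_registers[i]) {
  //       blocked_registers[i] = true;  // Claim the register.
  //       return i;
  //     }
  //   }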

  virtual Location GetStackLocation(HLoadLocal* load) const = 0;

  // Frame size required for this method.
  uint32_t frame_size_;
  uint32_t core_spill_mask_;

 private:
  void InitLocations(HInstruction* instruction);

  HGraph* const graph_;

  // Labels for each block that will be compiled.
  GrowableArray<Label> block_labels_;
  GrowableArray<PcInfo> pc_infos_;
  GrowableArray<SlowPathCode*> slow_paths_;

  // Temporary data structure used when doing register allocation.
  bool* const blocked_registers_;

  bool is_leaf_;

  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};

template <typename T>
class CallingConvention {
 public:
  CallingConvention(const T* registers, size_t number_of_registers)
      : registers_(registers), number_of_registers_(number_of_registers) {}

  size_t GetNumberOfRegisters() const { return number_of_registers_; }

  T GetRegisterAt(size_t index) const {
    DCHECK_LT(index, number_of_registers_);
    return registers_[index];
  }

  uint8_t GetStackOffsetOf(size_t index) const {
    // We still reserve the space for parameters passed by registers.
    // Add one for the method pointer.
    return (index + 1) * kVRegSize;
  }
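
  // For example, with 4-byte vregs: slot 0 of the outgoing argument area
  // holds the method pointer, so GetStackOffsetOf(0) == 4 and
  // GetStackOffsetOf(1) == 8, even for parameters passed in registers.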

 private:
  const T* registers_;
  const size_t number_of_registers_;

  DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};

}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_