/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_
#define ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_

#include <stdint.h>
#include <memory>
#include <vector>

#include "base/logging.h"
#include "constants_arm64.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/assembler.h"
#include "offsets.h"

// TODO: make vixl clean wrt -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wmissing-noreturn"
#include "vixl/a64/macro-assembler-a64.h"
#include "vixl/a64/disasm-a64.h"
#pragma GCC diagnostic pop

namespace art {
namespace arm64 {

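// Shorthand used below: forwards its arguments straight to the vixl::MemOperand
// constructor, e.g. MEM_OP(reg_x(SP), offset) builds a [sp, #offset] operand
// (illustrative use only).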
#define MEM_OP(...)      vixl::MemOperand(__VA_ARGS__)

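// Informal note: these operand types select the access width and sign/zero
// extension used by the private load/store helpers below. The byte and
// halfword variants sign- or zero-extend as named, kLoadWord/kStoreWord
// operate on 32-bit W registers, kLoadCoreWord/kStoreCoreWord on 64-bit X
// registers, and the SWord/DWord variants on single- and double-precision
// FP registers.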
enum LoadOperandType {
  kLoadSignedByte,
  kLoadUnsignedByte,
  kLoadSignedHalfword,
  kLoadUnsignedHalfword,
  kLoadWord,
  kLoadCoreWord,
  kLoadSWord,
  kLoadDWord
};

enum StoreOperandType {
  kStoreByte,
  kStoreHalfword,
  kStoreWord,
  kStoreCoreWord,
  kStoreSWord,
  kStoreDWord
};

class Arm64Exception;

class Arm64Assembler FINAL : public Assembler {
 public:
  // We indicate the size of the initial code generation buffer to the VIXL
  // assembler. From there it will automatically manage the buffer.
  Arm64Assembler() : vixl_masm_(new vixl::MacroAssembler(kArm64BaseBufferSize)) {}

  virtual ~Arm64Assembler() {
    delete vixl_masm_;
  }

  // Emit slow paths queued during assembly.
  void EmitSlowPaths();

  // Size of generated code.
  size_t CodeSize() const;

  // Copy instructions out of the assembly buffer into the given region of memory.
  void FinalizeInstructions(const MemoryRegion& region);

  void SpillRegisters(vixl::CPURegList registers, int offset);
  void UnspillRegisters(vixl::CPURegList registers, int offset);

  // Emit code that will create an activation on the stack.
  void BuildFrame(size_t frame_size, ManagedRegister method_reg,
                  const std::vector<ManagedRegister>& callee_save_regs,
                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;

  // Emit code that will remove an activation from the stack.
  void RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs)
      OVERRIDE;

  void IncreaseFrameSize(size_t adjust) OVERRIDE;
  void DecreaseFrameSize(size_t adjust) OVERRIDE;

  // Store routines.
  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
  void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm, ManagedRegister scratch)
      OVERRIDE;
  void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
                                  ManagedRegister scratch) OVERRIDE;
  void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;
  void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
                     ManagedRegister scratch) OVERRIDE;

  // Load routines.
  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
  void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
  void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
               bool poison_reference) OVERRIDE;
  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
  void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;

  // Copying routines.
  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
  void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
                              ManagedRegister scratch) OVERRIDE;
  void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
      OVERRIDE;
  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
            size_t size) OVERRIDE;
  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
            size_t size) OVERRIDE;
  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
            size_t size) OVERRIDE;
  void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
            ManagedRegister scratch, size_t size) OVERRIDE;
  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
            ManagedRegister scratch, size_t size) OVERRIDE;
  void MemoryBarrier(ManagedRegister scratch) OVERRIDE;

  // Sign extension.
  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Zero extension.
  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Exploit fast access in managed code to Thread::Current().
  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;

  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the handle scope entry to see if the value is
  // null.
  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                       ManagedRegister in_reg, bool null_allowed) OVERRIDE;

  // Set up out_off to hold an Object** into the handle scope, or to be null if the
  // value is null and null_allowed.
  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
                       ManagedRegister scratch, bool null_allowed) OVERRIDE;

  // src holds a handle scope entry (Object**); load it into dst.
  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;

  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
  // know that src cannot be null.
  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;

  // Call to address held at [base+offset].
  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
  void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) OVERRIDE;

  // Jump to address (not setting link register).
  void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an ExceptionSlowPath if it is.
  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;

 private:
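  // Helpers mapping ART register codes to VIXL register objects. SP/XZR (and
  // WSP/WZR) share the same hardware encoding, so they are special-cased
  // explicitly below rather than going through {X,W}RegFromCode.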
  static vixl::Register reg_x(int code) {
    CHECK(code < kNumberOfXRegisters) << code;
    if (code == SP) {
      return vixl::sp;
    } else if (code == XZR) {
      return vixl::xzr;
    }
    return vixl::Register::XRegFromCode(code);
  }

  static vixl::Register reg_w(int code) {
    CHECK(code < kNumberOfWRegisters) << code;
    if (code == WSP) {
      return vixl::wsp;
    } else if (code == WZR) {
      return vixl::wzr;
    }
    return vixl::Register::WRegFromCode(code);
  }

  static vixl::FPRegister reg_d(int code) {
    return vixl::FPRegister::DRegFromCode(code);
  }

  static vixl::FPRegister reg_s(int code) {
    return vixl::FPRegister::SRegFromCode(code);
  }

  // Emits an exception block.
  void EmitExceptionPoll(Arm64Exception* exception);

  void StoreWToOffset(StoreOperandType type, WRegister source,
                      XRegister base, int32_t offset);
  void StoreToOffset(XRegister source, XRegister base, int32_t offset);
  void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
  void StoreDToOffset(DRegister source, XRegister base, int32_t offset);

  void LoadImmediate(XRegister dest, int32_t value, vixl::Condition cond = vixl::al);
  void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
  void LoadWFromOffset(LoadOperandType type, WRegister dest,
                       XRegister base, int32_t offset);
  void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
  void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
  void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
  void AddConstant(XRegister rd, int32_t value, vixl::Condition cond = vixl::al);
  void AddConstant(XRegister rd, XRegister rn, int32_t value, vixl::Condition cond = vixl::al);

  // List of exception blocks to generate at the end of the code cache.
  std::vector<Arm64Exception*> exception_blocks_;

 public:
  // Vixl assembler.
  vixl::MacroAssembler* const vixl_masm_;

  // Used for testing.
  friend class Arm64ManagedRegister_VixlRegisters_Test;
};
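
// Illustrative sketch (not part of this header): a typical client builds a
// frame, emits loads/stores/calls, tears the frame down, and then flushes the
// generated code. The argument names below are hypothetical placeholders.
//
//   Arm64Assembler assembler;
//   assembler.BuildFrame(frame_size, method_reg, callee_save_regs, entry_spills);
//   ...  // Store/Load/Copy/Call as needed for the stub being generated.
//   assembler.RemoveFrame(frame_size, callee_save_regs);
//   assembler.EmitSlowPaths();
//   size_t code_size = assembler.CodeSize();
//   // The caller allocates a MemoryRegion of code_size bytes and copies the
//   // code out with assembler.FinalizeInstructions(region).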

class Arm64Exception {
 private:
  explicit Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
      : scratch_(scratch), stack_adjust_(stack_adjust) {
  }

  vixl::Label* Entry() { return &exception_entry_; }

  // Register used for passing Thread::Current()->exception_.
  const Arm64ManagedRegister scratch_;

  // Stack adjustment used by ExceptionPoll.
  const size_t stack_adjust_;

  vixl::Label exception_entry_;

  friend class Arm64Assembler;
  DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
};

}  // namespace arm64
}  // namespace art

#endif  // ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_