/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ASSEMBLER_H_
#define ART_COMPILER_UTILS_ASSEMBLER_H_

#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "arm/constants_arm.h"
#include "mips/constants_mips.h"
#include "x86/constants_x86.h"
#include "x86_64/constants_x86_64.h"
#include "instruction_set.h"
#include "managed_register.h"
#include "memory_region.h"
#include "offsets.h"

namespace art {

class Assembler;
class AssemblerBuffer;
class AssemblerFixup;

namespace arm {
  class ArmAssembler;
  class Arm32Assembler;
  class Thumb2Assembler;
}
namespace arm64 {
  class Arm64Assembler;
}
namespace mips {
  class MipsAssembler;
}
namespace x86 {
  class X86Assembler;
}
namespace x86_64 {
  class X86_64Assembler;
}
class ExternalLabel {
 public:
  ExternalLabel(const char* name, uword address)
      : name_(name), address_(address) {
    DCHECK(name != nullptr);
  }

  const char* name() const { return name_; }
  uword address() const {
    return address_;
  }

 private:
  const char* name_;
  const uword address_;
};

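// A Label encodes its three states (see the predicates below) in the single
// field position_: 0 means unused, a positive value means linked, and a
// negative value means bound. The stored value is biased by kPointerSize so
// that buffer position 0 stays distinguishable from the unused state.
// A worked example, assuming kPointerSize == 4: linking at buffer position 8
// stores position_ = 8 + 4 = 12, and LinkPosition() recovers 12 - 4 = 8;
// binding at position 8 stores position_ = -(8 + 4) = -12, and Position()
// recovers -(-12) - 4 = 8.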
class Label {
 public:
  Label() : position_(0) {}

  ~Label() {
    // Assert if label is being destroyed with unresolved branches pending.
    CHECK(!IsLinked());
  }

  // Returns the position for bound and linked labels. Cannot be used
  // for unused labels.
  int Position() const {
    CHECK(!IsUnused());
    return IsBound() ? -position_ - kPointerSize : position_ - kPointerSize;
  }

  int LinkPosition() const {
    CHECK(IsLinked());
    return position_ - kPointerSize;
  }

  bool IsBound() const { return position_ < 0; }
  bool IsUnused() const { return position_ == 0; }
  bool IsLinked() const { return position_ > 0; }

 private:
  int position_;

  void Reinitialize() {
    position_ = 0;
  }

  void BindTo(int position) {
    CHECK(!IsBound());
    position_ = -position - kPointerSize;
    CHECK(IsBound());
  }

  void LinkTo(int position) {
    CHECK(!IsBound());
    position_ = position + kPointerSize;
    CHECK(IsLinked());
  }

  friend class arm::ArmAssembler;
  friend class arm::Arm32Assembler;
  friend class arm::Thumb2Assembler;
  friend class mips::MipsAssembler;
  friend class x86::X86Assembler;
  friend class x86_64::X86_64Assembler;

  DISALLOW_COPY_AND_ASSIGN(Label);
};


// Assembler fixups are positions in generated code that require processing
// after the code has been copied to executable memory. This includes building
// relocation information.
class AssemblerFixup {
 public:
  virtual void Process(const MemoryRegion& region, int position) = 0;
  virtual ~AssemblerFixup() {}

 private:
  AssemblerFixup* previous_;
  int position_;

  AssemblerFixup* previous() const { return previous_; }
  void set_previous(AssemblerFixup* previous) { previous_ = previous; }

  int position() const { return position_; }
  void set_position(int position) { position_ = position; }

  friend class AssemblerBuffer;
};

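// Concrete fixups override Process(), which runs once the code has been
// copied to its final location. A sketch of a hypothetical subclass (not part
// of this header; assumes MemoryRegion exposes its base address via start())
// that collects absolute patch addresses:
//
//     class RelocationFixup : public AssemblerFixup {
//      public:
//       explicit RelocationFixup(std::vector<uintptr_t>* table) : table_(table) {}
//       virtual void Process(const MemoryRegion& region, int position) {
//         table_->push_back(reinterpret_cast<uintptr_t>(region.start()) + position);
//       }
//      private:
//       std::vector<uintptr_t>* table_;
//     };
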
// Parent of all queued slow paths, emitted during finalization
class SlowPath {
 public:
  SlowPath() : next_(NULL) {}
  virtual ~SlowPath() {}

  Label* Continuation() { return &continuation_; }
  Label* Entry() { return &entry_; }
  // Generate code for slow path
  virtual void Emit(Assembler* sp_asm) = 0;

 protected:
  // Entry branched to by fast path
  Label entry_;
  // Optional continuation that is branched to at the end of the slow path
  Label continuation_;
  // Next in linked list of slow paths
  SlowPath* next_;

 private:
  friend class AssemblerBuffer;
  DISALLOW_COPY_AND_ASSIGN(SlowPath);
};

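// Slow paths are defined out of line and queued on the buffer while the fast
// path is being emitted. A sketch of a hypothetical subclass (not part of
// this header):
//
//     class MySlowPath : public SlowPath {
//      public:
//       virtual void Emit(Assembler* sp_asm) {
//         // Bind entry_ (the label the fast path branched to), emit the
//         // out-of-line code, then branch back to continuation_.
//       }
//     };
//
// The fast path branches to Entry() in the uncommon case; EnqueueSlowPath()
// appends the object to the buffer's list, and EmitSlowPaths() later emits
// and deletes each queued path in FIFO order.
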
class AssemblerBuffer {
 public:
  AssemblerBuffer();
  ~AssemblerBuffer();

  // Basic support for emitting, loading, and storing.
  template<typename T> void Emit(T value) {
    CHECK(HasEnsuredCapacity());
    *reinterpret_cast<T*>(cursor_) = value;
    cursor_ += sizeof(T);
  }

  template<typename T> T Load(size_t position) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    return *reinterpret_cast<T*>(contents_ + position);
  }

  template<typename T> void Store(size_t position, T value) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    *reinterpret_cast<T*>(contents_ + position) = value;
  }

  void Move(size_t newposition, size_t oldposition) {
    CHECK(HasEnsuredCapacity());
    // Move the contents of the buffer from oldposition to
    // newposition by nbytes.
    size_t nbytes = Size() - oldposition;
    memmove(contents_ + newposition, contents_ + oldposition, nbytes);
    cursor_ += newposition - oldposition;
  }

  // Emit a fixup at the current location.
  void EmitFixup(AssemblerFixup* fixup) {
    fixup->set_previous(fixup_);
    fixup->set_position(Size());
    fixup_ = fixup;
  }

  void EnqueueSlowPath(SlowPath* slowpath) {
    if (slow_path_ == NULL) {
      slow_path_ = slowpath;
    } else {
      SlowPath* cur = slow_path_;
      for ( ; cur->next_ != NULL ; cur = cur->next_) {}
      cur->next_ = slowpath;
    }
  }

  void EmitSlowPaths(Assembler* sp_asm) {
    SlowPath* cur = slow_path_;
    SlowPath* next = NULL;
    slow_path_ = NULL;
    for ( ; cur != NULL ; cur = next) {
      cur->Emit(sp_asm);
      next = cur->next_;
      delete cur;
    }
  }

  // Get the size of the emitted code.
  size_t Size() const {
    CHECK_GE(cursor_, contents_);
    return cursor_ - contents_;
  }

  byte* contents() const { return contents_; }

  // Copy the assembled instructions into the specified memory block
  // and apply all fixups.
  void FinalizeInstructions(const MemoryRegion& region);

  // To emit an instruction to the assembler buffer, the EnsureCapacity helper
  // must be used to guarantee that the underlying data area is big enough to
  // hold the emitted instruction. Usage:
  //
  //     AssemblerBuffer buffer;
  //     AssemblerBuffer::EnsureCapacity ensured(&buffer);
  //     ... emit bytes for single instruction ...

#ifndef NDEBUG

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() >= buffer->limit()) {
        buffer->ExtendCapacity();
      }
      // In debug mode, we save the assembler buffer along with the gap
      // size before we start emitting to the buffer. This allows us to
      // check that any single generated instruction doesn't overflow the
      // limit implied by the minimum gap size.
      buffer_ = buffer;
      gap_ = ComputeGap();
      // Make sure that extending the capacity leaves a big enough gap
      // for any kind of instruction.
      CHECK_GE(gap_, kMinimumGap);
      // Mark the buffer as having ensured the capacity.
      CHECK(!buffer->HasEnsuredCapacity());  // Cannot nest.
      buffer->has_ensured_capacity_ = true;
    }

    ~EnsureCapacity() {
      // Unmark the buffer, so we cannot emit after this.
      buffer_->has_ensured_capacity_ = false;
      // Make sure the generated instruction doesn't take up more
      // space than the minimum gap.
      int delta = gap_ - ComputeGap();
      CHECK_LE(delta, kMinimumGap);
    }

   private:
    AssemblerBuffer* buffer_;
    int gap_;

    int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
  };

  bool has_ensured_capacity_;
  bool HasEnsuredCapacity() const { return has_ensured_capacity_; }

#else

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
    }
  };

  // When building the C++ tests, assertion code is enabled. To allow
  // asserting that the user of the assembler buffer has ensured the
  // capacity needed for emitting, we add a dummy method in non-debug mode.
  bool HasEnsuredCapacity() const { return true; }

#endif

  // Returns the position in the instruction stream.
  int GetPosition() { return cursor_ - contents_; }

 private:
  // The limit is set to kMinimumGap bytes before the end of the data area.
  // This leaves enough space for the longest possible instruction and allows
  // for a single, fast space check per instruction.
  static const int kMinimumGap = 32;
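  // A worked example of the gap arithmetic: with a 256-byte data area,
  // ComputeLimit() places limit_ at contents_ + 224, Capacity() reports
  // 224 + 32 = 256 bytes, and a single instruction may run up to
  // kMinimumGap bytes past limit_ before the next EnsureCapacity check
  // extends the buffer.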

  byte* contents_;
  byte* cursor_;
  byte* limit_;
  AssemblerFixup* fixup_;
#ifndef NDEBUG
  bool fixups_processed_;
#endif

  // Head of linked list of slow paths
  SlowPath* slow_path_;

  byte* cursor() const { return cursor_; }
  byte* limit() const { return limit_; }
  size_t Capacity() const {
    CHECK_GE(limit_, contents_);
    return (limit_ - contents_) + kMinimumGap;
  }

  // Process the fixup chain starting at the given fixup. The offset is
  // non-zero for fixups in the body if the preamble is non-empty.
  void ProcessFixups(const MemoryRegion& region);

  // Compute the limit based on the data area and the capacity. See
  // description of kMinimumGap for the reasoning behind the value.
  static byte* ComputeLimit(byte* data, size_t capacity) {
    return data + capacity - kMinimumGap;
  }

  void ExtendCapacity();

  friend class AssemblerFixup;
};

class Assembler {
 public:
  static Assembler* Create(InstructionSet instruction_set);

  // Emit slow paths queued during assembly
  virtual void EmitSlowPaths() { buffer_.EmitSlowPaths(this); }

  // Size of generated code
  virtual size_t CodeSize() const { return buffer_.Size(); }

  // Copy instructions out of assembly buffer into the given region of memory
  virtual void FinalizeInstructions(const MemoryRegion& region) {
    buffer_.FinalizeInstructions(region);
  }
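
  // Typical lifecycle, as a sketch (buffer allocation and error handling
  // elided; assumes MemoryRegion is constructible from a base pointer and a
  // size per memory_region.h, and kThumb2 from instruction_set.h):
  //
  //     Assembler* assembler = Assembler::Create(kThumb2);
  //     ... emit instructions via the target-specific subclass interface ...
  //     assembler->EmitSlowPaths();
  //     size_t size = assembler->CodeSize();
  //     MemoryRegion region(code_ptr, size);  // code_ptr: caller-provided
  //     assembler->FinalizeInstructions(region);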

  // TODO: Implement with disassembler.
  virtual void Comment(const char* format, ...) {}

  // Emit code that will create an activation on the stack
  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
                          const std::vector<ManagedRegister>& callee_save_regs,
                          const ManagedRegisterEntrySpills& entry_spills) = 0;

  // Emit code that will remove an activation from the stack
  virtual void RemoveFrame(size_t frame_size,
                           const std::vector<ManagedRegister>& callee_save_regs) = 0;

  virtual void IncreaseFrameSize(size_t adjust) = 0;
  virtual void DecreaseFrameSize(size_t adjust) = 0;

  // Store routines
  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
  virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;

  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                     ManagedRegister scratch) = 0;

  virtual void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
                                        ManagedRegister scratch);
  virtual void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
                                        ManagedRegister scratch);

  virtual void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
                                          FrameOffset fr_offs,
                                          ManagedRegister scratch);
  virtual void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
                                          FrameOffset fr_offs,
                                          ManagedRegister scratch);

  virtual void StoreStackPointerToThread32(ThreadOffset<4> thr_offs);
  virtual void StoreStackPointerToThread64(ThreadOffset<8> thr_offs);

  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
                             FrameOffset in_off, ManagedRegister scratch) = 0;

  // Load routines
  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;

  virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size);
  virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size);

  virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
  virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) = 0;

  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;

  virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs);
  virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs);

  // Copying routines
  virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;

  virtual void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
                                      ManagedRegister scratch);
  virtual void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
                                      ManagedRegister scratch);

  virtual void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
                                    ManagedRegister scratch);
  virtual void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
                                    ManagedRegister scratch);

  virtual void CopyRef(FrameOffset dest, FrameOffset src,
                       ManagedRegister scratch) = 0;

  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(ManagedRegister dest, Offset dest_offset,
                    ManagedRegister src, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void MemoryBarrier(ManagedRegister scratch) = 0;

  // Sign extension
  virtual void SignExtend(ManagedRegister mreg, size_t size) = 0;

  // Zero extension
  virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0;

  // Exploit fast access in managed code to Thread::Current()
  virtual void GetCurrentThread(ManagedRegister tr) = 0;
  virtual void GetCurrentThread(FrameOffset dest_offset,
                                ManagedRegister scratch) = 0;

  // Set up out_reg to hold an Object** into the handle scope, or to be NULL
  // if the value is null and null_allowed. in_reg holds a possibly stale
  // reference that can be used to avoid loading the handle scope entry to
  // see if the value is NULL.
  virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                               ManagedRegister in_reg, bool null_allowed) = 0;

  // Set up out_off to hold an Object** into the handle scope, or to be NULL
  // if the value is null and null_allowed.
  virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
                               ManagedRegister scratch, bool null_allowed) = 0;

  // src holds a handle scope entry (Object**); load this into dst.
  virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
                                     ManagedRegister src) = 0;

  // Run Heap::VerifyObject on src. In some cases (such as a reference to
  // this) we know that src cannot be null.
  virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
  virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;

  // Call to address held at [base+offset]
  virtual void Call(ManagedRegister base, Offset offset,
                    ManagedRegister scratch) = 0;
  virtual void Call(FrameOffset base, Offset offset,
                    ManagedRegister scratch) = 0;
  virtual void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch);
  virtual void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch);

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an ExceptionSlowPath if it is.
  virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;

  virtual ~Assembler() {}

 protected:
  Assembler() : buffer_() {}

  AssemblerBuffer buffer_;
};

}  // namespace art

#endif  // ART_COMPILER_UTILS_ASSEMBLER_H_