/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_STACK_H_
#define ART_RUNTIME_STACK_H_

#include <stdint.h>

#include <iosfwd>  // For the std::ostream used by operator<<(std::ostream&, const VRegKind&).
#include <string>

#include "arch/instruction_set.h"
#include "base/bit_utils.h"
#include "dex_file.h"
#include "gc_root.h"
#include "mirror/object_reference.h"
#include "read_barrier.h"
#include "verify_object.h"

namespace art {

namespace mirror {
  class Object;
}  // namespace mirror

class ArtMethod;
class Context;
class ShadowFrame;
class HandleScope;
class ScopedObjectAccess;
class StackVisitor;
class Thread;

// The kind of vreg being accessed in calls to Set/GetVReg.
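// Wide (64-bit) values occupy a pair of adjacent vregs; the *LoVReg/*HiVReg
// kinds name the low and high halves of such a pair.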
enum VRegKind {
  kReferenceVReg,
  kIntVReg,
  kFloatVReg,
  kLongLoVReg,
  kLongHiVReg,
  kDoubleLoVReg,
  kDoubleHiVReg,
  kConstant,
  kImpreciseConstant,
  kUndefined,
};
std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);

// A reference from the shadow stack to a MirrorType object within the Java heap.
template<class MirrorType>
class MANAGED StackReference : public mirror::CompressedReference<MirrorType> {
};

// ShadowFrame has 2 possible layouts:
//  - interpreter - separate VRegs and reference arrays. References are in the reference array.
//  - JNI - just VRegs, but where every VReg holds a reference.
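// With a reference array, the vregs_ array is immediately followed in memory by
// NumberOfVRegs() StackReference slots; see ComputeSize() and References() below.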
class ShadowFrame {
 public:
  // Compute size of ShadowFrame in bytes assuming it has a reference array.
  static size_t ComputeSize(uint32_t num_vregs) {
    return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) +
           (sizeof(StackReference<mirror::Object>) * num_vregs);
  }

  // Create ShadowFrame in heap for deoptimization.
  static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link,
                                             ArtMethod* method, uint32_t dex_pc) {
    uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
    return Create(num_vregs, link, method, dex_pc, memory);
  }

  // Delete a ShadowFrame allocated on the heap for deoptimization.
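  // The frame was placement-newed into a raw uint8_t buffer in
  // CreateDeoptimizedFrame; ~ShadowFrame is trivial, so it is safe to delete
  // the buffer without running the destructor.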
  static void DeleteDeoptimizedFrame(ShadowFrame* sf) {
    uint8_t* memory = reinterpret_cast<uint8_t*>(sf);
    delete[] memory;
  }

  // Create ShadowFrame for interpreter using provided memory.
  static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link,
                             ArtMethod* method, uint32_t dex_pc, void* memory) {
    ShadowFrame* sf = new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
    return sf;
  }
  ~ShadowFrame() {}

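  // Frames built through Create()/CreateDeoptimizedFrame() always carry a
  // reference array, so this is unconditionally true in the current layout.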
  bool HasReferenceArray() const {
    return true;
  }

  uint32_t NumberOfVRegs() const {
    return number_of_vregs_;
  }

  uint32_t GetDexPC() const {
    return dex_pc_;
  }

  void SetDexPC(uint32_t dex_pc) {
    dex_pc_ = dex_pc;
  }

  ShadowFrame* GetLink() const {
    return link_;
  }

  void SetLink(ShadowFrame* frame) {
    DCHECK_NE(this, frame);
    link_ = frame;
  }

  int32_t GetVReg(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const int32_t*>(vreg);
  }

  float GetVRegFloat(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    // NOTE: This reinterpret_cast technically violates strict aliasing; the
    // toolchains in use tolerate this kind of type punning.
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const float*>(vreg);
  }

  int64_t GetVRegLong(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_int64*>(vreg);
  }

  double GetVRegDouble(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef const double unaligned_double __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_double*>(vreg);
  }

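  // Reads a reference either from the reference array or, for JNI-style frames,
  // from the vreg slot itself, optionally checking the read-barrier to-space
  // invariant and running the object verifier.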
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  mirror::Object* GetVRegReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    mirror::Object* ref;
    if (HasReferenceArray()) {
      ref = References()[i].AsMirrorPtr();
    } else {
      const uint32_t* vreg_ptr = &vregs_[i];
      ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
    }
    if (kUseReadBarrier) {
      ReadBarrier::AssertToSpaceInvariant(ref);
    }
    if (kVerifyFlags & kVerifyReads) {
      VerifyObject(ref);
    }
    return ref;
  }

  // Get view of vregs as range of consecutive arguments starting at i.
  uint32_t* GetVRegArgs(size_t i) {
    return &vregs_[i];
  }

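  // The primitive setters below also clear the matching reference-array slot:
  // a moving collector updates any vreg whose bits agree with the reference
  // array, so a stale entry there could silently overwrite the new value.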
  void SetVReg(size_t i, int32_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<int32_t*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
    }
  }

  void SetVRegFloat(size_t i, float val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<float*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
    }
  }

  void SetVRegLong(size_t i, int64_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_int64*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
      References()[i + 1].Clear();
    }
  }

  void SetVRegDouble(size_t i, double val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef double unaligned_double __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_double*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
      References()[i + 1].Clear();
    }
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  void SetVRegReference(size_t i, mirror::Object* val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    if (kVerifyFlags & kVerifyWrites) {
      VerifyObject(val);
    }
    if (kUseReadBarrier) {
      ReadBarrier::AssertToSpaceInvariant(val);
    }
    uint32_t* vreg = &vregs_[i];
    reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val);
    if (HasReferenceArray()) {
      References()[i].Assign(val);
    }
  }

  ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(method_ != nullptr);
    return method_;
  }

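  // Returns the 'this' object of the method executing in this frame (null for
  // static methods); the num_ins overload is for callers that already know the
  // method's incoming-argument count.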
  mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
    if (HasReferenceArray()) {
      return ((&References()[0] <= shadow_frame_entry_obj) &&
              (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
    } else {
      uint32_t* shadow_frame_entry = reinterpret_cast<uint32_t*>(shadow_frame_entry_obj);
      return ((&vregs_[0] <= shadow_frame_entry) &&
              (shadow_frame_entry <= (&vregs_[NumberOfVRegs() - 1])));
    }
  }

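  // Byte offsets of the fixed fields, for code that addresses ShadowFrame
  // members without going through the accessors (e.g. generated or assembly
  // code).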
  static size_t LinkOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, link_);
  }

  static size_t MethodOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, method_);
  }

  static size_t DexPCOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
  }

  static size_t NumberOfVRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
  }

  static size_t VRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, vregs_);
  }

 private:
  ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
              uint32_t dex_pc, bool has_reference_array)
      : number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) {
    if (has_reference_array) {
      memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
    } else {
      memset(vregs_, 0, num_vregs * sizeof(uint32_t));
    }
  }

  const StackReference<mirror::Object>* References() const {
    DCHECK(HasReferenceArray());
    const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
    return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end);
  }

  StackReference<mirror::Object>* References() {
    return const_cast<StackReference<mirror::Object>*>(
        const_cast<const ShadowFrame*>(this)->References());
  }

  const uint32_t number_of_vregs_;
  // Link to previous shadow frame or null.
  ShadowFrame* link_;
  ArtMethod* method_;
  uint32_t dex_pc_;
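  // Flexible array member: the vregs (and, when present, the trailing
  // reference array) are allocated inline after the fixed fields.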
  uint32_t vregs_[0];

  DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
};

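// Describes a GC root held in a vreg of a Java frame, so that root visitors
// can report which frame and register a reference came from.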
class JavaFrameRootInfo : public RootInfo {
 public:
  JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
     : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
  }
  virtual void Describe(std::ostream& os) const OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  const StackVisitor* const stack_visitor_;
  const size_t vreg_;
};

// The managed stack is used to record fragments of managed code stacks. Managed code stacks
// may either be shadow frames or lists of frames using fixed frame sizes. Transition records are
// necessary for transitions between code using different frame layouts and transitions into native
// code.
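// A thread keeps its current ManagedStack inline and pushes a fragment on each
// transition (see PushManagedStackFragment below), so the previous top frames
// can be restored when the transition unwinds.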
class PACKED(4) ManagedStack {
 public:
  ManagedStack()
      : top_quick_frame_(nullptr), link_(nullptr), top_shadow_frame_(nullptr) {}

  void PushManagedStackFragment(ManagedStack* fragment) {
    // Copy this top fragment into given fragment.
    memcpy(fragment, this, sizeof(ManagedStack));
    // Clear this fragment, which has become the top.
    memset(this, 0, sizeof(ManagedStack));
    // Link our top fragment onto the given fragment.
    link_ = fragment;
  }

  void PopManagedStackFragment(const ManagedStack& fragment) {
    DCHECK(&fragment == link_);
    // Copy this given fragment back to the top.
    memcpy(this, &fragment, sizeof(ManagedStack));
  }

  ManagedStack* GetLink() const {
    return link_;
  }

  ArtMethod** GetTopQuickFrame() const {
    return top_quick_frame_;
  }

  void SetTopQuickFrame(ArtMethod** top) {
    DCHECK(top_shadow_frame_ == nullptr);
    top_quick_frame_ = top;
  }

  static size_t TopQuickFrameOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    DCHECK(top_quick_frame_ == nullptr);
    ShadowFrame* old_frame = top_shadow_frame_;
    top_shadow_frame_ = new_top_frame;
    new_top_frame->SetLink(old_frame);
    return old_frame;
  }

  ShadowFrame* PopShadowFrame() {
    DCHECK(top_quick_frame_ == nullptr);
    CHECK(top_shadow_frame_ != nullptr);
    ShadowFrame* frame = top_shadow_frame_;
    top_shadow_frame_ = frame->GetLink();
    return frame;
  }

  ShadowFrame* GetTopShadowFrame() const {
    return top_shadow_frame_;
  }

  void SetTopShadowFrame(ShadowFrame* top) {
    DCHECK(top_quick_frame_ == nullptr);
    top_shadow_frame_ = top;
  }

  static size_t TopShadowFrameOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
  }

  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;

 private:
  ArtMethod** top_quick_frame_;
  ManagedStack* link_;
  ShadowFrame* top_shadow_frame_;
};

class StackVisitor {
 public:
  // This enum defines a flag to control whether inlined frames are included
  // when walking the stack.
  enum class StackWalkKind {
    kIncludeInlinedFrames,
    kSkipInlinedFrames,
  };

 protected:
  StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 public:
  virtual ~StackVisitor() {}

  // Return 'true' if we should continue to visit more frames, 'false' to stop.
  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;

  void WalkStack(bool include_transitions = false)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
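
  // Typical usage is to subclass, override VisitFrame(), then call WalkStack().
  // A minimal sketch (FrameCounter is illustrative, not part of the runtime):
  //
  //   class FrameCounter : public StackVisitor {
  //    public:
  //     FrameCounter(Thread* thread, Context* context)
  //         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
  //         : StackVisitor(thread, context, StackWalkKind::kSkipInlinedFrames) {}
  //     bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
  //       ++count;      // GetMethod() and GetDexPc() are valid here.
  //       return true;  // Keep walking towards older frames.
  //     }
  //     size_t count = 0;
  //   };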

  ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (cur_shadow_frame_ != nullptr) {
      return cur_shadow_frame_->GetMethod();
    } else if (cur_quick_frame_ != nullptr) {
      return *cur_quick_frame_;
    } else {
      return nullptr;
    }
  }

  bool IsShadowFrame() const {
    return cur_shadow_frame_ != nullptr;
  }

  uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t GetNativePcOffset() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Callee saves are held at the top of the frame.
    DCHECK(GetMethod() != nullptr);
    uint8_t* save_addr =
        reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size - ((num + 1) * sizeof(void*));
#if defined(__i386__) || defined(__x86_64__)
    save_addr -= sizeof(void*);  // account for return address
#endif
    return reinterpret_cast<uintptr_t*>(save_addr);
  }

  // Returns the height of the stack in the managed stack frames, including transitions.
  size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetNumFrames() - cur_depth_ - 1;
  }

  // Returns a frame ID for JDWP use, starting from 1.
  size_t GetFrameId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetFrameHeight() + 1;
  }

  size_t GetNumFrames() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (num_frames_ == 0) {
      num_frames_ = ComputeNumFrames(thread_, walk_kind_);
    }
    return num_frames_;
  }

  size_t GetFrameDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return cur_depth_;
  }

  // Get the method and dex pc immediately after the one that's currently being visited.
  bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsReferenceVReg(ArtMethod* m, uint16_t vreg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                   uint64_t* val) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
                   VRegKind kind_lo, VRegKind kind_hi)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uintptr_t* GetGPRAddress(uint32_t reg) const;

  // This is a fast-path for getting/setting values in a quick frame.
  uint32_t* GetVRegAddrFromQuickCode(ArtMethod** cur_quick_frame,
                                     const DexFile::CodeItem* code_item,
                                     uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
                                     uint16_t vreg) const {
    int offset = GetVRegOffsetFromQuickCode(
        code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
    DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
    uint8_t* vreg_addr = reinterpret_cast<uint8_t*>(cur_quick_frame) + offset;
    return reinterpret_cast<uint32_t*>(vreg_addr);
  }

  uintptr_t GetReturnPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetReturnPc(uintptr_t new_ret_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Return sp-relative offset for a Dalvik virtual register, compiler
   * spill or Method* in bytes using Method*.
   * Note that (reg == -1) denotes an invalid Dalvik register. For the
   * positive values, the Dalvik registers come first, followed by the
   * Method*, followed by other special temporaries if any, followed by
   * regular compiler temporaries. As of now we only have the Method* as
   * a special compiler temporary.
   * A compiler temporary can be thought of as a virtual register that
   * does not exist in the dex but holds intermediate values to help
   * optimizations and code generation. A special compiler temporary is
   * one whose location in the frame is well known, while non-special ones
   * have no requirement on their location as long as the code generator
   * itself knows how to access them.
   *
   *     +-------------------------------+
   *     | IN[ins-1]                     |  {Note: resides in caller's frame}
   *     |       .                       |
   *     | IN[0]                         |
   *     | caller's ArtMethod            |  ... ArtMethod*
   *     +===============================+  {Note: start of callee's frame}
   *     | core callee-save spill        |  {variable sized}
   *     +-------------------------------+
   *     | fp callee-save spill          |
   *     +-------------------------------+
   *     | filler word                   |  {For compatibility, if V[locals-1] is used as wide}
   *     +-------------------------------+
   *     | V[locals-1]                   |
   *     | V[locals-2]                   |
   *     |      .                        |
   *     |      .                        |  ... (reg == 2)
   *     | V[1]                          |  ... (reg == 1)
   *     | V[0]                          |  ... (reg == 0) <---- "locals_start"
   *     +-------------------------------+
   *     | stack alignment padding       |  {0 to (kStackAlignWords-1) of padding}
   *     +-------------------------------+
   *     | Compiler temp region          |  ... (reg >= max_num_special_temps)
   *     |      .                        |
   *     |      .                        |
   *     | V[max_num_special_temps + 1]  |
   *     | V[max_num_special_temps + 0]  |
   *     +-------------------------------+
   *     | OUT[outs-1]                   |
   *     | OUT[outs-2]                   |
   *     |       .                       |
   *     | OUT[0]                        |
   *     | ArtMethod*                    |  ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
   *     +===============================+
   */
  static int GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
                                        uint32_t core_spills, uint32_t fp_spills,
                                        size_t frame_size, int reg, InstructionSet isa);

  static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
    // According to the stack model above, the first out is just above the Method* reference.
    return InstructionSetPointerSize(isa) + out_num * sizeof(uint32_t);
  }

  bool IsInInlinedFrame() const {
    return false;
  }

  uintptr_t GetCurrentQuickFramePc() const {
    return cur_quick_frame_pc_;
  }

  ArtMethod** GetCurrentQuickFrame() const {
    return cur_quick_frame_;
  }

  ShadowFrame* GetCurrentShadowFrame() const {
    return cur_shadow_frame_;
  }

  HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
    ArtMethod** sp = GetCurrentQuickFrame();
    // Skip the ArtMethod*; the handle scope comes next.
    return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
  }

  std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  // Private constructor used when num_frames_ has already been computed.
  StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
    return is_float ? IsAccessibleFPR(reg) : IsAccessibleGPR(reg);
  }
  uintptr_t GetRegister(uint32_t reg, bool is_float) const {
    DCHECK(IsAccessibleRegister(reg, is_float));
    return is_float ? GetFPR(reg) : GetGPR(reg);
  }
  void SetRegister(uint32_t reg, uintptr_t value, bool is_float) {
    DCHECK(IsAccessibleRegister(reg, is_float));
    if (is_float) {
      SetFPR(reg, value);
    } else {
      SetGPR(reg, value);
    }
  }

  bool IsAccessibleGPR(uint32_t reg) const;
  uintptr_t GetGPR(uint32_t reg) const;
  void SetGPR(uint32_t reg, uintptr_t value);

  bool IsAccessibleFPR(uint32_t reg) const;
  uintptr_t GetFPR(uint32_t reg) const;
  void SetFPR(uint32_t reg, uintptr_t value);

  bool GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                            uint32_t* val) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                uint32_t* val) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                                VRegKind kind_hi, uint64_t* val) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                    VRegKind kind_lo, VRegKind kind_hi,
                                    uint64_t* val) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
                                   uint64_t* val) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value,
                            VRegKind kind)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool SetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, uint64_t new_value,
                                VRegKind kind_lo, VRegKind kind_hi)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, uint64_t new_value,
                                   bool is_float)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Thread* const thread_;
  const StackWalkKind walk_kind_;
  ShadowFrame* cur_shadow_frame_;
  ArtMethod** cur_quick_frame_;
  uintptr_t cur_quick_frame_pc_;
  // Lazily computed, number of frames in the stack.
  size_t num_frames_;
  // Depth of the frame we're currently at.
  size_t cur_depth_;

 protected:
  Context* const context_;
};

}  // namespace art

#endif  // ART_RUNTIME_STACK_H_