/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_STACK_H_
#define ART_RUNTIME_STACK_H_

#include <stdint.h>
#include <string.h>

#include <memory>
#include <string>
#include <vector>

#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "gc_root.h"
#include "quick/quick_method_frame_info.h"
#include "read_barrier.h"
#include "stack_reference.h"
#include "verify_object.h"

namespace art {

namespace mirror {
class Object;
}  // namespace mirror

class ArtMethod;
class Context;
class HandleScope;
class InlineInfo;
class OatQuickMethodHeader;
class ScopedObjectAccess;
class ShadowFrame;
class StackVisitor;
class Thread;
union JValue;

// The kind of vreg being accessed in calls to Set/GetVReg.
enum VRegKind {
  kReferenceVReg,
  kIntVReg,
  kFloatVReg,
  kLongLoVReg,
  kLongHiVReg,
  kDoubleLoVReg,
  kDoubleHiVReg,
  kConstant,
  kImpreciseConstant,
  kUndefined,
};
std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);

// Forward declaration. Just calls the destructor.
struct ShadowFrameDeleter;
using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;

// Size in bytes of the should_deoptimize flag on stack.
// We just need 4 bytes for our purpose regardless of the architecture. Frame size
// calculation will automatically do alignment for the final frame size.
static constexpr size_t kShouldDeoptimizeFlagSize = 4;

// Counts locks by storing object pointers in a vector. Duplicate entries mark recursive locks.
// The vector will be visited with the ShadowFrame during GC (so all the locked-on objects are
// thread roots).
// Note: implementation is split so that the call sites may be optimized to no-ops in case no
//       lock counting is necessary. The actual implementation is in the cc file to avoid
//       dependencies.
class LockCountData {
 public:
  // Add the given object to the list of monitors, that is, objects that have been locked. This
  // will not throw (but be skipped if there is an exception pending on entry).
  void AddMonitor(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Try to remove the given object from the monitor list, indicating an unlock operation.
  // This will throw an IllegalMonitorStateException (clearing any already pending exception), in
  // case there wasn't a lock recorded for the object.
  void RemoveMonitorOrThrow(Thread* self,
                            const mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Check whether all acquired monitors have been released. This will potentially throw an
  // IllegalMonitorStateException, clearing any already pending exception. Returns true if the
  // check shows that everything is OK w.r.t. lock counting, false otherwise.
  bool CheckAllMonitorsReleasedOrThrow(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);

  template <typename T, typename... Args>
  void VisitMonitors(T visitor, Args&&... args) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (monitors_ != nullptr) {
      // Visitors may change the Object*. Be careful with the foreach loop.
      for (mirror::Object*& obj : *monitors_) {
        visitor(/* inout */ &obj, std::forward<Args>(args)...);
      }
    }
  }

 private:
  // Stores references to the locked-on objects. As noted, this should be visited during thread
  // marking.
  std::unique_ptr<std::vector<mirror::Object*>> monitors_;
};
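
// Illustrative sketch (not code from this file): a thread-root visitor could walk the locked
// objects recorded above via VisitMonitors. The callback receives a mirror::Object** so it may
// update the slot in place; the helper name below is hypothetical.
//
//   void VisitLockedObjects(LockCountData& lock_data)
//       REQUIRES_SHARED(Locks::mutator_lock_) {
//     lock_data.VisitMonitors([](mirror::Object** obj) {
//       // Report or update *obj here, e.g. treat it as a thread root during marking.
//     });
//   }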

// ShadowFrame has 2 possible layouts:
//  - interpreter - separate VRegs and reference arrays. References are in the reference array.
//  - JNI - just VRegs, but where every VReg holds a reference.
class ShadowFrame {
 public:
  // Compute size of ShadowFrame in bytes assuming it has a reference array.
  static size_t ComputeSize(uint32_t num_vregs) {
    return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) +
           (sizeof(StackReference<mirror::Object>) * num_vregs);
  }

  // Create ShadowFrame in heap for deoptimization.
  static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link,
                                             ArtMethod* method, uint32_t dex_pc) {
    uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
    return CreateShadowFrameImpl(num_vregs, link, method, dex_pc, memory);
  }

  // Delete a ShadowFrame allocated on the heap for deoptimization.
  static void DeleteDeoptimizedFrame(ShadowFrame* sf) {
    sf->~ShadowFrame();  // Explicitly destruct.
    uint8_t* memory = reinterpret_cast<uint8_t*>(sf);
    delete[] memory;
  }

  // Create a shadow frame in a fresh alloca. This needs to be in the context of the caller.
  // Inlining doesn't work, the compiler will still undo the alloca. So this needs to be a macro.
#define CREATE_SHADOW_FRAME(num_vregs, link, method, dex_pc) ({                             \
    size_t frame_size = ShadowFrame::ComputeSize(num_vregs);                               \
    void* alloca_mem = alloca(frame_size);                                                  \
    ShadowFrameAllocaUniquePtr(                                                             \
        ShadowFrame::CreateShadowFrameImpl((num_vregs), (link), (method), (dex_pc),         \
                                           (alloca_mem)));                                  \
    })
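
  // Illustrative sketch (assumption, not code from this file): a caller typically materializes a
  // shadow frame on its own stack and then interprets into it. Something like:
  //
  //   ShadowFrameAllocaUniquePtr shadow_frame =
  //       CREATE_SHADOW_FRAME(/* num_vregs */ code_item->registers_size_,
  //                           /* link */ nullptr, method, /* dex_pc */ 0);
  //   shadow_frame->SetVRegReference(0, receiver);
  //
  // The alloca lives in the caller's frame, so the returned unique_ptr must not outlive the
  // calling function; ShadowFrameDeleter only runs the destructor and frees no memory.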

  ~ShadowFrame() {}

  // TODO(iam): Clean references array up since they're always there,
  // we don't need to do conditionals.
  bool HasReferenceArray() const {
    return true;
  }

  uint32_t NumberOfVRegs() const {
    return number_of_vregs_;
  }

  uint32_t GetDexPC() const {
    return (dex_pc_ptr_ == nullptr) ? dex_pc_ : dex_pc_ptr_ - code_item_->insns_;
  }

  int16_t GetCachedHotnessCountdown() const {
    return cached_hotness_countdown_;
  }

  void SetCachedHotnessCountdown(int16_t cached_hotness_countdown) {
    cached_hotness_countdown_ = cached_hotness_countdown;
  }

  int16_t GetHotnessCountdown() const {
    return hotness_countdown_;
  }

  void SetHotnessCountdown(int16_t hotness_countdown) {
    hotness_countdown_ = hotness_countdown;
  }

  void SetDexPC(uint32_t dex_pc) {
    dex_pc_ = dex_pc;
    dex_pc_ptr_ = nullptr;
  }

  ShadowFrame* GetLink() const {
    return link_;
  }

  void SetLink(ShadowFrame* frame) {
    DCHECK_NE(this, frame);
    link_ = frame;
  }

  int32_t GetVReg(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const int32_t*>(vreg);
  }

  // Shorts are extended to ints in vregs. Interpreter intrinsics need them as shorts.
  int16_t GetVRegShort(size_t i) const {
    return static_cast<int16_t>(GetVReg(i));
  }

  uint32_t* GetVRegAddr(size_t i) {
    return &vregs_[i];
  }

  uint32_t* GetShadowRefAddr(size_t i) {
    DCHECK(HasReferenceArray());
    DCHECK_LT(i, NumberOfVRegs());
    return &vregs_[i + NumberOfVRegs()];
  }

  void SetCodeItem(const DexFile::CodeItem* code_item) {
    code_item_ = code_item;
  }

  const DexFile::CodeItem* GetCodeItem() const {
    return code_item_;
  }

  float GetVRegFloat(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    // NOTE: Strict-aliasing?
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const float*>(vreg);
  }

  int64_t GetVRegLong(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_int64*>(vreg);
  }

  double GetVRegDouble(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    typedef const double unaligned_double __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_double*>(vreg);
  }

  // Look up the reference given its virtual register number.
  // A non-null result does not guarantee that the vreg currently holds a reference on non-moving
  // collectors; if not certain, check that the raw value from GetVReg is equal to this.
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  mirror::Object* GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    mirror::Object* ref;
    if (HasReferenceArray()) {
      ref = References()[i].AsMirrorPtr();
    } else {
      const uint32_t* vreg_ptr = &vregs_[i];
      ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
    }
    if (kUseReadBarrier) {
      ReadBarrier::AssertToSpaceInvariant(ref);
    }
    if (kVerifyFlags & kVerifyReads) {
      VerifyObject(ref);
    }
    return ref;
  }

  // Get view of vregs as range of consecutive arguments starting at i.
  uint32_t* GetVRegArgs(size_t i) {
    return &vregs_[i];
  }

  void SetVReg(size_t i, int32_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<int32_t*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
    }
  }

  void SetVRegFloat(size_t i, float val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<float*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
    }
  }

  void SetVRegLong(size_t i, int64_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_int64*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
      References()[i + 1].Clear();
    }
  }

  void SetVRegDouble(size_t i, double val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    typedef double unaligned_double __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_double*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
      References()[i + 1].Clear();
    }
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  void SetVRegReference(size_t i, mirror::Object* val) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    if (kVerifyFlags & kVerifyWrites) {
      VerifyObject(val);
    }
    if (kUseReadBarrier) {
      ReadBarrier::AssertToSpaceInvariant(val);
    }
    uint32_t* vreg = &vregs_[i];
    reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val);
    if (HasReferenceArray()) {
      References()[i].Assign(val);
    }
  }

  void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_) {
    DCHECK(method != nullptr);
    DCHECK(method_ != nullptr);
    method_ = method;
  }

  ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(method_ != nullptr);
    return method_;
  }

  mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* GetThisObject(uint16_t num_ins) const REQUIRES_SHARED(Locks::mutator_lock_);

  bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
    if (HasReferenceArray()) {
      return ((&References()[0] <= shadow_frame_entry_obj) &&
              (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
    } else {
      uint32_t* shadow_frame_entry = reinterpret_cast<uint32_t*>(shadow_frame_entry_obj);
      return ((&vregs_[0] <= shadow_frame_entry) &&
              (shadow_frame_entry <= (&vregs_[NumberOfVRegs() - 1])));
    }
  }

  LockCountData& GetLockCountData() {
    return lock_count_data_;
  }

  static size_t LockCountDataOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_);
  }

  static size_t LinkOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, link_);
  }

  static size_t MethodOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, method_);
  }

  static size_t DexPCOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
  }

  static size_t NumberOfVRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
  }

  static size_t VRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, vregs_);
  }

  static size_t ResultRegisterOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, result_register_);
  }

  static size_t DexPCPtrOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
  }

  static size_t CodeItemOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, code_item_);
  }

  static size_t CachedHotnessCountdownOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, cached_hotness_countdown_);
  }

  static size_t HotnessCountdownOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, hotness_countdown_);
  }

  // Create ShadowFrame for interpreter using provided memory.
  static ShadowFrame* CreateShadowFrameImpl(uint32_t num_vregs,
                                            ShadowFrame* link,
                                            ArtMethod* method,
                                            uint32_t dex_pc,
                                            void* memory) {
    return new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
  }

  const uint16_t* GetDexPCPtr() {
    return dex_pc_ptr_;
  }

  void SetDexPCPtr(uint16_t* dex_pc_ptr) {
    dex_pc_ptr_ = dex_pc_ptr;
  }

  JValue* GetResultRegister() {
    return result_register_;
  }

 private:
  ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
              uint32_t dex_pc, bool has_reference_array)
      : link_(link),
        method_(method),
        result_register_(nullptr),
        dex_pc_ptr_(nullptr),
        code_item_(nullptr),
        number_of_vregs_(num_vregs),
        dex_pc_(dex_pc),
        cached_hotness_countdown_(0),
        hotness_countdown_(0) {
    // TODO(iam): Remove this parameter, it's an artifact of portable removal
    DCHECK(has_reference_array);
    if (has_reference_array) {
      memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
    } else {
      memset(vregs_, 0, num_vregs * sizeof(uint32_t));
    }
  }

  const StackReference<mirror::Object>* References() const {
    DCHECK(HasReferenceArray());
    const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
    return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end);
  }

  StackReference<mirror::Object>* References() {
    return const_cast<StackReference<mirror::Object>*>(
        const_cast<const ShadowFrame*>(this)->References());
  }

  // Link to previous shadow frame or null.
  ShadowFrame* link_;
  ArtMethod* method_;
  JValue* result_register_;
  const uint16_t* dex_pc_ptr_;
  const DexFile::CodeItem* code_item_;
  LockCountData lock_count_data_;  // This may contain GC roots when lock counting is active.
  const uint32_t number_of_vregs_;
  uint32_t dex_pc_;
  int16_t cached_hotness_countdown_;
  int16_t hotness_countdown_;

  // This is a two-part array:
  //  - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
  //    bytes.
  //  - [number_of_vregs..number_of_vregs*2) holds only reference registers. Each element here is
  //    ptr-sized.
  // In other words when a primitive is stored in vX, the second (reference) part of the array will
  // be null. When a reference is stored in vX, the second (reference) part of the array will be a
  // copy of vX.
  uint32_t vregs_[0];

  DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
};

struct ShadowFrameDeleter {
  inline void operator()(ShadowFrame* frame) {
    if (frame != nullptr) {
      frame->~ShadowFrame();
    }
  }
};
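
// Illustrative sketch (assumption, not code from this file): reading and writing interpreter
// registers through the ShadowFrame API. Wide values occupy two consecutive vregs; writing a
// primitive clears the matching slot in the reference array, while SetVRegReference keeps both
// parts in sync. The function name below is hypothetical.
//
//   void InitRegisters(ShadowFrame* frame, mirror::Object* receiver)
//       REQUIRES_SHARED(Locks::mutator_lock_) {
//     frame->SetVRegReference(0, receiver);      // v0 := this
//     frame->SetVReg(1, 42);                     // v1 := int
//     frame->SetVRegLong(2, INT64_C(1) << 40);   // v2/v3 := wide value
//     mirror::Object* same = frame->GetVRegReference(0);
//     int64_t wide = frame->GetVRegLong(2);
//     (void) same; (void) wide;
//   }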

class JavaFrameRootInfo FINAL : public RootInfo {
 public:
  JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
      : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
  }
  void Describe(std::ostream& os) const OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

  size_t GetVReg() const {
    return vreg_;
  }
  const StackVisitor* GetVisitor() const {
    return stack_visitor_;
  }

 private:
  const StackVisitor* const stack_visitor_;
  const size_t vreg_;
};

// The managed stack is used to record fragments of managed code stacks. Managed code stacks
// may either be shadow frames or lists of frames using fixed frame sizes. Transition records are
// necessary for transitions between code using different frame layouts and transitions into native
// code.
class PACKED(4) ManagedStack {
 public:
  ManagedStack()
      : top_quick_frame_(nullptr), link_(nullptr), top_shadow_frame_(nullptr) {}

  void PushManagedStackFragment(ManagedStack* fragment) {
    // Copy this top fragment into the given fragment.
    memcpy(fragment, this, sizeof(ManagedStack));
    // Clear this fragment, which has become the top.
    memset(this, 0, sizeof(ManagedStack));
    // Link our top fragment onto the given fragment.
    link_ = fragment;
  }

  void PopManagedStackFragment(const ManagedStack& fragment) {
    DCHECK(&fragment == link_);
    // Copy the given fragment back to the top.
    memcpy(this, &fragment, sizeof(ManagedStack));
  }

  ManagedStack* GetLink() const {
    return link_;
  }

  ArtMethod** GetTopQuickFrame() const {
    return top_quick_frame_;
  }

  void SetTopQuickFrame(ArtMethod** top) {
    DCHECK(top_shadow_frame_ == nullptr);
    top_quick_frame_ = top;
  }

  static size_t TopQuickFrameOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    DCHECK(top_quick_frame_ == nullptr);
    ShadowFrame* old_frame = top_shadow_frame_;
    top_shadow_frame_ = new_top_frame;
    new_top_frame->SetLink(old_frame);
    return old_frame;
  }

  ShadowFrame* PopShadowFrame() {
    DCHECK(top_quick_frame_ == nullptr);
    CHECK(top_shadow_frame_ != nullptr);
    ShadowFrame* frame = top_shadow_frame_;
    top_shadow_frame_ = frame->GetLink();
    return frame;
  }

  ShadowFrame* GetTopShadowFrame() const {
    return top_shadow_frame_;
  }

  void SetTopShadowFrame(ShadowFrame* top) {
    DCHECK(top_quick_frame_ == nullptr);
    top_shadow_frame_ = top;
  }

  static size_t TopShadowFrameOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
  }

  size_t NumJniShadowFrameReferences() const REQUIRES_SHARED(Locks::mutator_lock_);

  bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;

 private:
  ArtMethod** top_quick_frame_;
  ManagedStack* link_;
  ShadowFrame* top_shadow_frame_;
};
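
// Illustrative sketch (assumption, not code from this file): interpreter-style use of a
// ManagedStack pairs every PushShadowFrame with a PopShadowFrame while no quick frame is active.
// The helper name below is hypothetical.
//
//   void EnterInterpretedFrame(ManagedStack* managed_stack, ShadowFrame* frame) {
//     ShadowFrame* old_top = managed_stack->PushShadowFrame(frame);  // links frame -> old_top
//     // ... interpret the method ...
//     ShadowFrame* popped = managed_stack->PopShadowFrame();
//     DCHECK(popped == frame);
//     DCHECK(managed_stack->GetTopShadowFrame() == old_top);
//   }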

class StackVisitor {
 public:
  // This enum defines a flag to control whether inlined frames are included
  // when walking the stack.
  enum class StackWalkKind {
    kIncludeInlinedFrames,
    kSkipInlinedFrames,
  };

 protected:
  StackVisitor(Thread* thread,
               Context* context,
               StackWalkKind walk_kind,
               bool check_suspended = true);

  bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

 public:
  virtual ~StackVisitor() {}

  // Return 'true' if we should continue to visit more frames, 'false' to stop.
  virtual bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  enum class CountTransitions {
    kYes,
    kNo,
  };

  template <CountTransitions kCount = CountTransitions::kYes>
  void WalkStack(bool include_transitions = false)
      REQUIRES_SHARED(Locks::mutator_lock_);
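
  // Illustrative sketch (assumption, not code from this file): a concrete visitor overrides
  // VisitFrame() and then drives the walk with WalkStack(). The subclass name and body are
  // hypothetical.
  //
  //   class DumpingVisitor FINAL : public StackVisitor {
  //    public:
  //     DumpingVisitor(Thread* thread, Context* context)
  //         : StackVisitor(thread, context, StackWalkKind::kIncludeInlinedFrames) {}
  //
  //     bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
  //       LOG(INFO) << DescribeLocation();  // e.g. method and dex pc of the current frame
  //       return true;  // Keep walking; return false to stop early.
  //     }
  //   };
  //
  //   // Caller, with the mutator lock held shared:
  //   DumpingVisitor visitor(Thread::Current(), /* context */ nullptr);
  //   visitor.WalkStack();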

  Thread* GetThread() const {
    return thread_;
  }

  ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Sets this stack frame's method pointer. This requires a full lock of the MutatorLock. This
  // doesn't work with inlined methods.
  void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);

  ArtMethod* GetOuterMethod() const {
    return *GetCurrentQuickFrame();
  }

  bool IsShadowFrame() const {
    return cur_shadow_frame_ != nullptr;
  }

  uint32_t GetDexPc(bool abort_on_failure = true) const REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);

  size_t GetNativePcOffset() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the height of the stack in the managed stack frames, including transitions.
  size_t GetFrameHeight() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetNumFrames() - cur_depth_ - 1;
  }

  // Returns a frame ID for JDWP use, starting from 1.
  size_t GetFrameId() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFrameHeight() + 1;
  }

  size_t GetNumFrames() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (num_frames_ == 0) {
      num_frames_ = ComputeNumFrames(thread_, walk_kind_);
    }
    return num_frames_;
  }

  size_t GetFrameDepth() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return cur_depth_;
  }

  // Get the method and dex pc immediately after the one that's currently being visited.
  bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                   uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. Debugger will make sure deoptimization
  // is triggered to make the values effective.
  bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. Debugger will make sure deoptimization
  // is triggered to make the values effective.
  bool SetVRegPair(ArtMethod* m,
                   uint16_t vreg,
                   uint64_t new_value,
                   VRegKind kind_lo,
                   VRegKind kind_hi)
      REQUIRES_SHARED(Locks::mutator_lock_);

  uintptr_t* GetGPRAddress(uint32_t reg) const;

  // This is a fast-path for getting/setting values in a quick frame.
  uint32_t* GetVRegAddrFromQuickCode(ArtMethod** cur_quick_frame,
                                     const DexFile::CodeItem* code_item,
                                     uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
                                     uint16_t vreg) const {
    int offset = GetVRegOffsetFromQuickCode(
        code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
    DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
    uint8_t* vreg_addr = reinterpret_cast<uint8_t*>(cur_quick_frame) + offset;
    return reinterpret_cast<uint32_t*>(vreg_addr);
  }

  uintptr_t GetReturnPc() const REQUIRES_SHARED(Locks::mutator_lock_);

  void SetReturnPc(uintptr_t new_ret_pc) REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Return sp-relative offset for a Dalvik virtual register, compiler
   * spill or Method* in bytes using Method*.
   * Note that (reg == -1) denotes an invalid Dalvik register. For the
   * positive values, the Dalvik registers come first, followed by the
   * Method*, followed by other special temporaries if any, followed by
   * regular compiler temporaries. As of now we only have the Method*
   * as a special compiler temporary.
   * A compiler temporary can be thought of as a virtual register that
   * does not exist in the dex but holds intermediate values to help
   * optimizations and code generation. A special compiler temporary is
   * one whose location in the frame is well known, while non-special ones
   * have no requirement on their location in the frame as long as the
   * code generator itself knows how to access them.
   *
   *     +-------------------------------+
   *     | IN[ins-1]                     |  {Note: resides in caller's frame}
   *     |       .                       |
   *     | IN[0]                         |
   *     | caller's ArtMethod            |  ... ArtMethod*
   *     +===============================+  {Note: start of callee's frame}
   *     | core callee-save spill        |  {variable sized}
   *     +-------------------------------+
   *     | fp callee-save spill          |
   *     +-------------------------------+
   *     | filler word                   |  {For compatibility, if V[locals-1] used as wide
   *     +-------------------------------+
   *     | V[locals-1]                   |
   *     | V[locals-2]                   |
   *     |       .                       |
   *     |       .                       |  ... (reg == 2)
   *     | V[1]                          |  ... (reg == 1)
   *     | V[0]                          |  ... (reg == 0) <---- "locals_start"
   *     +-------------------------------+
   *     | stack alignment padding       |  {0 to (kStackAlignWords-1) of padding}
   *     +-------------------------------+
   *     | Compiler temp region          |  ... (reg >= max_num_special_temps)
   *     |       .                       |
   *     |       .                       |
   *     | V[max_num_special_temps + 1]  |
   *     | V[max_num_special_temps + 0]  |
   *     +-------------------------------+
   *     | OUT[outs-1]                   |
   *     | OUT[outs-2]                   |
   *     |       .                       |
   *     | OUT[0]                        |
   *     | ArtMethod*                    |  ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
   *     +===============================+
   */
  static int GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
                                        uint32_t core_spills, uint32_t fp_spills,
                                        size_t frame_size, int reg, InstructionSet isa);

  static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
    // According to the stack model, the first out is above the Method reference.
    return static_cast<size_t>(InstructionSetPointerSize(isa)) + out_num * sizeof(uint32_t);
  }
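
  // Worked example (illustrative assumption, not code from this file): on an ISA with 8-byte
  // pointers such as arm64, the callee's ArtMethod* occupies [sp, sp + 8), so
  //
  //   GetOutVROffset(/* out_num */ 0, kArm64) == 8    // OUT[0] at sp + 8
  //   GetOutVROffset(/* out_num */ 1, kArm64) == 12   // OUT[1] at sp + 12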

  bool IsInInlinedFrame() const {
    return current_inlining_depth_ != 0;
  }

  size_t GetCurrentInliningDepth() const {
    return current_inlining_depth_;
  }

  uintptr_t GetCurrentQuickFramePc() const {
    return cur_quick_frame_pc_;
  }

  ArtMethod** GetCurrentQuickFrame() const {
    return cur_quick_frame_;
  }

  ShadowFrame* GetCurrentShadowFrame() const {
    return cur_shadow_frame_;
  }

  HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
    ArtMethod** sp = GetCurrentQuickFrame();
    // Skip ArtMethod*; the handle scope comes next.
    return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
  }

  std::string DescribeLocation() const REQUIRES_SHARED(Locks::mutator_lock_);

  static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void DescribeStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);

  const OatQuickMethodHeader* GetCurrentOatQuickMethodHeader() const {
    return cur_oat_quick_method_header_;
  }

  QuickMethodFrameInfo GetCurrentQuickFrameInfo() const REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Private constructor known in the case that num_frames_ has already been computed.
  StackVisitor(Thread* thread,
               Context* context,
               StackWalkKind walk_kind,
               size_t num_frames,
               bool check_suspended = true)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
    return is_float ? IsAccessibleFPR(reg) : IsAccessibleGPR(reg);
  }

  uintptr_t GetRegister(uint32_t reg, bool is_float) const {
    DCHECK(IsAccessibleRegister(reg, is_float));
    return is_float ? GetFPR(reg) : GetGPR(reg);
  }

  bool IsAccessibleGPR(uint32_t reg) const;
  uintptr_t GetGPR(uint32_t reg) const;

  bool IsAccessibleFPR(uint32_t reg) const;
  uintptr_t GetFPR(uint32_t reg) const;

  bool GetVRegFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                                          uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                    VRegKind kind_lo, VRegKind kind_hi,
                                    uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
                                   uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SanityCheckFrame() const REQUIRES_SHARED(Locks::mutator_lock_);

  InlineInfo GetCurrentInlineInfo() const REQUIRES_SHARED(Locks::mutator_lock_);

  Thread* const thread_;
  const StackWalkKind walk_kind_;
  ShadowFrame* cur_shadow_frame_;
  ArtMethod** cur_quick_frame_;
  uintptr_t cur_quick_frame_pc_;
  const OatQuickMethodHeader* cur_oat_quick_method_header_;
  // Lazily computed, number of frames in the stack.
  size_t num_frames_;
  // Depth of the frame we're currently at.
  size_t cur_depth_;
  // Current inlining depth of the method we are currently at.
  // 0 if there is no inlined frame.
  size_t current_inlining_depth_;

 protected:
  Context* const context_;
  const bool check_suspended_;
};

}  // namespace art

#endif  // ART_RUNTIME_STACK_H_