/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "stack.h"

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/hex_dump.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc_map.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "linear_alloc.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "quick/quick_method_frame_info.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "verify_object-inl.h"
#include "vmap_table.h"

namespace art {

static constexpr bool kDebugStackWalk = false;

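// A ShadowFrame is the interpreter's view of a frame: an array of dex virtual
// registers. For a non-static dex method the receiver (`this`) is the first
// "in" register, i.e. vreg index registers_size - ins_size, while native
// methods keep their receiver in vreg 0, as the lookup below shows.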
mirror::Object* ShadowFrame::GetThisObject() const {
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else if (m->IsNative()) {
    return GetVRegReference(0);
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    CHECK(code_item != nullptr) << PrettyMethod(m);
    uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
    return GetVRegReference(reg);
  }
}

mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else {
    return GetVRegReference(NumberOfVRegs() - num_ins);
  }
}

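// Counts the references held in JNI shadow frames across every fragment of
// this managed stack; fragments are chained through GetLink(), and within a
// fragment shadow frames are chained the same way.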
size_t ManagedStack::NumJniShadowFrameReferences() const {
  size_t count = 0;
  for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
       current_fragment = current_fragment->GetLink()) {
    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_;
         current_frame != nullptr;
         current_frame = current_frame->GetLink()) {
      if (current_frame->GetMethod()->IsNative()) {
        // The JNI ShadowFrame only contains references (for indirect references).
        count += current_frame->NumberOfVRegs();
      }
    }
  }
  return count;
}

bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const {
  for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
       current_fragment = current_fragment->GetLink()) {
    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_;
         current_frame != nullptr;
         current_frame = current_frame->GetLink()) {
      if (current_frame->Contains(shadow_frame_entry)) {
        return true;
      }
    }
  }
  return false;
}

StackVisitor::StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
    : StackVisitor(thread, context, walk_kind, 0) {}

StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           size_t num_frames)
    : thread_(thread),
      walk_kind_(walk_kind),
      cur_shadow_frame_(nullptr),
      cur_quick_frame_(nullptr),
      cur_quick_frame_pc_(0),
      num_frames_(num_frames),
      cur_depth_(0),
      context_(context) {
  DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
}

uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetDexPC();
  } else if (cur_quick_frame_ != nullptr) {
    return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
  } else {
    return 0;
  }
}

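// Fetches the receiver of a proxy method directly from its quick frame. The
// definition lives elsewhere in the runtime (presumably among the quick
// entrypoints, though this file only declares it).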
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

mirror::Object* StackVisitor::GetThisObject() const {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else if (m->IsNative()) {
    if (cur_quick_frame_ != nullptr) {
      HandleScope* hs = reinterpret_cast<HandleScope*>(
          reinterpret_cast<char*>(cur_quick_frame_) + m->GetHandleScopeOffset().SizeValue());
      return hs->GetReference(0);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else if (m->IsProxyMethod()) {
    if (cur_quick_frame_ != nullptr) {
      return artQuickGetProxyThisObject(cur_quick_frame_);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    if (code_item == nullptr) {
      UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method: "
                           << PrettyMethod(m);
      return nullptr;
    } else {
      uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
      uint32_t value = 0;
      bool success = GetVReg(m, reg, kReferenceVReg, &value);
      // We currently always guarantee the `this` object is live throughout the method.
      CHECK(success) << "Failed to read the this object in " << PrettyMethod(m);
      return reinterpret_cast<mirror::Object*>(value);
    }
  }
}

size_t StackVisitor::GetNativePcOffset() const {
  DCHECK(!IsShadowFrame());
  return GetMethod()->NativeQuickPcOffset(cur_quick_frame_pc_);
}

bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
  // Process register map (which native and runtime methods don't have).
  if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
    return false;
  }
  if (m->IsOptimized(sizeof(void*))) {
    return true;  // TODO: Implement.
  }
  const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
  CHECK(native_gc_map != nullptr) << PrettyMethod(m);
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  // Can't be null or how would we compile its instructions?
  DCHECK(code_item != nullptr) << PrettyMethod(m);
  NativePcOffsetToReferenceMap map(native_gc_map);
  size_t num_regs = std::min(map.RegWidth() * 8, static_cast<size_t>(code_item->registers_size_));
  const uint8_t* reg_bitmap = nullptr;
  if (num_regs > 0) {
    Runtime* runtime = Runtime::Current();
    const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
    uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
    reg_bitmap = map.FindBitMap(native_pc_offset);
    DCHECK(reg_bitmap != nullptr);
  }
  // Does this register hold a reference?
  return vreg < num_regs && TestBitmap(vreg, reg_bitmap);
}

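// Usage sketch for the vreg readers below (a hypothetical caller, not code
// from this file; assumes the visitor is stopped on a frame and, for compiled
// frames, was built with a Context):
//
//   uint32_t value = 0;
//   if (visitor.GetVReg(visitor.GetMethod(), 0 /* vreg */, kIntVReg, &value)) {
//     // `value` now holds dex register v0 of the current frame.
//   }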
bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const {
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    if (m->IsOptimized(sizeof(void*))) {
      return GetVRegFromOptimizedCode(m, vreg, kind, val);
    } else {
      return GetVRegFromQuickCode(m, vreg, kind, val);
    }
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    *val = cur_shadow_frame_->GetVReg(vreg);
    return true;
  }
}

bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                        uint32_t* val) const {
  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
  DCHECK(code_pointer != nullptr);
  const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
  QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
  uint32_t vmap_offset;
  // TODO: IsInContext stops before spotting floating point registers.
  if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
    bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
    uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
    uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
    return GetRegisterIfAccessible(reg, kind, val);
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                      // its instructions?
    *val = *GetVRegAddrFromQuickCode(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
                                     frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(),
                                     vreg);
    return true;
  }
}

bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                            uint32_t* val) const {
  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
  DCHECK(code_pointer != nullptr);
  uint32_t native_pc_offset = m->NativeQuickPcOffset(cur_quick_frame_pc_);
  CodeInfo code_info = m->GetOptimizedCodeInfo();
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                    // its instructions?
  DCHECK_LT(vreg, code_item->registers_size_);
  uint16_t number_of_dex_registers = code_item->registers_size_;
  DexRegisterMap dex_register_map =
      code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
  DexRegisterLocation::Kind location_kind =
      dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info);
  switch (location_kind) {
    case DexRegisterLocation::Kind::kInStack: {
      const int32_t offset =
          dex_register_map.GetStackOffsetInBytes(vreg, number_of_dex_registers, code_info);
      const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
      *val = *reinterpret_cast<const uint32_t*>(addr);
      return true;
    }
    case DexRegisterLocation::Kind::kInRegister:
    case DexRegisterLocation::Kind::kInFpuRegister: {
      uint32_t reg = dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info);
      return GetRegisterIfAccessible(reg, kind, val);
    }
    case DexRegisterLocation::Kind::kConstant:
      *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info);
      return true;
    case DexRegisterLocation::Kind::kNone:
      return false;
    default:
      LOG(FATAL)
          << "Unexpected location kind: "
          << DexRegisterLocation::PrettyDescriptor(
                 dex_register_map.GetLocationInternalKind(vreg, number_of_dex_registers,
                                                          code_info));
      UNREACHABLE();
  }
}

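// On 64-bit targets a long/double pair lives in a single 64-bit register, so
// the helper below extracts the requested 32-bit half before returning it.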
bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
  const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
  if (!IsAccessibleRegister(reg, is_float)) {
    return false;
  }
  uintptr_t ptr_val = GetRegister(reg, is_float);
  const bool target64 = Is64BitInstructionSet(kRuntimeISA);
  if (target64) {
    const bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
    const bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
    int64_t value_long = static_cast<int64_t>(ptr_val);
    if (wide_lo) {
      ptr_val = static_cast<uintptr_t>(Low32Bits(value_long));
    } else if (wide_hi) {
      ptr_val = static_cast<uintptr_t>(High32Bits(value_long));
    }
  }
  *val = ptr_val;
  return true;
}

bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                               VRegKind kind_hi, uint64_t* val) const {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    if (m->IsOptimized(sizeof(void*))) {
      return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
    } else {
      return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val);
    }
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    *val = cur_shadow_frame_->GetVRegLong(vreg);
    return true;
  }
}

bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                                            VRegKind kind_hi, uint64_t* val) const {
  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
  DCHECK(code_pointer != nullptr);
  const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
  QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
  uint32_t vmap_offset_lo, vmap_offset_hi;
  // TODO: IsInContext stops before spotting floating point registers.
  if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
      vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
    bool is_float = (kind_lo == kDoubleLoVReg);
    uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
    uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
    uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
    return GetRegisterPairIfAccessible(reg_lo, reg_hi, kind_lo, val);
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                      // its instructions?
    uint32_t* addr = GetVRegAddrFromQuickCode(
        cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
    *val = *reinterpret_cast<uint64_t*>(addr);
    return true;
  }
}

bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                                VRegKind kind_lo, VRegKind kind_hi,
                                                uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits);
  success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

bool StackVisitor::GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
                                               VRegKind kind_lo, uint64_t* val) const {
  const bool is_float = (kind_lo == kDoubleLoVReg);
  if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
    return false;
  }
  uintptr_t ptr_val_lo = GetRegister(reg_lo, is_float);
  uintptr_t ptr_val_hi = GetRegister(reg_hi, is_float);
  bool target64 = Is64BitInstructionSet(kRuntimeISA);
  if (target64) {
    int64_t value_long_lo = static_cast<int64_t>(ptr_val_lo);
    int64_t value_long_hi = static_cast<int64_t>(ptr_val_hi);
    ptr_val_lo = static_cast<uintptr_t>(Low32Bits(value_long_lo));
    ptr_val_hi = static_cast<uintptr_t>(High32Bits(value_long_hi));
  }
  *val = (static_cast<uint64_t>(ptr_val_hi) << 32) | static_cast<uint32_t>(ptr_val_lo);
  return true;
}

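// The setters below mirror the getters above. Note that writing vregs of
// frames compiled by the optimizing compiler is not supported: SetVReg and
// SetVRegPair simply return false for such frames.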
bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value,
                           VRegKind kind) {
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
    DCHECK(m == GetMethod());
    if (m->IsOptimized(sizeof(void*))) {
      return false;
    } else {
      return SetVRegFromQuickCode(m, vreg, new_value, kind);
    }
  } else {
    cur_shadow_frame_->SetVReg(vreg, new_value);
    return true;
  }
}

bool StackVisitor::SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value,
                                        VRegKind kind) {
  DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
  DCHECK(m == GetMethod());
  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
  DCHECK(code_pointer != nullptr);
  const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
  QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
  uint32_t vmap_offset;
  // TODO: IsInContext stops before spotting floating point registers.
  if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
    bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
    uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
    uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
    return SetRegisterIfAccessible(reg, new_value, kind);
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                      // its instructions?
    uint32_t* addr = GetVRegAddrFromQuickCode(
        cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
    *addr = new_value;
    return true;
  }
}

bool StackVisitor::SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind) {
  const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
  if (!IsAccessibleRegister(reg, is_float)) {
    return false;
  }
  const bool target64 = Is64BitInstructionSet(kRuntimeISA);

  // Create a new value that can hold both the low and high 32 bits, in case
  // we are running on a 64-bit target.
  uintptr_t full_new_value = new_value;
  // Deal with 32 or 64-bit wide registers in a way that builds on all targets.
  if (target64) {
    bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
    bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
    if (wide_lo || wide_hi) {
      uintptr_t old_reg_val = GetRegister(reg, is_float);
      uint64_t new_vreg_portion = static_cast<uint64_t>(new_value);
      uint64_t old_reg_val_as_wide = static_cast<uint64_t>(old_reg_val);
      uint64_t mask = 0xffffffff;
      if (wide_lo) {
        mask = mask << 32;
      } else {
        new_vreg_portion = new_vreg_portion << 32;
      }
      full_new_value = static_cast<uintptr_t>((old_reg_val_as_wide & mask) | new_vreg_portion);
    }
  }
  SetRegister(reg, full_new_value, is_float);
  return true;
}

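// Worked example for the read-modify-write above: writing the low half
// (kLongLoVReg) of a long held in a 64-bit register keeps the old bits 63..32
// (mask 0xffffffff00000000) and splices new_value into bits 31..0; writing the
// high half shifts new_value up by 32 and keeps the old low bits instead.
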
bool StackVisitor::SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
                               VRegKind kind_lo, VRegKind kind_hi) {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
  }
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
    DCHECK(m == GetMethod());
    if (m->IsOptimized(sizeof(void*))) {
      return false;
    } else {
      return SetVRegPairFromQuickCode(m, vreg, new_value, kind_lo, kind_hi);
    }
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    cur_shadow_frame_->SetVRegLong(vreg, new_value);
    return true;
  }
}

bool StackVisitor::SetVRegPairFromQuickCode(
    ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
  DCHECK(code_pointer != nullptr);
  const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
  QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
  uint32_t vmap_offset_lo, vmap_offset_hi;
  // TODO: IsInContext stops before spotting floating point registers.
  if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
      vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
    bool is_float = (kind_lo == kDoubleLoVReg);
    uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
    uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
    uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
    return SetRegisterPairIfAccessible(reg_lo, reg_hi, new_value, is_float);
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                      // its instructions?
    uint32_t* addr = GetVRegAddrFromQuickCode(
        cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
    *reinterpret_cast<uint64_t*>(addr) = new_value;
    return true;
  }
}

bool StackVisitor::SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
                                               uint64_t new_value, bool is_float) {
  if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
    return false;
  }
  uintptr_t new_value_lo = static_cast<uintptr_t>(new_value & 0xFFFFFFFF);
  uintptr_t new_value_hi = static_cast<uintptr_t>(new_value >> 32);
  bool target64 = Is64BitInstructionSet(kRuntimeISA);
  // Deal with 32 or 64-bit wide registers in a way that builds on all targets.
  if (target64) {
    DCHECK_EQ(reg_lo, reg_hi);
    SetRegister(reg_lo, new_value, is_float);
  } else {
    SetRegister(reg_lo, new_value_lo, is_float);
    SetRegister(reg_hi, new_value_hi, is_float);
  }
  return true;
}

bool StackVisitor::IsAccessibleGPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleGPR(reg);
}

uintptr_t* StackVisitor::GetGPRAddress(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPRAddress(reg);
}

uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPR(reg);
}

void StackVisitor::SetGPR(uint32_t reg, uintptr_t value) {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  context_->SetGPR(reg, value);
}

bool StackVisitor::IsAccessibleFPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleFPR(reg);
}

uintptr_t StackVisitor::GetFPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetFPR(reg);
}

void StackVisitor::SetFPR(uint32_t reg, uintptr_t value) {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  context_->SetFPR(reg, value);
}

uintptr_t StackVisitor::GetReturnPc() const {
  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
  DCHECK(sp != nullptr);
  uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
  return *reinterpret_cast<uintptr_t*>(pc_addr);
}

void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
  CHECK(sp != nullptr);
  uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
  *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}

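// SetReturnPc rewrites the saved return address inside the current quick
// frame; redirecting returns this way is how, for instance, instrumentation
// exit stubs get installed (an assumption about the callers, which live
// outside this file).
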
size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
  struct NumFramesVisitor : public StackVisitor {
    NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
        : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}

    bool VisitFrame() OVERRIDE {
      frames++;
      return true;
    }

    size_t frames;
  };
  NumFramesVisitor visitor(thread, walk_kind);
  visitor.WalkStack(true);
  return visitor.frames;
}

bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
  struct HasMoreFramesVisitor : public StackVisitor {
    HasMoreFramesVisitor(Thread* thread,
                         StackWalkKind walk_kind,
                         size_t num_frames,
                         size_t frame_height)
        : StackVisitor(thread, nullptr, walk_kind, num_frames),
          frame_height_(frame_height),
          found_frame_(false),
          has_more_frames_(false),
          next_method_(nullptr),
          next_dex_pc_(0) {
    }

    bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      if (found_frame_) {
        ArtMethod* method = GetMethod();
        if (method != nullptr && !method->IsRuntimeMethod()) {
          has_more_frames_ = true;
          next_method_ = method;
          next_dex_pc_ = GetDexPc();
          return false;  // End stack walk once next method is found.
        }
      } else if (GetFrameHeight() == frame_height_) {
        found_frame_ = true;
      }
      return true;
    }

    size_t frame_height_;
    bool found_frame_;
    bool has_more_frames_;
    ArtMethod* next_method_;
    uint32_t next_dex_pc_;
  };
  HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
  visitor.WalkStack(true);
  *next_method = visitor.next_method_;
  *next_dex_pc = visitor.next_dex_pc_;
  return visitor.has_more_frames_;
}

void StackVisitor::DescribeStack(Thread* thread) {
  struct DescribeStackVisitor : public StackVisitor {
    explicit DescribeStackVisitor(Thread* thread_in)
        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

    bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
      return true;
    }
  };
  DescribeStackVisitor visitor(thread);
  visitor.WalkStack(true);
}

std::string StackVisitor::DescribeLocation() const {
  ArtMethod* m = GetMethod();
  if (m == nullptr) {
    return "upcall";
  }
  std::string result("Visiting method '");
  result += PrettyMethod(m);
  result += StringPrintf("' at dex PC 0x%04x", GetDexPc());
  if (!IsShadowFrame()) {
    result += StringPrintf(" (native PC %p)", reinterpret_cast<void*>(GetCurrentQuickFramePc()));
  }
  return result;
}

static instrumentation::InstrumentationStackFrame& GetInstrumentationStackFrame(Thread* thread,
                                                                                uint32_t depth) {
  CHECK_LT(depth, thread->GetInstrumentationStack()->size());
  return thread->GetInstrumentationStack()->at(depth);
}

void StackVisitor::SanityCheckFrame() const {
  if (kIsDebugBuild) {
    ArtMethod* method = GetMethod();
    auto* declaring_class = method->GetDeclaringClass();
    // Runtime methods have a null declaring class.
    if (!method->IsRuntimeMethod()) {
      CHECK(declaring_class != nullptr);
      CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
          << declaring_class;
    } else {
      CHECK(declaring_class == nullptr);
    }
    auto* runtime = Runtime::Current();
    auto* la = runtime->GetLinearAlloc();
    if (!la->Contains(method)) {
      // Check the image space.
      bool in_image = false;
      for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
        if (space->IsImageSpace()) {
          auto* image_space = space->AsImageSpace();
          const auto& header = image_space->GetImageHeader();
          const auto* methods = &header.GetMethodsSection();
          if (methods->Contains(reinterpret_cast<const uint8_t*>(method) - image_space->Begin())) {
            in_image = true;
            break;
          }
        }
      }
      CHECK(in_image) << PrettyMethod(method) << " not in linear alloc or image";
    }
    if (cur_quick_frame_ != nullptr) {
      method->AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
      // Frame sanity.
      size_t frame_size = method->GetFrameSizeInBytes();
      CHECK_NE(frame_size, 0u);
      // A rough guess at an upper size we expect to see for a frame:
      //   256 registers
      //   2 words of HandleScope overhead
      //   3+3 register spills
      // TODO: this seems architecture specific for the case of JNI frames.
      // TODO: 083-compiler-regressions ManyFloatArgs shows this estimate is wrong.
      // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
      const size_t kMaxExpectedFrameSize = 2 * KB;
      CHECK_LE(frame_size, kMaxExpectedFrameSize);
      size_t return_pc_offset = method->GetReturnPcOffset().SizeValue();
      CHECK_LT(return_pc_offset, frame_size);
    }
  }
}

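// WalkStack drives the visitor: it iterates over the thread's ManagedStack
// fragments, walking compiled ("quick") frames by frame size within a
// fragment and interpreter shadow frames by their link pointers, calling
// VisitFrame() for each frame until it returns false.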
void StackVisitor::WalkStack(bool include_transitions) {
  DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
  CHECK_EQ(cur_depth_, 0U);
  bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
  uint32_t instrumentation_stack_depth = 0;

  for (const ManagedStack* current_fragment = thread_->GetManagedStack();
       current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
    cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
    cur_quick_frame_ = current_fragment->GetTopQuickFrame();
    cur_quick_frame_pc_ = 0;

    if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
      // Can't be both a shadow and a quick fragment.
      DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
      ArtMethod* method = *cur_quick_frame_;
      while (method != nullptr) {
        SanityCheckFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }

        if (context_ != nullptr) {
          context_->FillCalleeSaves(*this);
        }
        size_t frame_size = method->GetFrameSizeInBytes();
        // Compute the PC for the next stack frame from the return PC.
        size_t return_pc_offset = method->GetReturnPcOffset(frame_size).SizeValue();
        uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
        uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
        if (UNLIKELY(exit_stubs_installed)) {
          // While profiling, the return pc is restored from the side stack, except when walking
          // the stack for an exception where the side stack will be unwound in VisitFrame.
          if (reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc) {
            const instrumentation::InstrumentationStackFrame& instrumentation_frame =
                GetInstrumentationStackFrame(thread_, instrumentation_stack_depth);
            instrumentation_stack_depth++;
            if (GetMethod() == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)) {
              // Skip runtime save-all-callee frames, which are used to deliver exceptions.
            } else if (instrumentation_frame.interpreter_entry_) {
              ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
              CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee)
                                            << " Found: " << PrettyMethod(GetMethod());
            } else if (instrumentation_frame.method_ != GetMethod()) {
              LOG(FATAL) << "Expected: " << PrettyMethod(instrumentation_frame.method_)
                         << " Found: " << PrettyMethod(GetMethod());
            }
            if (num_frames_ != 0) {
              // Check agreement of frame ids only if num_frames_ is computed, to avoid infinite
              // recursion.
              CHECK(instrumentation_frame.frame_id_ == GetFrameId())
                  << "Expected: " << instrumentation_frame.frame_id_
                  << " Found: " << GetFrameId();
            }
            return_pc = instrumentation_frame.return_pc_;
          }
        }
        cur_quick_frame_pc_ = return_pc;
        uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
        cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);

        if (kDebugStackWalk) {
          LOG(INFO) << PrettyMethod(method) << "@" << method << " size=" << frame_size
                    << " optimized=" << method->IsOptimized(sizeof(void*))
                    << " native=" << method->IsNative()
                    << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
                    << "," << method->GetEntryPointFromJni()
                    << "," << method->GetEntryPointFromInterpreter()
                    << " next=" << *cur_quick_frame_;
        }

        cur_depth_++;
        method = *cur_quick_frame_;
      }
    } else if (cur_shadow_frame_ != nullptr) {
      do {
        SanityCheckFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }
        cur_depth_++;
        cur_shadow_frame_ = cur_shadow_frame_->GetLink();
      } while (cur_shadow_frame_ != nullptr);
    }
    if (include_transitions) {
      bool should_continue = VisitFrame();
      if (!should_continue) {
        return;
      }
    }
    cur_depth_++;
  }
  if (num_frames_ != 0) {
    CHECK_EQ(cur_depth_, num_frames_);
  }
}

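// Usage sketch: a minimal custom visitor (hypothetical, not part of this
// file). It assumes `thread` is Thread::Current() or a suspended thread, as
// the DCHECK in WalkStack requires:
//
//   struct PrintingVisitor : public StackVisitor {
//     explicit PrintingVisitor(Thread* thread)
//         : StackVisitor(thread, nullptr, StackWalkKind::kIncludeInlinedFrames) {}
//     bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
//       LOG(INFO) << DescribeLocation();  // Handles the null-method "upcall" case.
//       return true;  // Keep walking toward the oldest frame.
//     }
//   };
//   PrintingVisitor visitor(thread);
//   visitor.WalkStack(false);  // false: skip transition frames between fragments.
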
void JavaFrameRootInfo::Describe(std::ostream& os) const {
  const StackVisitor* visitor = stack_visitor_;
  CHECK(visitor != nullptr);
  os << "Type=" << GetType() << " thread_id=" << GetThreadId() << " location="
     << visitor->DescribeLocation() << " vreg=" << vreg_;
}

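// Offset map computed below, as read off the branches of the function
// (offsets relative to the frame base, which holds this method's ArtMethod*
// at offset 0):
//   - the method pointer itself: offset 0
//   - non-special temps: from outs_size * sizeof(uint32_t) + pointer_size, upward
//   - dex locals: from frame_size - spill_size - num_regs * sizeof(uint32_t), upward
//   - ins: past the end of the frame, from frame_size + pointer_size, upward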
int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
                                             uint32_t core_spills, uint32_t fp_spills,
                                             size_t frame_size, int reg, InstructionSet isa) {
  size_t pointer_size = InstructionSetPointerSize(isa);
  if (kIsDebugBuild) {
    auto* runtime = Runtime::Current();
    if (runtime != nullptr) {
      CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size);
    }
  }
  DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
  DCHECK_NE(reg, -1);
  int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
      + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
      + sizeof(uint32_t);  // Filler.
  int num_regs = code_item->registers_size_ - code_item->ins_size_;
  int temp_threshold = code_item->registers_size_;
  const int max_num_special_temps = 1;
  if (reg == temp_threshold) {
    // The current method pointer corresponds to a special location on the stack.
    return 0;
  } else if (reg >= temp_threshold + max_num_special_temps) {
    /*
     * Special temporaries may have custom locations and the logic above deals with that.
     * However, non-special temporaries are placed relative to the outs.
     */
    int temps_start = code_item->outs_size_ * sizeof(uint32_t) + pointer_size /* art method */;
    int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
    return temps_start + relative_offset;
  } else if (reg < num_regs) {
    int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
    return locals_start + (reg * sizeof(uint32_t));
  } else {
    // Handle ins.
    return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + pointer_size /* art method */;
  }
}

}  // namespace art