/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
#define ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_

#include "android-base/macros.h"
#include "instrumentation.h"
#include "interpreter.h"

#include <math.h>

#include <atomic>
#include <iostream>
#include <sstream>

#include <android-base/logging.h>
#include <android-base/stringprintf.h>

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/locks.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/pointer_size.h"
#include "class_linker-inl.h"
#include "class_root-inl.h"
#include "common_dex_operations.h"
#include "common_throws.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "handle_scope-inl.h"
#include "interpreter_cache-inl.h"
#include "interpreter_switch_impl.h"
#include "intrinsics_list.h"
#include "jit/jit-inl.h"
#include "mirror/call_site.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/method.h"
#include "mirror/method_handles_lookup.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "obj_ptr.h"
#include "stack.h"
#include "thread.h"
#include "thread-inl.h"
#include "unstarted_runtime.h"
#include "verifier/method_verifier.h"

namespace art HIDDEN {
namespace interpreter {

void ThrowNullPointerExceptionFromInterpreter()
    REQUIRES_SHARED(Locks::mutator_lock_);

static inline void DoMonitorEnter(Thread* self, ShadowFrame* frame, ObjPtr<mirror::Object> ref)
    NO_THREAD_SAFETY_ANALYSIS
    REQUIRES(!Roles::uninterruptible_) {
  DCHECK(!ref.IsNull());
  StackHandleScope<1> hs(self);
  Handle<mirror::Object> h_ref(hs.NewHandle(ref));
  h_ref->MonitorEnter(self);
  DCHECK(self->HoldsLock(h_ref.Get()));
  if (UNLIKELY(self->IsExceptionPending())) {
    bool unlocked = h_ref->MonitorExit(self);
    DCHECK(unlocked);
    return;
  }
  if (frame->GetMethod()->MustCountLocks()) {
    DCHECK(!frame->GetMethod()->SkipAccessChecks());
    frame->GetLockCountData().AddMonitor(self, h_ref.Get());
  }
}

static inline void DoMonitorExit(Thread* self, ShadowFrame* frame, ObjPtr<mirror::Object> ref)
    NO_THREAD_SAFETY_ANALYSIS
    REQUIRES(!Roles::uninterruptible_) {
  StackHandleScope<1> hs(self);
  Handle<mirror::Object> h_ref(hs.NewHandle(ref));
  h_ref->MonitorExit(self);
  if (frame->GetMethod()->MustCountLocks()) {
    DCHECK(!frame->GetMethod()->SkipAccessChecks());
    frame->GetLockCountData().RemoveMonitorOrThrow(self, h_ref.Get());
  }
}

static inline bool DoMonitorCheckOnExit(Thread* self, ShadowFrame* frame)
    NO_THREAD_SAFETY_ANALYSIS
    REQUIRES(!Roles::uninterruptible_) {
  if (frame->GetMethod()->MustCountLocks()) {
    DCHECK(!frame->GetMethod()->SkipAccessChecks());
    return frame->GetLockCountData().CheckAllMonitorsReleasedOrThrow(self);
  }
  return true;
}

// Invokes the given method. This is part of the invocation support and is used by the DoInvoke,
// DoFastInvoke and DoInvokeVirtualQuick functions.
// Returns true on success, otherwise throws an exception and returns false.
template<bool is_range>
bool DoCall(ArtMethod* called_method,
            Thread* self,
            ShadowFrame& shadow_frame,
            const Instruction* inst,
            uint16_t inst_data,
            bool string_init,
            JValue* result);

// Called by the switch interpreter to know whether we can stay in it.
bool ShouldStayInSwitchInterpreter(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_);

// Throws an exception if we are getting close to the end of the stack.
NO_INLINE bool CheckStackOverflow(Thread* self, size_t frame_size)
    REQUIRES_SHARED(Locks::mutator_lock_);


// Sends the normal method exit event.
// Returns true if the events succeeded and false if there is a pending exception.
template <typename T> bool SendMethodExitEvents(
    Thread* self,
    const instrumentation::Instrumentation* instrumentation,
    ShadowFrame& frame,
    ArtMethod* method,
    T& result) REQUIRES_SHARED(Locks::mutator_lock_);

static inline ALWAYS_INLINE WARN_UNUSED bool
NeedsMethodExitEvent(const instrumentation::Instrumentation* ins)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return ins->HasMethodExitListeners() || ins->HasWatchedFramePopListeners();
}

COLD_ATTR void UnlockHeldMonitors(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_);

void PerformNonStandardReturn(Thread* self,
                              ShadowFrame& frame,
                              JValue& result,
                              const instrumentation::Instrumentation* instrumentation,
                              bool unlock_monitors = true) REQUIRES_SHARED(Locks::mutator_lock_);

// Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
// Returns true on success, otherwise throws an exception and returns false.
template<InvokeType type, bool is_range>
static ALWAYS_INLINE bool DoInvoke(Thread* self,
                                   ShadowFrame& shadow_frame,
                                   const Instruction* inst,
                                   uint16_t inst_data,
                                   JValue* result)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Make sure to check for async exceptions before anything else.
  if (UNLIKELY(self->ObserveAsyncException())) {
    return false;
  }
  const uint32_t vregC = is_range ? inst->VRegC_3rc() : inst->VRegC_35c();
  ObjPtr<mirror::Object> obj = type == kStatic ? nullptr : shadow_frame.GetVRegReference(vregC);
  ArtMethod* sf_method = shadow_frame.GetMethod();
  bool string_init = false;
  ArtMethod* called_method = FindMethodToCall<type>(
      self, sf_method, &obj, *inst, /* only_lookup_tls_cache= */ false, &string_init);
  if (called_method == nullptr) {
    DCHECK(self->IsExceptionPending());
    result->SetJ(0);
    return false;
  }

  return DoCall<is_range>(
      called_method, self, shadow_frame, inst, inst_data, string_init, result);
}
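
// Illustrative sketch (not part of the ART sources): an opcode handler in the switch interpreter
// would typically instantiate DoInvoke with the invoke type and range-ness of the bytecode it is
// handling, e.g. for a non-range invoke-virtual:
//
//   JValue result;
//   bool success = DoInvoke<kVirtual, /*is_range=*/ false>(
//       self, shadow_frame, inst, inst_data, &result);
//   if (!success) { /* An exception is pending; transfer control to the exception handler. */ }
//
// The names above mirror the parameters of DoInvoke; the surrounding control flow is assumed.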

static inline ObjPtr<mirror::MethodHandle> ResolveMethodHandle(Thread* self,
                                                               uint32_t method_handle_index,
                                                               ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  return class_linker->ResolveMethodHandle(self, method_handle_index, referrer);
}

static inline ObjPtr<mirror::MethodType> ResolveMethodType(Thread* self,
                                                           dex::ProtoIndex method_type_index,
                                                           ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  return class_linker->ResolveMethodType(self, method_type_index, referrer);
}

#define DECLARE_SIGNATURE_POLYMORPHIC_HANDLER(Name, ...)               \
bool Do ## Name(Thread* self,                                          \
                ShadowFrame& shadow_frame,                             \
                const Instruction* inst,                               \
                uint16_t inst_data,                                    \
                JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
ART_INTRINSICS_LIST(DECLARE_SIGNATURE_POLYMORPHIC_HANDLER)
#undef DECLARE_SIGNATURE_POLYMORPHIC_HANDLER

// Performs an invoke-polymorphic or invoke-polymorphic/range.
template<bool is_range>
bool DoInvokePolymorphic(Thread* self,
                         ShadowFrame& shadow_frame,
                         const Instruction* inst,
                         uint16_t inst_data,
                         JValue* result)
    REQUIRES_SHARED(Locks::mutator_lock_);

bool DoInvokeCustom(Thread* self,
                    ShadowFrame& shadow_frame,
                    uint32_t call_site_idx,
                    const InstructionOperands* operands,
                    JValue* result)
    REQUIRES_SHARED(Locks::mutator_lock_);

// Performs a custom invoke (invoke-custom or invoke-custom/range).
template<bool is_range>
bool DoInvokeCustom(Thread* self,
                    ShadowFrame& shadow_frame,
                    const Instruction* inst,
                    uint16_t inst_data,
                    JValue* result)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const uint32_t call_site_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
  if (is_range) {
    RangeInstructionOperands operands(inst->VRegC_3rc(), inst->VRegA_3rc());
    return DoInvokeCustom(self, shadow_frame, call_site_idx, &operands, result);
  } else {
    uint32_t args[Instruction::kMaxVarArgRegs];
    inst->GetVarArgs(args, inst_data);
    VarArgsInstructionOperands operands(args, inst->VRegA_35c());
    return DoInvokeCustom(self, shadow_frame, call_site_idx, &operands, result);
  }
}
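
// Illustrative note (derived from the code above, not an authoritative format description): the
// two invoke-custom encodings only differ in how the argument registers are described. For the
// 35c form, GetVarArgs() unpacks up to Instruction::kMaxVarArgRegs explicit register numbers, so
// `invoke-custom {v1, v2}, call_site@...` yields VarArgsInstructionOperands over {v1, v2}; the
// range form instead describes the contiguous block of VRegA_3rc() registers starting at
// VRegC_3rc(). Both paths then funnel into the non-template DoInvokeCustom overload.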

template<Primitive::Type field_type>
ALWAYS_INLINE static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue field_value;
  switch (field_type) {
    case Primitive::kPrimBoolean:
      field_value.SetZ(static_cast<uint8_t>(shadow_frame.GetVReg(vreg)));
      break;
    case Primitive::kPrimByte:
      field_value.SetB(static_cast<int8_t>(shadow_frame.GetVReg(vreg)));
      break;
    case Primitive::kPrimChar:
      field_value.SetC(static_cast<uint16_t>(shadow_frame.GetVReg(vreg)));
      break;
    case Primitive::kPrimShort:
      field_value.SetS(static_cast<int16_t>(shadow_frame.GetVReg(vreg)));
      break;
    case Primitive::kPrimInt:
      field_value.SetI(shadow_frame.GetVReg(vreg));
      break;
    case Primitive::kPrimLong:
      field_value.SetJ(shadow_frame.GetVRegLong(vreg));
      break;
    case Primitive::kPrimNot:
      field_value.SetL(shadow_frame.GetVRegReference(vreg));
      break;
    default:
      LOG(FATAL) << "Unreachable: " << field_type;
      UNREACHABLE();
  }
  return field_value;
}
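
// Illustrative sketch (assumed usage, not part of the ART sources): a field-store handler would
// read the value to write from its source register with the field's static type, e.g.
//
//   JValue value = GetFieldValue<Primitive::kPrimInt>(shadow_frame, vregA);
//
// The narrowing casts above truncate sub-word types (boolean/byte/char/short) to their declared
// width before the value is packed into the JValue.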

extern "C" size_t NterpGetStaticField(Thread* self,
                                      ArtMethod* caller,
                                      const uint16_t* dex_pc_ptr,
                                      size_t resolve_field_type);

extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
                                                ArtMethod* caller,
                                                const uint16_t* dex_pc_ptr,
                                                size_t resolve_field_type);

static inline void GetFieldInfo(Thread* self,
                                ArtMethod* caller,
                                const uint16_t* dex_pc_ptr,
                                bool is_static,
                                bool resolve_field_type,
                                ArtField** field,
                                bool* is_volatile,
                                MemberOffset* offset) {
  size_t tls_value = 0u;
  if (!self->GetInterpreterCache()->Get(self, dex_pc_ptr, &tls_value)) {
    if (is_static) {
      tls_value = NterpGetStaticField(self, caller, dex_pc_ptr, resolve_field_type);
    } else {
      tls_value = NterpGetInstanceFieldOffset(self, caller, dex_pc_ptr, resolve_field_type);
    }

    if (self->IsExceptionPending()) {
      return;
    }
  }

  if (is_static) {
    DCHECK_NE(tls_value, 0u);
    *is_volatile = ((tls_value & 1) != 0);
    *field = reinterpret_cast<ArtField*>(tls_value & ~static_cast<size_t>(1u));
    *offset = (*field)->GetOffset();
  } else {
    *is_volatile = (static_cast<int32_t>(tls_value) < 0);
    *offset = MemberOffset(std::abs(static_cast<int32_t>(tls_value)));
  }
}
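
// Descriptive note (derived from the decoding above, not an authoritative format spec): the
// cached tls_value packs both the field identity and its volatility in one word.
//   - Static fields:   tls_value is the ArtField* with bit 0 set when the field is volatile;
//                      e.g. a hypothetical value 0x7f00ab31 decodes to field 0x7f00ab30, volatile.
//   - Instance fields: tls_value is the member offset, negated when the field is volatile;
//                      e.g. static_cast<int32_t>(tls_value) == -16 decodes to offset 16, volatile.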

// Handles string resolution for const-string and const-string/jumbo instructions. Also ensures the
// java.lang.String class is initialized.
static inline ObjPtr<mirror::String> ResolveString(Thread* self,
                                                   ShadowFrame& shadow_frame,
                                                   dex::StringIndex string_idx)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> java_lang_string_class = GetClassRoot<mirror::String>();
  if (UNLIKELY(!java_lang_string_class->IsVisiblyInitialized())) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_class(hs.NewHandle(java_lang_string_class));
    if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
                     self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
      DCHECK(self->IsExceptionPending());
      return nullptr;
    }
    DCHECK(h_class->IsInitializing());
  }
  ArtMethod* method = shadow_frame.GetMethod();
  ObjPtr<mirror::String> string_ptr =
      Runtime::Current()->GetClassLinker()->ResolveString(string_idx, method);
  return string_ptr;
}

// Handles div-int, div-int/2addr, div-int/lit16 and div-int/lit8 instructions.
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
                               int32_t dividend, int32_t divisor)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
  if (UNLIKELY(divisor == 0)) {
    ThrowArithmeticExceptionDivideByZero();
    return false;
  }
  if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
    shadow_frame.SetVReg(result_reg, kMinInt);
  } else {
    shadow_frame.SetVReg(result_reg, dividend / divisor);
  }
  return true;
}
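
// Descriptive note (not from the ART sources): the kMinInt/-1 special case exists because the
// mathematically correct quotient, 2^31, does not fit in an int32_t. C++ leaves that overflow
// undefined, while Java semantics (which dex bytecode follows) require the wrapped result, so
// -2147483648 / -1 must yield -2147483648 and the corresponding remainder must be 0. The same
// reasoning applies with kMinLong for the 64-bit helpers below.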

// Handles rem-int, rem-int/2addr, rem-int/lit16 and rem-int/lit8 instructions.
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
                                  int32_t dividend, int32_t divisor)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
  if (UNLIKELY(divisor == 0)) {
    ThrowArithmeticExceptionDivideByZero();
    return false;
  }
  if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
    shadow_frame.SetVReg(result_reg, 0);
  } else {
    shadow_frame.SetVReg(result_reg, dividend % divisor);
  }
  return true;
}

// Handles div-long and div-long/2addr instructions.
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoLongDivide(ShadowFrame& shadow_frame,
                                size_t result_reg,
                                int64_t dividend,
                                int64_t divisor)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const int64_t kMinLong = std::numeric_limits<int64_t>::min();
  if (UNLIKELY(divisor == 0)) {
    ThrowArithmeticExceptionDivideByZero();
    return false;
  }
  if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
    shadow_frame.SetVRegLong(result_reg, kMinLong);
  } else {
    shadow_frame.SetVRegLong(result_reg, dividend / divisor);
  }
  return true;
}

// Handles rem-long and rem-long/2addr instructions.
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoLongRemainder(ShadowFrame& shadow_frame,
                                   size_t result_reg,
                                   int64_t dividend,
                                   int64_t divisor)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const int64_t kMinLong = std::numeric_limits<int64_t>::min();
  if (UNLIKELY(divisor == 0)) {
    ThrowArithmeticExceptionDivideByZero();
    return false;
  }
  if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
    shadow_frame.SetVRegLong(result_reg, 0);
  } else {
    shadow_frame.SetVRegLong(result_reg, dividend % divisor);
  }
  return true;
}

// Handles filled-new-array and filled-new-array/range instructions.
// Returns true on success, otherwise throws an exception and returns false.
template <bool is_range>
bool DoFilledNewArray(const Instruction* inst,
                      const ShadowFrame& shadow_frame,
                      Thread* self,
                      JValue* result);

// Handles the packed-switch instruction.
// Returns the branch offset to the next instruction to execute.
static inline int32_t DoPackedSwitch(const Instruction* inst,
                                     const ShadowFrame& shadow_frame,
                                     uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH);
  const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
  int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
  DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
  uint16_t size = switch_data[1];
  if (size == 0) {
    // Empty packed switch, move forward by 3 (size of PACKED_SWITCH).
    return 3;
  }
  const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
  DCHECK_ALIGNED(keys, 4);
  int32_t first_key = keys[0];
  const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]);
  DCHECK_ALIGNED(targets, 4);
  int32_t index = test_val - first_key;
  if (index >= 0 && index < size) {
    return targets[index];
  } else {
    // No corresponding value: move forward by 3 (size of PACKED_SWITCH).
    return 3;
  }
}
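
// Descriptive note (layout as read by the code above; the Dalvik bytecode format is the
// authoritative definition): VRegB_31t is the code-unit offset from the instruction to its
// packed-switch payload, laid out as
//   ushort ident = 0x0100, ushort size, int first_key, int targets[size]
// where targets are branch offsets relative to the switch opcode. For example, a payload with
// first_key = 10 and targets = {+8, +12} branches by +8 for test_val 10 and +12 for 11, and falls
// through by 3 code units (the width of packed-switch) for any other value.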

// Handles the sparse-switch instruction.
// Returns the branch offset to the next instruction to execute.
static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
                                     uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
  const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
  int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
  DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
  uint16_t size = switch_data[1];
  // Return the length of SPARSE_SWITCH if size is 0.
  if (size == 0) {
    return 3;
  }
  const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
  DCHECK_ALIGNED(keys, 4);
  const int32_t* entries = keys + size;
  DCHECK_ALIGNED(entries, 4);
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) / 2;
    int32_t foundVal = keys[mid];
    if (test_val < foundVal) {
      hi = mid - 1;
    } else if (test_val > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  // No corresponding value: move forward by 3 (size of SPARSE_SWITCH).
  return 3;
}
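
// Descriptive note (layout as read by the code above): the sparse-switch payload is
//   ushort ident = 0x0200, ushort size, int keys[size] (sorted ascending), int targets[size]
// and the handler binary-searches keys for test_val, returning the matching branch offset or
// falling through by 3 code units (the width of sparse-switch) when no key matches.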

// We execute any instrumentation events triggered by throwing and/or handling the pending
// exception and change the shadow_frame's dex_pc to the appropriate exception handler if the
// current method has one. If the exception has been handled and the shadow_frame is now pointing
// to a catch clause we return true. If the current method is unable to handle the exception we
// return false.
// This function accepts a null Instrumentation* as a way to cause instrumentation events not to be
// reported.
// TODO We might wish to reconsider how we cause some events to be ignored.
bool MoveToExceptionHandler(Thread* self,
                            ShadowFrame& shadow_frame,
                            bool skip_listeners,
                            bool skip_throw_listener) REQUIRES_SHARED(Locks::mutator_lock_);

NO_RETURN void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame)
    __attribute__((cold))
    REQUIRES_SHARED(Locks::mutator_lock_);

// Set to true if you want a TraceExecution invocation before each bytecode execution.
constexpr bool kTraceExecutionEnabled = false;

static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
                                  const uint32_t dex_pc)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (kTraceExecutionEnabled) {
#define TRACE_LOG std::cerr
    std::ostringstream oss;
    oss << shadow_frame.GetMethod()->PrettyMethod()
        << android::base::StringPrintf("\n0x%x: ", dex_pc)
        << inst->DumpString(shadow_frame.GetMethod()->GetDexFile()) << "\n";
    for (uint32_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
      uint32_t raw_value = shadow_frame.GetVReg(i);
      ObjPtr<mirror::Object> ref_value = shadow_frame.GetVRegReference(i);
      oss << android::base::StringPrintf(" vreg%u=0x%08X", i, raw_value);
      if (ref_value != nullptr) {
        if (ref_value->GetClass()->IsStringClass() &&
            !ref_value->AsString()->IsValueNull()) {
          oss << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\"";
        } else {
          oss << "/" << ref_value->PrettyTypeOf();
        }
      }
    }
    TRACE_LOG << oss.str() << "\n";
#undef TRACE_LOG
  }
}

static inline bool IsBackwardBranch(int32_t branch_offset) {
  return branch_offset <= 0;
}

// The arg_offset is the offset to the first input register in the frame.
void ArtInterpreterToCompiledCodeBridge(Thread* self,
                                        ArtMethod* caller,
                                        ShadowFrame* shadow_frame,
                                        uint16_t arg_offset,
                                        JValue* result);

// Sets the string value created from StringFactory.newStringFromXXX() into all aliases of
// StringFactory.newEmptyString().
void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
                                    uint16_t this_obj_vreg,
                                    JValue result);

}  // namespace interpreter
}  // namespace art

#endif  // ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_