/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "interpreter.h"

#include <limits>
#include <string_view>

#include "common_dex_operations.h"
#include "common_throws.h"
#include "dex/dex_file_types.h"
#include "interpreter_common.h"
#include "interpreter_mterp_impl.h"
#include "interpreter_switch_impl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jvalue-inl.h"
#include "mirror/string-inl.h"
#include "mterp/mterp.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "shadow_frame-inl.h"
#include "stack.h"
#include "thread-inl.h"
#include "unstarted_runtime.h"

namespace art {
namespace interpreter {

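// Interpret the raw dex register value |arg| as an object reference. Only valid for arguments
// whose shorty character is 'L', and only while the mutator lock is held.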
ALWAYS_INLINE static ObjPtr<mirror::Object> ObjArg(uint32_t arg)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return reinterpret_cast<mirror::Object*>(arg);
}

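// Hand-rolled JNI dispatch for a small, fixed set of shorties. This bypasses JNI-compiled stubs
// (see the TODO below) and is only reached for native methods invoked through the interpreter;
// any shorty not listed here aborts with LOG(FATAL).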
static void InterpreterJni(Thread* self,
                           ArtMethod* method,
                           std::string_view shorty,
                           ObjPtr<mirror::Object> receiver,
                           uint32_t* args,
                           JValue* result)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // TODO: The following enters JNI code using a typedef-ed function rather than the JNI compiler;
  // it should be removed and JNI-compiled stubs used instead.
  ScopedObjectAccessUnchecked soa(self);
  if (method->IsStatic()) {
    if (shorty == "L") {
      using fntype = jobject(JNIEnv*, jclass);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      jobject jresult;
      {
        ScopedThreadStateChange tsc(self, kNative);
        jresult = fn(soa.Env(), klass.get());
      }
      result->SetL(soa.Decode<mirror::Object>(jresult));
    } else if (shorty == "V") {
      using fntype = void(JNIEnv*, jclass);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      fn(soa.Env(), klass.get());
    } else if (shorty == "Z") {
      using fntype = jboolean(JNIEnv*, jclass);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetZ(fn(soa.Env(), klass.get()));
    } else if (shorty == "BI") {
      using fntype = jbyte(JNIEnv*, jclass, jint);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetB(fn(soa.Env(), klass.get(), args[0]));
    } else if (shorty == "II") {
      using fntype = jint(JNIEnv*, jclass, jint);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetI(fn(soa.Env(), klass.get(), args[0]));
    } else if (shorty == "LL") {
      using fntype = jobject(JNIEnv*, jclass, jobject);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedLocalRef<jobject> arg0(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
      jobject jresult;
      {
        ScopedThreadStateChange tsc(self, kNative);
        jresult = fn(soa.Env(), klass.get(), arg0.get());
      }
      result->SetL(soa.Decode<mirror::Object>(jresult));
    } else if (shorty == "IIZ") {
      using fntype = jint(JNIEnv*, jclass, jint, jboolean);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetI(fn(soa.Env(), klass.get(), args[0], args[1]));
    } else if (shorty == "ILI") {
      using fntype = jint(JNIEnv*, jclass, jobject, jint);
      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(
          method->GetEntryPointFromJni()));
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedLocalRef<jobject> arg0(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
    } else if (shorty == "SIZ") {
      using fntype = jshort(JNIEnv*, jclass, jint, jboolean);
      fntype* const fn =
          reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetS(fn(soa.Env(), klass.get(), args[0], args[1]));
    } else if (shorty == "VIZ") {
      using fntype = void(JNIEnv*, jclass, jint, jboolean);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      fn(soa.Env(), klass.get(), args[0], args[1]);
    } else if (shorty == "ZLL") {
      using fntype = jboolean(JNIEnv*, jclass, jobject, jobject);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedLocalRef<jobject> arg0(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
      ScopedLocalRef<jobject> arg1(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[1])));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
    } else if (shorty == "ZILL") {
      using fntype = jboolean(JNIEnv*, jclass, jint, jobject, jobject);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedLocalRef<jobject> arg1(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[1])));
      ScopedLocalRef<jobject> arg2(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[2])));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
    } else if (shorty == "VILII") {
      using fntype = void(JNIEnv*, jclass, jint, jobject, jint, jint);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedLocalRef<jobject> arg1(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[1])));
      ScopedThreadStateChange tsc(self, kNative);
      fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
    } else if (shorty == "VLILII") {
      using fntype = void(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedLocalRef<jobject> arg0(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
      ScopedLocalRef<jobject> arg2(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[2])));
      ScopedThreadStateChange tsc(self, kNative);
      fn(soa.Env(), klass.get(), arg0.get(), args[1], arg2.get(), args[3], args[4]);
    } else {
      LOG(FATAL) << "Do something with static native method: " << method->PrettyMethod()
                 << " shorty: " << shorty;
    }
  } else {
    if (shorty == "L") {
      using fntype = jobject(JNIEnv*, jobject);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jobject> rcvr(soa.Env(),
                                   soa.AddLocalReference<jobject>(receiver));
      jobject jresult;
      {
        ScopedThreadStateChange tsc(self, kNative);
        jresult = fn(soa.Env(), rcvr.get());
      }
      result->SetL(soa.Decode<mirror::Object>(jresult));
    } else if (shorty == "V") {
      using fntype = void(JNIEnv*, jobject);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jobject> rcvr(soa.Env(),
                                   soa.AddLocalReference<jobject>(receiver));
      ScopedThreadStateChange tsc(self, kNative);
      fn(soa.Env(), rcvr.get());
    } else if (shorty == "LL") {
      using fntype = jobject(JNIEnv*, jobject, jobject);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jobject> rcvr(soa.Env(),
                                   soa.AddLocalReference<jobject>(receiver));
      ScopedLocalRef<jobject> arg0(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
      jobject jresult;
      {
        ScopedThreadStateChange tsc(self, kNative);
        jresult = fn(soa.Env(), rcvr.get(), arg0.get());
      }
      result->SetL(soa.Decode<mirror::Object>(jresult));
    } else if (shorty == "III") {
      using fntype = jint(JNIEnv*, jobject, jint, jint);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jobject> rcvr(soa.Env(),
                                   soa.AddLocalReference<jobject>(receiver));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetI(fn(soa.Env(), rcvr.get(), args[0], args[1]));
    } else {
      LOG(FATAL) << "Do something with native method: " << method->PrettyMethod()
                 << " shorty: " << shorty;
    }
  }
}

enum InterpreterImplKind {
  kSwitchImplKind,  // Switch-based interpreter implementation.
  kMterpImplKind    // Assembly-based interpreter implementation.
};

#if ART_USE_CXX_INTERPRETER
static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImplKind;
#else
static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
#endif

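// Core interpreter entry point. Runs |shadow_frame| for the method it holds, delivering
// instrumentation entry events, handing off to JIT-compiled code when permitted, and then
// dispatching to the mterp or switch interpreter implementation selected above.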
static inline JValue Execute(
    Thread* self,
    const CodeItemDataAccessor& accessor,
    ShadowFrame& shadow_frame,
    JValue result_register,
    bool stay_in_interpreter = false,
    bool from_deoptimize = false) REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(!shadow_frame.GetMethod()->IsAbstract());
  DCHECK(!shadow_frame.GetMethod()->IsNative());

  // Check that we are using the right interpreter.
  if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
    // The flag might be in the process of being updated on all threads. Retry with the lock held.
    MutexLock tll_mu(self, *Locks::thread_list_lock_);
    DCHECK_EQ(self->UseMterp(), CanUseMterp());
  }

  if (LIKELY(!from_deoptimize)) {  // Entering the method, but not via deoptimization.
    if (kIsDebugBuild) {
      CHECK_EQ(shadow_frame.GetDexPC(), 0u);
      self->AssertNoPendingException();
    }
    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
    ArtMethod* method = shadow_frame.GetMethod();

    if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
      instrumentation->MethodEnterEvent(self,
                                        shadow_frame.GetThisObject(accessor.InsSize()),
                                        method,
                                        0);
      if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
        // The caller will retry this invoke or ignore the result. Just return immediately without
        // any value.
        DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
        JValue ret = JValue();
        bool res = PerformNonStandardReturn<MonitorState::kNoMonitorsLocked>(
            self,
            shadow_frame,
            ret,
            instrumentation,
            accessor.InsSize(),
            0);
        DCHECK(res) << "Expected to perform non-standard return!";
        return ret;
      }
      if (UNLIKELY(self->IsExceptionPending())) {
        instrumentation->MethodUnwindEvent(self,
                                           shadow_frame.GetThisObject(accessor.InsSize()),
                                           method,
                                           0);
        JValue ret = JValue();
        if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
          DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
          bool res = PerformNonStandardReturn<MonitorState::kNoMonitorsLocked>(
              self,
              shadow_frame,
              ret,
              instrumentation,
              accessor.InsSize(),
              0);
          DCHECK(res) << "Expected to perform non-standard return!";
        }
        return ret;
      }
    }

    if (!stay_in_interpreter && !self->IsForceInterpreter()) {
      jit::Jit* jit = Runtime::Current()->GetJit();
      if (jit != nullptr) {
        jit->MethodEntered(self, shadow_frame.GetMethod());
        if (jit->CanInvokeCompiledCode(method)) {
          JValue result;

          // Pop the shadow frame before calling into compiled code.
          self->PopShadowFrame();
          // Calculate the offset of the first input reg. The input registers are in the high regs.
          // It's ok to access the code item here since JIT code will have been touched by the
          // interpreter and compiler already.
          uint16_t arg_offset = accessor.RegistersSize() - accessor.InsSize();
          ArtInterpreterToCompiledCodeBridge(self, nullptr, &shadow_frame, arg_offset, &result);
          // Push the shadow frame back as the caller will expect it.
          self->PushShadowFrame(&shadow_frame);

          return result;
        }
      }
    }
  }

  ArtMethod* method = shadow_frame.GetMethod();

  DCheckStaticState(self, method);

  // Lock counting is a special version of accessibility checks, and for simplicity and
  // reduction of template parameters, we gate it behind access-checks mode.
  DCHECK(!method->SkipAccessChecks() || !method->MustCountLocks());

  bool transaction_active = Runtime::Current()->IsActiveTransaction();
  VLOG(interpreter) << "Interpreting " << method->PrettyMethod();
  if (LIKELY(method->SkipAccessChecks())) {
    // Enter the "without access check" interpreter.
    if (kInterpreterImplKind == kMterpImplKind) {
      if (transaction_active) {
        // No Mterp variant - just use the switch interpreter.
        return ExecuteSwitchImpl<false, true>(self, accessor, shadow_frame, result_register,
                                              false);
      } else if (UNLIKELY(!Runtime::Current()->IsStarted())) {
        return ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame, result_register,
                                               false);
      } else {
        while (true) {
          // Mterp does not support all instrumentation/debugging.
          if (!self->UseMterp()) {
            return ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame, result_register,
                                                   false);
          }
          bool returned = ExecuteMterpImpl(self,
                                           accessor.Insns(),
                                           &shadow_frame,
                                           &result_register);
          if (returned) {
            return result_register;
          } else {
            // Mterp didn't like that instruction. Single-step it with the reference interpreter.
            result_register = ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame,
                                                              result_register, true);
            if (shadow_frame.GetDexPC() == dex::kDexNoIndex) {
              // Single-stepped a return or an exception not handled locally. Return to caller.
              return result_register;
            }
          }
        }
      }
    } else {
      DCHECK_EQ(kInterpreterImplKind, kSwitchImplKind);
      if (transaction_active) {
        return ExecuteSwitchImpl<false, true>(self, accessor, shadow_frame, result_register,
                                              false);
      } else {
        return ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame, result_register,
                                               false);
      }
    }
  } else {
    // Enter the "with access check" interpreter.

    if (kInterpreterImplKind == kMterpImplKind) {
      // No access check variants for Mterp. Just use the switch version.
      if (transaction_active) {
        return ExecuteSwitchImpl<true, true>(self, accessor, shadow_frame, result_register,
                                             false);
      } else {
        return ExecuteSwitchImpl<true, false>(self, accessor, shadow_frame, result_register,
                                              false);
      }
    } else {
      DCHECK_EQ(kInterpreterImplKind, kSwitchImplKind);
      if (transaction_active) {
        return ExecuteSwitchImpl<true, true>(self, accessor, shadow_frame, result_register,
                                             false);
      } else {
        return ExecuteSwitchImpl<true, false>(self, accessor, shadow_frame, result_register,
                                              false);
      }
    }
  }
}

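// Entry point for invokes coming from outside the interpreter (e.g. reflection): builds a shadow
// frame from the raw |args|, ensures the declaring class of static methods is initialized, and
// then interprets the method, or calls through to the native implementation for native methods.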
void EnterInterpreterFromInvoke(Thread* self,
                                ArtMethod* method,
                                ObjPtr<mirror::Object> receiver,
                                uint32_t* args,
                                JValue* result,
                                bool stay_in_interpreter) {
  DCHECK_EQ(self, Thread::Current());
  bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
  if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
    ThrowStackOverflowError(self);
    return;
  }

  // This can happen if we are in forced interpreter mode and an obsolete method is called using
  // reflection.
  if (UNLIKELY(method->IsObsolete())) {
    ThrowInternalError("Attempting to invoke obsolete version of '%s'.",
                       method->PrettyMethod().c_str());
    return;
  }

  const char* old_cause = self->StartAssertNoThreadSuspension("EnterInterpreterFromInvoke");
  CodeItemDataAccessor accessor(method->DexInstructionData());
  uint16_t num_regs;
  uint16_t num_ins;
  if (accessor.HasCodeItem()) {
    num_regs = accessor.RegistersSize();
    num_ins = accessor.InsSize();
  } else if (!method->IsInvokable()) {
    self->EndAssertNoThreadSuspension(old_cause);
    method->ThrowInvocationTimeError();
    return;
  } else {
    DCHECK(method->IsNative());
    num_regs = num_ins = ArtMethod::NumArgRegisters(method->GetShorty());
    if (!method->IsStatic()) {
      num_regs++;
      num_ins++;
    }
  }
  // Set up shadow frame with matching number of reference slots to vregs.
  ShadowFrame* last_shadow_frame = self->GetManagedStack()->GetTopShadowFrame();
  ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
      CREATE_SHADOW_FRAME(num_regs, last_shadow_frame, method, /* dex pc */ 0);
  ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
  self->PushShadowFrame(shadow_frame);

  size_t cur_reg = num_regs - num_ins;
  if (!method->IsStatic()) {
    CHECK(receiver != nullptr);
    shadow_frame->SetVRegReference(cur_reg, receiver);
    ++cur_reg;
  }
  uint32_t shorty_len = 0;
  const char* shorty = method->GetShorty(&shorty_len);
  for (size_t shorty_pos = 0, arg_pos = 0; cur_reg < num_regs; ++shorty_pos, ++arg_pos, cur_reg++) {
    DCHECK_LT(shorty_pos + 1, shorty_len);
    switch (shorty[shorty_pos + 1]) {
      case 'L': {
        ObjPtr<mirror::Object> o =
            reinterpret_cast<StackReference<mirror::Object>*>(&args[arg_pos])->AsMirrorPtr();
        shadow_frame->SetVRegReference(cur_reg, o);
        break;
      }
      case 'J': case 'D': {
        uint64_t wide_value = (static_cast<uint64_t>(args[arg_pos + 1]) << 32) | args[arg_pos];
        shadow_frame->SetVRegLong(cur_reg, wide_value);
        cur_reg++;
        arg_pos++;
        break;
      }
      default:
        shadow_frame->SetVReg(cur_reg, args[arg_pos]);
        break;
    }
  }
  self->EndAssertNoThreadSuspension(old_cause);
  // Do this after populating the shadow frame in case EnsureInitialized causes a GC.
  if (method->IsStatic()) {
    ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
    if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
      StackHandleScope<1> hs(self);
      Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
                        self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
        CHECK(self->IsExceptionPending());
        self->PopShadowFrame();
        return;
      }
      DCHECK(h_class->IsInitializing());
    }
  }
  if (LIKELY(!method->IsNative())) {
    JValue r = Execute(self, accessor, *shadow_frame, JValue(), stay_in_interpreter);
    if (result != nullptr) {
      *result = r;
    }
  } else {
    // We don't expect to be asked to interpret native code (which is entered via a JNI compiler
    // generated stub) except during testing and image writing.
    // Update args to be the args in the shadow frame since the input ones could hold stale
    // reference pointers due to moving GC.
    args = shadow_frame->GetVRegArgs(method->IsStatic() ? 0 : 1);
    if (!Runtime::Current()->IsStarted()) {
      UnstartedRuntime::Jni(self, method, receiver.Ptr(), args, result);
    } else {
      InterpreterJni(self, method, shorty, receiver, args, result);
    }
  }
  self->PopShadowFrame();
}

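// Returns the vreg holding the "this" reference of a string-init invoke, i.e. vC of the
// invoke-direct or invoke-direct/range instruction.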
static int16_t GetReceiverRegisterForStringInit(const Instruction* instr) {
  DCHECK(instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE ||
         instr->Opcode() == Instruction::INVOKE_DIRECT);
  return (instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE) ?
      instr->VRegC_3rc() : instr->VRegC_35c();
}

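// Re-enters the interpreter for a chain of deoptimized shadow frames. For each frame, the dex pc
// is fixed up first: move to an exception handler if one is pending, or re-execute or skip the
// current instruction depending on the deoptimization type; the frame is then executed and freed.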
void EnterInterpreterFromDeoptimize(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    JValue* ret_val,
                                    bool from_code,
                                    DeoptimizationMethodType deopt_method_type)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue value;
  // Set value to last known result in case the shadow frame chain is empty.
  value.SetJ(ret_val->GetJ());
  // How many frames we have executed.
  size_t frame_cnt = 0;
  while (shadow_frame != nullptr) {
    // We do not want to recover lock state for lock counting when deoptimizing. Currently,
    // the compiler should not have compiled a method that failed structured-locking checks.
    DCHECK(!shadow_frame->GetMethod()->MustCountLocks());

    self->SetTopOfShadowStack(shadow_frame);
    CodeItemDataAccessor accessor(shadow_frame->GetMethod()->DexInstructionData());
    const uint32_t dex_pc = shadow_frame->GetDexPC();
    uint32_t new_dex_pc = dex_pc;
    if (UNLIKELY(self->IsExceptionPending())) {
      // If we deoptimize from the QuickExceptionHandler, we already reported the exception to
      // the instrumentation. To prevent from reporting it a second time, we simply pass a
      // null Instrumentation*.
      const instrumentation::Instrumentation* const instrumentation =
          frame_cnt == 0 ? nullptr : Runtime::Current()->GetInstrumentation();
      new_dex_pc = MoveToExceptionHandler(
          self, *shadow_frame, instrumentation) ? shadow_frame->GetDexPC() : dex::kDexNoIndex;
    } else if (!from_code) {
      // Deoptimization is not called from code directly.
      const Instruction* instr = &accessor.InstructionAt(dex_pc);
      if (deopt_method_type == DeoptimizationMethodType::kKeepDexPc ||
          shadow_frame->GetForceRetryInstruction()) {
        DCHECK(frame_cnt == 0 || (frame_cnt == 1 && shadow_frame->GetForceRetryInstruction()))
            << "frame_cnt: " << frame_cnt
            << " force-retry: " << shadow_frame->GetForceRetryInstruction();
        // Need to re-execute the dex instruction.
        // (1) An invocation might be split into class initialization and invoke.
        //     In this case, the invoke should not be skipped.
        // (2) A suspend check should also execute the dex instruction at the
        //     corresponding dex pc.
        // If the ForceRetryInstruction bit is set this must be the second frame (the first being
        // the one that is being popped).
        DCHECK_EQ(new_dex_pc, dex_pc);
        shadow_frame->SetForceRetryInstruction(false);
      } else if (instr->Opcode() == Instruction::MONITOR_ENTER ||
                 instr->Opcode() == Instruction::MONITOR_EXIT) {
        DCHECK(deopt_method_type == DeoptimizationMethodType::kDefault);
        DCHECK_EQ(frame_cnt, 0u);
        // Non-idempotent dex instruction should not be re-executed.
        // On the other hand, if a MONITOR_ENTER is at the dex_pc of a suspend
        // check, that MONITOR_ENTER should be executed. That case is handled
        // above.
        new_dex_pc = dex_pc + instr->SizeInCodeUnits();
      } else if (instr->IsInvoke()) {
        DCHECK(deopt_method_type == DeoptimizationMethodType::kDefault);
        if (IsStringInit(instr, shadow_frame->GetMethod())) {
          uint16_t this_obj_vreg = GetReceiverRegisterForStringInit(instr);
          // Move the StringFactory.newStringFromChars() result into the register representing
          // "this object" when invoking the string constructor in the original dex instruction.
          // Also move the result into all aliases.
          DCHECK(value.GetL()->IsString());
          SetStringInitValueToAllAliases(shadow_frame, this_obj_vreg, value);
          // Calling string constructor in the original dex code doesn't generate a result value.
          value.SetJ(0);
        }
        new_dex_pc = dex_pc + instr->SizeInCodeUnits();
      } else if (instr->Opcode() == Instruction::NEW_INSTANCE) {
        // A NEW_INSTANCE is simply re-executed, including
        // "new-instance String" which is compiled into a call into
        // StringFactory.newEmptyString().
        DCHECK_EQ(new_dex_pc, dex_pc);
      } else {
        DCHECK(deopt_method_type == DeoptimizationMethodType::kDefault);
        DCHECK_EQ(frame_cnt, 0u);
        // By default, we re-execute the dex instruction: since it is not an invoke, we do not
        // have to decode it to move a result into the right vreg. All slow paths have been
        // audited to be idempotent except monitor-enter/exit and invocation stubs.
        // TODO: move the result and advance the dex pc. That also requires that we can tell the
        // return type of a runtime method, possibly by decoding the dex instruction at the
        // caller.
        DCHECK_EQ(new_dex_pc, dex_pc);
      }
    } else {
      // Nothing to do, the dex_pc is the one at which the code requested
      // the deoptimization.
      DCHECK_EQ(frame_cnt, 0u);
      DCHECK_EQ(new_dex_pc, dex_pc);
    }
    if (new_dex_pc != dex::kDexNoIndex) {
      shadow_frame->SetDexPC(new_dex_pc);
      value = Execute(self,
                      accessor,
                      *shadow_frame,
                      value,
                      /* stay_in_interpreter= */ true,
                      /* from_deoptimize= */ true);
    }
    ShadowFrame* old_frame = shadow_frame;
    shadow_frame = shadow_frame->GetLink();
    ShadowFrame::DeleteDeoptimizedFrame(old_frame);
    // Subsequent deoptimized shadow frames must be at an invocation point, and should advance
    // the dex pc past the invoke instruction.
    from_code = false;
    deopt_method_type = DeoptimizationMethodType::kDefault;
    frame_cnt++;
  }
  ret_val->SetJ(value.GetJ());
}

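// Entry point from the quick-to-interpreter bridge: after a stack-overflow check, notifies the
// JIT of the compiled-code-to-interpreter transition and executes the frame.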
JValue EnterInterpreterFromEntryPoint(Thread* self, const CodeItemDataAccessor& accessor,
                                      ShadowFrame* shadow_frame) {
  DCHECK_EQ(self, Thread::Current());
  bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
  if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
    ThrowStackOverflowError(self);
    return JValue();
  }

  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    jit->NotifyCompiledCodeToInterpreterTransition(self, shadow_frame->GetMethod());
  }
  return Execute(self, accessor, *shadow_frame, JValue());
}

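// Interpreter-to-interpreter invoke: pushes the callee's shadow frame, ensures static methods'
// declaring classes are initialized, and executes the callee, routing native methods through
// UnstartedRuntime (only expected before the runtime is started).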
void ArtInterpreterToInterpreterBridge(Thread* self,
                                       const CodeItemDataAccessor& accessor,
                                       ShadowFrame* shadow_frame,
                                       JValue* result) {
  bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
  if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
    ThrowStackOverflowError(self);
    return;
  }

  self->PushShadowFrame(shadow_frame);
  ArtMethod* method = shadow_frame->GetMethod();
  // Ensure static methods are initialized.
  const bool is_static = method->IsStatic();
  if (is_static) {
    ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
    if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
      StackHandleScope<1> hs(self);
      Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
                        self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
        DCHECK(self->IsExceptionPending());
        self->PopShadowFrame();
        return;
      }
      DCHECK(h_class->IsInitializing());
    }
  }

  if (LIKELY(!shadow_frame->GetMethod()->IsNative())) {
    result->SetJ(Execute(self, accessor, *shadow_frame, JValue()).GetJ());
  } else {
    // We don't expect to be asked to interpret native code (which is entered via a JNI compiler
    // generated stub) except during testing and image writing.
    CHECK(!Runtime::Current()->IsStarted());
    ObjPtr<mirror::Object> receiver = is_static ? nullptr : shadow_frame->GetVRegReference(0);
    uint32_t* args = shadow_frame->GetVRegArgs(is_static ? 0 : 1);
    UnstartedRuntime::Jni(self, shadow_frame->GetMethod(), receiver.Ptr(), args, result);
  }

  self->PopShadowFrame();
}

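// Verify that the constants compiled into the mterp and nterp assembly interpreters match the
// runtime's current layout assumptions.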
void CheckInterpreterAsmConstants() {
  CheckMterpAsmConstants();
  CheckNterpAsmConstants();
}

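// Initialize interpreter-specific thread-local storage for |self|; currently only mterp needs it.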
void InitInterpreterTls(Thread* self) {
  InitMterpTls(self);
}

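// Returns true if the frame that invoked |frame| has its ForceRetryInstruction bit set, i.e. the
// caller will retry the invoke. Falls back to a stack walk (and the debugger's shadow frames)
// when |frame| has no shadow-frame link.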
bool PrevFrameWillRetry(Thread* self, const ShadowFrame& frame) {
  ShadowFrame* prev_frame = frame.GetLink();
  if (prev_frame == nullptr) {
    NthCallerVisitor vis(self, 1, false);
    vis.WalkStack();
    prev_frame = vis.GetCurrentShadowFrame();
    if (prev_frame == nullptr) {
      prev_frame = self->FindDebuggerShadowFrame(vis.GetFrameId());
    }
  }
  return prev_frame != nullptr && prev_frame->GetForceRetryInstruction();
}

}  // namespace interpreter
}  // namespace art