/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "interpreter.h"

#include <limits>

#include "common_dex_operations.h"
#include "common_throws.h"
#include "dex/dex_file_types.h"
#include "interpreter_common.h"
#include "interpreter_mterp_impl.h"
#include "interpreter_switch_impl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jvalue-inl.h"
#include "mirror/string-inl.h"
#include "mterp/mterp.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-inl.h"
#include "unstarted_runtime.h"

namespace art {
namespace interpreter {

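// Reinterpret a 32-bit dex register value as a raw mirror::Object pointer. Only meaningful for
// argument slots that the caller marshalled as references.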
ALWAYS_INLINE static ObjPtr<mirror::Object> ObjArg(uint32_t arg)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return ObjPtr<mirror::Object>(reinterpret_cast<mirror::Object*>(arg));
}

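// Hand-rolled JNI dispatch for the handful of method shortys needed before the runtime is fully
// started. A shorty lists the return type first, then the argument types, with 'L' standing for
// any reference type; e.g. "LL" is a method taking one reference and returning a reference.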
static void InterpreterJni(Thread* self,
                           ArtMethod* method,
                           const StringPiece& shorty,
                           ObjPtr<mirror::Object> receiver,
                           uint32_t* args,
                           JValue* result)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // TODO: The following enters JNI code using a typedef-ed function rather than the JNI
  // compiler; it should be removed and JNI-compiled stubs used instead.
  ScopedObjectAccessUnchecked soa(self);
  if (method->IsStatic()) {
    if (shorty == "L") {
      typedef jobject (fntype)(JNIEnv*, jclass);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      jobject jresult;
      {
        ScopedThreadStateChange tsc(self, kNative);
        jresult = fn(soa.Env(), klass.get());
      }
      result->SetL(soa.Decode<mirror::Object>(jresult));
    } else if (shorty == "V") {
      typedef void (fntype)(JNIEnv*, jclass);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      fn(soa.Env(), klass.get());
    } else if (shorty == "Z") {
      typedef jboolean (fntype)(JNIEnv*, jclass);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetZ(fn(soa.Env(), klass.get()));
    } else if (shorty == "BI") {
      typedef jbyte (fntype)(JNIEnv*, jclass, jint);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetB(fn(soa.Env(), klass.get(), args[0]));
    } else if (shorty == "II") {
      typedef jint (fntype)(JNIEnv*, jclass, jint);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetI(fn(soa.Env(), klass.get(), args[0]));
    } else if (shorty == "LL") {
      typedef jobject (fntype)(JNIEnv*, jclass, jobject);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedLocalRef<jobject> arg0(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
      jobject jresult;
      {
        ScopedThreadStateChange tsc(self, kNative);
        jresult = fn(soa.Env(), klass.get(), arg0.get());
      }
      result->SetL(soa.Decode<mirror::Object>(jresult));
    } else if (shorty == "IIZ") {
      typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetI(fn(soa.Env(), klass.get(), args[0], args[1]));
    } else if (shorty == "ILI") {
      typedef jint (fntype)(JNIEnv*, jclass, jobject, jint);
      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(
          method->GetEntryPointFromJni()));
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedLocalRef<jobject> arg0(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
    } else if (shorty == "SIZ") {
      typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
      fntype* const fn =
          reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetS(fn(soa.Env(), klass.get(), args[0], args[1]));
    } else if (shorty == "VIZ") {
      typedef void (fntype)(JNIEnv*, jclass, jint, jboolean);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedThreadStateChange tsc(self, kNative);
      fn(soa.Env(), klass.get(), args[0], args[1]);
    } else if (shorty == "ZLL") {
      typedef jboolean (fntype)(JNIEnv*, jclass, jobject, jobject);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedLocalRef<jobject> arg0(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
      ScopedLocalRef<jobject> arg1(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[1])));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
    } else if (shorty == "ZILL") {
      typedef jboolean (fntype)(JNIEnv*, jclass, jint, jobject, jobject);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedLocalRef<jobject> arg1(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[1])));
      ScopedLocalRef<jobject> arg2(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[2])));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
    } else if (shorty == "VILII") {
      typedef void (fntype)(JNIEnv*, jclass, jint, jobject, jint, jint);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedLocalRef<jobject> arg1(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[1])));
      ScopedThreadStateChange tsc(self, kNative);
      fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
    } else if (shorty == "VLILII") {
      typedef void (fntype)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jclass> klass(soa.Env(),
                                   soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
      ScopedLocalRef<jobject> arg0(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
      ScopedLocalRef<jobject> arg2(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[2])));
      ScopedThreadStateChange tsc(self, kNative);
      fn(soa.Env(), klass.get(), arg0.get(), args[1], arg2.get(), args[3], args[4]);
    } else {
      LOG(FATAL) << "Do something with static native method: " << method->PrettyMethod()
                 << " shorty: " << shorty;
    }
  } else {
    if (shorty == "L") {
      typedef jobject (fntype)(JNIEnv*, jobject);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jobject> rcvr(soa.Env(),
                                   soa.AddLocalReference<jobject>(receiver));
      jobject jresult;
      {
        ScopedThreadStateChange tsc(self, kNative);
        jresult = fn(soa.Env(), rcvr.get());
      }
      result->SetL(soa.Decode<mirror::Object>(jresult));
    } else if (shorty == "V") {
      typedef void (fntype)(JNIEnv*, jobject);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jobject> rcvr(soa.Env(),
                                   soa.AddLocalReference<jobject>(receiver));
      ScopedThreadStateChange tsc(self, kNative);
      fn(soa.Env(), rcvr.get());
    } else if (shorty == "LL") {
      typedef jobject (fntype)(JNIEnv*, jobject, jobject);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jobject> rcvr(soa.Env(),
                                   soa.AddLocalReference<jobject>(receiver));
      ScopedLocalRef<jobject> arg0(soa.Env(),
                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
      jobject jresult;
      {
        ScopedThreadStateChange tsc(self, kNative);
        jresult = fn(soa.Env(), rcvr.get(), arg0.get());
      }
      result->SetL(soa.Decode<mirror::Object>(jresult));
    } else if (shorty == "III") {
      typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
      ScopedLocalRef<jobject> rcvr(soa.Env(),
                                   soa.AddLocalReference<jobject>(receiver));
      ScopedThreadStateChange tsc(self, kNative);
      result->SetI(fn(soa.Env(), rcvr.get(), args[0], args[1]));
    } else {
      LOG(FATAL) << "Do something with native method: " << method->PrettyMethod()
                 << " shorty: " << shorty;
    }
  }
}

enum InterpreterImplKind {
  kSwitchImplKind,  // Switch-based interpreter implementation.
  kMterpImplKind    // Assembly-based interpreter implementation.
};

static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
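// Mterp is the default; the switch interpreter remains the fallback for transactions, unstarted
// runtimes, and whenever mterp asks to bail out (e.g. for instrumentation or debugging).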

static inline JValue Execute(
    Thread* self,
    const CodeItemDataAccessor& accessor,
    ShadowFrame& shadow_frame,
    JValue result_register,
    bool stay_in_interpreter = false) REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(!shadow_frame.GetMethod()->IsAbstract());
  DCHECK(!shadow_frame.GetMethod()->IsNative());
  if (LIKELY(shadow_frame.GetDexPC() == 0)) {  // Entering the method, but not via deoptimization.
    if (kIsDebugBuild) {
      self->AssertNoPendingException();
    }
    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
    ArtMethod* method = shadow_frame.GetMethod();

    if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
      instrumentation->MethodEnterEvent(self,
                                        shadow_frame.GetThisObject(accessor.InsSize()),
                                        method,
                                        0);
      if (UNLIKELY(self->IsExceptionPending())) {
        instrumentation->MethodUnwindEvent(self,
                                           shadow_frame.GetThisObject(accessor.InsSize()),
                                           method,
                                           0);
        return JValue();
      }
    }

    if (!stay_in_interpreter) {
      jit::Jit* jit = Runtime::Current()->GetJit();
      if (jit != nullptr) {
        jit->MethodEntered(self, shadow_frame.GetMethod());
        if (jit->CanInvokeCompiledCode(method)) {
          JValue result;

          // Pop the shadow frame before calling into compiled code.
          self->PopShadowFrame();
          // Calculate the offset of the first input reg. The input registers are in the high regs.
          // It's ok to access the code item here since JIT code will have been touched by the
          // interpreter and compiler already.
          uint16_t arg_offset = accessor.RegistersSize() - accessor.InsSize();
          ArtInterpreterToCompiledCodeBridge(self, nullptr, &shadow_frame, arg_offset, &result);
          // Push the shadow frame back as the caller will expect it.
          self->PushShadowFrame(&shadow_frame);

          return result;
        }
      }
    }
  }

  ArtMethod* method = shadow_frame.GetMethod();

  DCheckStaticState(self, method);

  // Lock counting is a special version of accessibility checks, and for simplicity and
  // reduction of template parameters, we gate it behind access-checks mode.
  DCHECK(!method->SkipAccessChecks() || !method->MustCountLocks());

  bool transaction_active = Runtime::Current()->IsActiveTransaction();
  if (LIKELY(method->SkipAccessChecks())) {
    // Enter the "without access check" interpreter.
    if (kInterpreterImplKind == kMterpImplKind) {
      if (transaction_active) {
        // No Mterp variant - just use the switch interpreter.
        return ExecuteSwitchImpl<false, true>(self, accessor, shadow_frame, result_register,
                                              false);
      } else if (UNLIKELY(!Runtime::Current()->IsStarted())) {
        return ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame, result_register,
                                               false);
      } else {
        while (true) {
          // Mterp does not support all instrumentation/debugging.
          if (MterpShouldSwitchInterpreters() != 0) {
            return ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame, result_register,
                                                   false);
          }
          bool returned = ExecuteMterpImpl(self,
                                           accessor.Insns(),
                                           &shadow_frame,
                                           &result_register);
          if (returned) {
            return result_register;
          } else {
            // Mterp didn't like that instruction. Single-step it with the reference interpreter.
            result_register = ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame,
                                                              result_register, true);
            if (shadow_frame.GetDexPC() == dex::kDexNoIndex) {
              // Single-stepped a return or an exception not handled locally. Return to caller.
              return result_register;
            }
          }
        }
      }
    } else {
      DCHECK_EQ(kInterpreterImplKind, kSwitchImplKind);
      if (transaction_active) {
        return ExecuteSwitchImpl<false, true>(self, accessor, shadow_frame, result_register,
                                              false);
      } else {
        return ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame, result_register,
                                               false);
      }
    }
  } else {
    // Enter the "with access check" interpreter.
    if (kInterpreterImplKind == kMterpImplKind) {
      // No access check variants for Mterp. Just use the switch version.
      if (transaction_active) {
        return ExecuteSwitchImpl<true, true>(self, accessor, shadow_frame, result_register,
                                             false);
      } else {
        return ExecuteSwitchImpl<true, false>(self, accessor, shadow_frame, result_register,
                                              false);
      }
    } else {
      DCHECK_EQ(kInterpreterImplKind, kSwitchImplKind);
      if (transaction_active) {
        return ExecuteSwitchImpl<true, true>(self, accessor, shadow_frame, result_register,
                                             false);
      } else {
        return ExecuteSwitchImpl<true, false>(self, accessor, shadow_frame, result_register,
                                              false);
      }
    }
  }
}

void EnterInterpreterFromInvoke(Thread* self,
                                ArtMethod* method,
                                ObjPtr<mirror::Object> receiver,
                                uint32_t* args,
                                JValue* result,
                                bool stay_in_interpreter) {
  DCHECK_EQ(self, Thread::Current());
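  // Compare the current native frame address against the interpreter's stack-end watermark to
  // detect imminent stack overflow before building the shadow frame.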
  bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
  if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
    ThrowStackOverflowError(self);
    return;
  }

  // This can happen if we are in forced interpreter mode and an obsolete method is called using
  // reflection.
  if (UNLIKELY(method->IsObsolete())) {
    ThrowInternalError("Attempting to invoke obsolete version of '%s'.",
                       method->PrettyMethod().c_str());
    return;
  }

  const char* old_cause = self->StartAssertNoThreadSuspension("EnterInterpreterFromInvoke");
  CodeItemDataAccessor accessor(method->DexInstructionData());
  uint16_t num_regs;
  uint16_t num_ins;
  if (accessor.HasCodeItem()) {
    num_regs = accessor.RegistersSize();
    num_ins = accessor.InsSize();
  } else if (!method->IsInvokable()) {
    self->EndAssertNoThreadSuspension(old_cause);
    method->ThrowInvocationTimeError();
    return;
  } else {
    DCHECK(method->IsNative());
    num_regs = num_ins = ArtMethod::NumArgRegisters(method->GetShorty());
    if (!method->IsStatic()) {
      num_regs++;
      num_ins++;
    }
  }
  // Set up shadow frame with matching number of reference slots to vregs.
  ShadowFrame* last_shadow_frame = self->GetManagedStack()->GetTopShadowFrame();
  ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
      CREATE_SHADOW_FRAME(num_regs, last_shadow_frame, method, /* dex pc */ 0);
  ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
  self->PushShadowFrame(shadow_frame);

  size_t cur_reg = num_regs - num_ins;
  if (!method->IsStatic()) {
    CHECK(receiver != nullptr);
    shadow_frame->SetVRegReference(cur_reg, receiver.Ptr());
    ++cur_reg;
  }
  uint32_t shorty_len = 0;
  const char* shorty = method->GetShorty(&shorty_len);
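  // shorty[0] is the return type, so argument types start at shorty[1]; that is why the loop
  // below reads shorty[shorty_pos + 1].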
  for (size_t shorty_pos = 0, arg_pos = 0; cur_reg < num_regs; ++shorty_pos, ++arg_pos, cur_reg++) {
    DCHECK_LT(shorty_pos + 1, shorty_len);
    switch (shorty[shorty_pos + 1]) {
      case 'L': {
        ObjPtr<mirror::Object> o =
            reinterpret_cast<StackReference<mirror::Object>*>(&args[arg_pos])->AsMirrorPtr();
        shadow_frame->SetVRegReference(cur_reg, o.Ptr());
        break;
      }
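      // Wide values occupy two consecutive 32-bit arg slots, low word first, and two vregs.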
      case 'J': case 'D': {
        uint64_t wide_value = (static_cast<uint64_t>(args[arg_pos + 1]) << 32) | args[arg_pos];
        shadow_frame->SetVRegLong(cur_reg, wide_value);
        cur_reg++;
        arg_pos++;
        break;
      }
      default:
        shadow_frame->SetVReg(cur_reg, args[arg_pos]);
        break;
    }
  }
  self->EndAssertNoThreadSuspension(old_cause);
  // Do this after populating the shadow frame in case EnsureInitialized causes a GC.
  if (method->IsStatic() && UNLIKELY(!method->GetDeclaringClass()->IsInitialized())) {
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
    if (UNLIKELY(!class_linker->EnsureInitialized(self, h_class, true, true))) {
      CHECK(self->IsExceptionPending());
      self->PopShadowFrame();
      return;
    }
  }
  if (LIKELY(!method->IsNative())) {
    JValue r = Execute(self, accessor, *shadow_frame, JValue(), stay_in_interpreter);
    if (result != nullptr) {
      *result = r;
    }
  } else {
    // We don't expect to be asked to interpret native code (which is entered via a JNI compiler
    // generated stub) except during testing and image writing.
    // Update args to point at the copies in the shadow frame, since the incoming args could
    // hold stale object pointers after a moving GC.
    args = shadow_frame->GetVRegArgs(method->IsStatic() ? 0 : 1);
    if (!Runtime::Current()->IsStarted()) {
      UnstartedRuntime::Jni(self, method, receiver.Ptr(), args, result);
    } else {
      InterpreterJni(self, method, shorty, receiver, args, result);
    }
  }
  self->PopShadowFrame();
}

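// For invoke-direct/-range on a string constructor, vreg C of the invoke holds the
// still-uninitialized "this" reference.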
static int16_t GetReceiverRegisterForStringInit(const Instruction* instr) {
  DCHECK(instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE ||
         instr->Opcode() == Instruction::INVOKE_DIRECT);
  return (instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE) ?
      instr->VRegC_3rc() : instr->VRegC_35c();
}

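// Walks the chain of deoptimized shadow frames, executing each one in the interpreter from the
// appropriate dex pc and threading the result value through to the caller.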
void EnterInterpreterFromDeoptimize(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    JValue* ret_val,
                                    bool from_code,
                                    DeoptimizationMethodType deopt_method_type)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue value;
  // Set value to last known result in case the shadow frame chain is empty.
  value.SetJ(ret_val->GetJ());
  // Are we executing the first shadow frame?
  bool first = true;
  while (shadow_frame != nullptr) {
    // We do not want to recover lock state for lock counting when deoptimizing. Currently,
    // the compiler should not have compiled a method that failed structured-locking checks.
    DCHECK(!shadow_frame->GetMethod()->MustCountLocks());

    self->SetTopOfShadowStack(shadow_frame);
    CodeItemDataAccessor accessor(shadow_frame->GetMethod()->DexInstructionData());
    const uint32_t dex_pc = shadow_frame->GetDexPC();
    uint32_t new_dex_pc = dex_pc;
    if (UNLIKELY(self->IsExceptionPending())) {
      // If we deoptimize from the QuickExceptionHandler, we already reported the exception to
      // the instrumentation. To avoid reporting it a second time, we simply pass a null
      // Instrumentation*.
      const instrumentation::Instrumentation* const instrumentation =
          first ? nullptr : Runtime::Current()->GetInstrumentation();
      new_dex_pc = MoveToExceptionHandler(
          self, *shadow_frame, instrumentation) ? shadow_frame->GetDexPC() : dex::kDexNoIndex;
    } else if (!from_code) {
      // Deoptimization is not called from code directly.
      const Instruction* instr = &accessor.InstructionAt(dex_pc);
      if (deopt_method_type == DeoptimizationMethodType::kKeepDexPc) {
        DCHECK(first);
        // Need to re-execute the dex instruction.
        // (1) An invocation might be split into class initialization and invoke.
        //     In this case, the invoke should not be skipped.
        // (2) A suspend check should also execute the dex instruction at the
        //     corresponding dex pc.
        DCHECK_EQ(new_dex_pc, dex_pc);
      } else if (instr->Opcode() == Instruction::MONITOR_ENTER ||
                 instr->Opcode() == Instruction::MONITOR_EXIT) {
        DCHECK(deopt_method_type == DeoptimizationMethodType::kDefault);
        DCHECK(first);
        // Non-idempotent dex instruction should not be re-executed.
        // On the other hand, if a MONITOR_ENTER is at the dex_pc of a suspend
        // check, that MONITOR_ENTER should be executed. That case is handled
        // above.
        new_dex_pc = dex_pc + instr->SizeInCodeUnits();
      } else if (instr->IsInvoke()) {
        DCHECK(deopt_method_type == DeoptimizationMethodType::kDefault);
        if (IsStringInit(instr, shadow_frame->GetMethod())) {
          uint16_t this_obj_vreg = GetReceiverRegisterForStringInit(instr);
          // Move the StringFactory.newStringFromChars() result into the register representing
          // "this object" when invoking the string constructor in the original dex instruction.
          // Also move the result into all aliases.
          DCHECK(value.GetL()->IsString());
          SetStringInitValueToAllAliases(shadow_frame, this_obj_vreg, value);
          // Calling string constructor in the original dex code doesn't generate a result value.
          value.SetJ(0);
        }
        new_dex_pc = dex_pc + instr->SizeInCodeUnits();
      } else if (instr->Opcode() == Instruction::NEW_INSTANCE) {
        // A NEW_INSTANCE is simply re-executed, including
        // "new-instance String" which is compiled into a call into
        // StringFactory.newEmptyString().
        DCHECK_EQ(new_dex_pc, dex_pc);
      } else {
        DCHECK(deopt_method_type == DeoptimizationMethodType::kDefault);
        DCHECK(first);
        // By default, we re-execute the dex instruction: since it is not an invoke, we do not
        // have to decode it in order to move a result into the right vreg. All slow paths have
        // been audited to be idempotent except monitor-enter/exit and invocation stubs.
        // TODO: move result and advance dex pc. That also requires that we
        // can tell the return type of a runtime method, possibly by decoding
        // the dex instruction at the caller.
        DCHECK_EQ(new_dex_pc, dex_pc);
      }
    } else {
      // Nothing to do, the dex_pc is the one at which the code requested
      // the deoptimization.
      DCHECK(first);
      DCHECK_EQ(new_dex_pc, dex_pc);
    }
    if (new_dex_pc != dex::kDexNoIndex) {
      shadow_frame->SetDexPC(new_dex_pc);
      value = Execute(self, accessor, *shadow_frame, value);
    }
    ShadowFrame* old_frame = shadow_frame;
    shadow_frame = shadow_frame->GetLink();
    ShadowFrame::DeleteDeoptimizedFrame(old_frame);
    // Following deoptimizations of shadow frames must be at invocation point
    // and should advance dex pc past the invoke instruction.
    from_code = false;
    deopt_method_type = DeoptimizationMethodType::kDefault;
    first = false;
  }
  ret_val->SetJ(value.GetJ());
}

JValue EnterInterpreterFromEntryPoint(Thread* self, const CodeItemDataAccessor& accessor,
                                      ShadowFrame* shadow_frame) {
  DCHECK_EQ(self, Thread::Current());
  bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
  if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
    ThrowStackOverflowError(self);
    return JValue();
  }

  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    jit->NotifyCompiledCodeToInterpreterTransition(self, shadow_frame->GetMethod());
  }
  return Execute(self, accessor, *shadow_frame, JValue());
}

void ArtInterpreterToInterpreterBridge(Thread* self,
                                       const CodeItemDataAccessor& accessor,
                                       ShadowFrame* shadow_frame,
                                       JValue* result) {
  bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
  if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
    ThrowStackOverflowError(self);
    return;
  }

  self->PushShadowFrame(shadow_frame);
  ArtMethod* method = shadow_frame->GetMethod();
  // Ensure static methods are initialized.
  const bool is_static = method->IsStatic();
  if (is_static) {
    ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
    if (UNLIKELY(!declaring_class->IsInitialized())) {
      StackHandleScope<1> hs(self);
      HandleWrapperObjPtr<mirror::Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
          self, h_declaring_class, true, true))) {
        DCHECK(self->IsExceptionPending());
        self->PopShadowFrame();
        return;
      }
      CHECK(h_declaring_class->IsInitializing());
    }
  }

  if (LIKELY(!shadow_frame->GetMethod()->IsNative())) {
    result->SetJ(Execute(self, accessor, *shadow_frame, JValue()).GetJ());
  } else {
    // We don't expect to be asked to interpret native code (which is entered via a JNI compiler
    // generated stub) except during testing and image writing.
    CHECK(!Runtime::Current()->IsStarted());
    ObjPtr<mirror::Object> receiver = is_static ? nullptr : shadow_frame->GetVRegReference(0);
    uint32_t* args = shadow_frame->GetVRegArgs(is_static ? 0 : 1);
    UnstartedRuntime::Jni(self, shadow_frame->GetMethod(), receiver.Ptr(), args, result);
  }

  self->PopShadowFrame();
}

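// Sanity-check that constants compiled into the mterp assembly still match the runtime's
// expectations.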
void CheckInterpreterAsmConstants() {
  CheckMterpAsmConstants();
}

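// Set up the thread-local state (e.g. the current instruction-handler table) that the mterp
// assembly interpreter reads on this thread.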
void InitInterpreterTls(Thread* self) {
  InitMterpTls(self);
}

}  // namespace interpreter
}  // namespace art