/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Mterp entry point and support functions.
 */
#include "interpreter/interpreter_common.h"
#include "interpreter/interpreter_intrinsics.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "mterp.h"
#include "debugger.h"

namespace art {
namespace interpreter {
/*
 * Verify some constants used by the mterp interpreter.
 */
void CheckMterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the 128-byte limit. This won't tell
   * us which one did, but if any one is too big the total size will
   * overflow.
   */
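  /*
   * Illustrative arithmetic (assuming the usual 256 packed dex opcodes):
   * each handler is padded out to 128 bytes, so the handler table should
   * span exactly 256 * 128 = 32768 bytes; any other total means some
   * handler overflowed its slot.
   */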
  const int width = 128;
  int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
                    (uintptr_t) artMterpAsmInstructionStart;
  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
    LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
               << " (did an instruction handler exceed " << width << " bytes?)";
  }
}

void InitMterpTls(Thread* self) {
  self->SetMterpDefaultIBase(artMterpAsmInstructionStart);
  self->SetMterpAltIBase(artMterpAsmAltInstructionStart);
  self->SetMterpCurrentIBase((kTraceExecutionEnabled || kTestExportPC) ?
                             artMterpAsmAltInstructionStart :
                             artMterpAsmInstructionStart);
}

/*
 * Find the matching case. Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the sparse-switch
 * instruction).
 */
extern "C" ssize_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal) {
  const int kInstrLen = 3;
  uint16_t size;
  const int32_t* keys;
  const int32_t* entries;

  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
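  /*
   * Illustrative payload (hypothetical values, not real dex data) for
   * switch (v) { case 10: ...; case 100: ...; case 1000: ...; }:
   *
   *   0x0200, 0x0003,        // ident, size = 3
   *   10, 100, 1000,         // keys[], sorted low-to-high
   *   0x05, 0x0b, 0x11,      // targets[], code-unit offsets from the opcode
   *
   * A lookup with testVal == 100 binary-searches keys, lands on index 1,
   * and returns entries[1] == 0x0b; testVal == 50 finds no match and
   * returns kInstrLen.
   */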

  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));

  size = *switchData++;

  /* The keys are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  keys = reinterpret_cast<const int32_t*>(switchData);

  /* The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  entries = keys + size;

  /*
   * Binary-search through the array of keys, which are guaranteed to
   * be sorted low-to-high.
   */
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;

    int32_t foundVal = keys[mid];
    if (testVal < foundVal) {
      hi = mid - 1;
    } else if (testVal > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  return kInstrLen;
}

extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal) {
  const int kInstrLen = 3;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
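  /*
   * Illustrative payload (hypothetical values) for
   * switch (v) { case 3: ...; case 4: ...; case 5: ...; }:
   *
   *   0x0100, 0x0003,        // ident, size = 3
   *   0x0003, 0x0000,        // first_key = 3 (low half, then high half)
   *   0x05, 0x0b, 0x11,      // targets[], code-unit offsets from the opcode
   *
   * testVal == 4 yields index 4 - 3 = 1 and returns entries[1] == 0x0b;
   * anything outside [3, 5] falls out of range and returns kInstrLen.
   */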
  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));

  uint16_t size = *switchData++;

  int32_t firstKey = *switchData++;
  firstKey |= (*switchData++) << 16;

  int index = testVal - firstKey;
  if (index < 0 || index >= size) {
    return kInstrLen;
  }

  /*
   * The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
  return entries[index];
}

extern "C" size_t MterpShouldSwitchInterpreters()
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  return instrumentation->NonJitProfilingActive() || Dbg::IsDebuggerActive();
}

extern "C" size_t MterpInvokeVirtual(Thread* self,
                                     ShadowFrame* shadow_frame,
                                     uint16_t* dex_pc_ptr,
                                     uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFastInvoke<kVirtual>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeSuper(Thread* self,
                                   ShadowFrame* shadow_frame,
                                   uint16_t* dex_pc_ptr,
                                   uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kSuper, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeInterface(Thread* self,
                                       ShadowFrame* shadow_frame,
                                       uint16_t* dex_pc_ptr,
                                       uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kInterface, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeDirect(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    uint16_t* dex_pc_ptr,
                                    uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFastInvoke<kDirect>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeStatic(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    uint16_t* dex_pc_ptr,
                                    uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFastInvoke<kStatic>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeVirtualRange(Thread* self,
                                          ShadowFrame* shadow_frame,
                                          uint16_t* dex_pc_ptr,
                                          uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeSuperRange(Thread* self,
                                        ShadowFrame* shadow_frame,
                                        uint16_t* dex_pc_ptr,
                                        uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kSuper, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeInterfaceRange(Thread* self,
                                            ShadowFrame* shadow_frame,
                                            uint16_t* dex_pc_ptr,
                                            uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kInterface, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeDirectRange(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kDirect, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeStaticRange(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kStatic, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeVirtualQuick(Thread* self,
                                          ShadowFrame* shadow_frame,
                                          uint16_t* dex_pc_ptr,
                                          uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  const uint32_t vregC = inst->VRegC_35c();
  const uint32_t vtable_idx = inst->VRegB_35c();
  ObjPtr<mirror::Object> const receiver = shadow_frame->GetVRegReference(vregC);
  if (receiver != nullptr) {
    ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
        vtable_idx, kRuntimePointerSize);
    if ((called_method != nullptr) && called_method->IsIntrinsic()) {
      if (MterpHandleIntrinsic(shadow_frame, called_method, inst, inst_data, result_register)) {
        jit::Jit* jit = Runtime::Current()->GetJit();
        if (jit != nullptr) {
          jit->InvokeVirtualOrInterface(
              receiver, shadow_frame->GetMethod(), shadow_frame->GetDexPC(), called_method);
          jit->AddSamples(self, shadow_frame->GetMethod(), 1, /*with_backedges*/false);
        }
        return !self->IsExceptionPending();
      }
    }
  }
  return DoInvokeVirtualQuick<false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" size_t MterpInvokeVirtualQuickRange(Thread* self,
                                               ShadowFrame* shadow_frame,
                                               uint16_t* dex_pc_ptr,
                                               uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokeVirtualQuick<true>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" void MterpThreadFenceForConstructor() {
  QuasiAtomic::ThreadFenceForConstructor();
}

extern "C" size_t MterpConstString(uint32_t index,
                                   uint32_t tgt_vreg,
                                   ShadowFrame* shadow_frame,
                                   Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::String> s = ResolveString(self, *shadow_frame, dex::StringIndex(index));
  if (UNLIKELY(s == nullptr)) {
    return true;
  }
  shadow_frame->SetVRegReference(tgt_vreg, s.Ptr());
  return false;
}

extern "C" size_t MterpConstClass(uint32_t index,
                                  uint32_t tgt_vreg,
                                  ShadowFrame* shadow_frame,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  mirror::Class* c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                            shadow_frame->GetMethod(),
                                            self,
                                            false,
                                            false);
  if (UNLIKELY(c == nullptr)) {
    return true;
  }
  shadow_frame->SetVRegReference(tgt_vreg, c);
  return false;
}

extern "C" size_t MterpCheckCast(uint32_t index,
                                 StackReference<mirror::Object>* vreg_addr,
                                 art::ArtMethod* method,
                                 Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                   method,
                                                   self,
                                                   false,
                                                   false);
  if (UNLIKELY(c == nullptr)) {
    return true;
  }
  // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
  mirror::Object* obj = vreg_addr->AsMirrorPtr();
  if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
    ThrowClassCastException(c, obj->GetClass());
    return true;
  }
  return false;
}

extern "C" size_t MterpInstanceOf(uint32_t index,
                                  StackReference<mirror::Object>* vreg_addr,
                                  art::ArtMethod* method,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                   method,
                                                   self,
                                                   false,
                                                   false);
  if (UNLIKELY(c == nullptr)) {
    return false;  // Caller will check for pending exception. Return value unimportant.
  }
  // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
  mirror::Object* obj = vreg_addr->AsMirrorPtr();
  return (obj != nullptr) && obj->InstanceOf(c);
}

extern "C" size_t MterpFillArrayData(mirror::Object* obj,
                                     const Instruction::ArrayDataPayload* payload)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return FillArrayData(obj, payload);
}

extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  mirror::Object* obj = nullptr;
  mirror::Class* c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
                                            shadow_frame->GetMethod(),
                                            self,
                                            false,
                                            false);
  if (LIKELY(c != nullptr)) {
    if (UNLIKELY(c->IsStringClass())) {
      gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
      obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
    } else {
      obj = AllocObjectFromCode<true>(c,
                                      self,
                                      Runtime::Current()->GetHeap()->GetCurrentAllocator());
    }
  }
  if (UNLIKELY(obj == nullptr)) {
    return false;
  }
  obj->GetClass()->AssertInitializedOrInitializingInThread(self);
  shadow_frame->SetVRegReference(inst->VRegA_21c(inst_data), obj);
  return true;
}

extern "C" size_t MterpSputObject(ShadowFrame* shadow_frame,
                                  uint16_t* dex_pc_ptr,
                                  uint32_t inst_data,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, false, false>
      (self, *shadow_frame, inst, inst_data);
}

extern "C" size_t MterpIputObject(ShadowFrame* shadow_frame,
                                  uint16_t* dex_pc_ptr,
                                  uint32_t inst_data,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, false, false>
      (self, *shadow_frame, inst, inst_data);
}

extern "C" size_t MterpIputObjectQuick(ShadowFrame* shadow_frame,
                                       uint16_t* dex_pc_ptr,
                                       uint32_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data);
}

extern "C" size_t MterpAputObject(ShadowFrame* shadow_frame,
                                  uint16_t* dex_pc_ptr,
                                  uint32_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  mirror::Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
  if (UNLIKELY(a == nullptr)) {
    return false;
  }
  int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
  mirror::Object* val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
  mirror::ObjectArray<mirror::Object>* array = a->AsObjectArray<mirror::Object>();
  if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
    array->SetWithoutChecks<false>(index, val);
    return true;
  }
  return false;
}

extern "C" size_t MterpFilledNewArray(ShadowFrame* shadow_frame,
                                      uint16_t* dex_pc_ptr,
                                      Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
                                               shadow_frame->GetResultRegister());
}

extern "C" size_t MterpFilledNewArrayRange(ShadowFrame* shadow_frame,
                                           uint16_t* dex_pc_ptr,
                                           Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
                                              shadow_frame->GetResultRegister());
}

extern "C" size_t MterpNewArray(ShadowFrame* shadow_frame,
                                uint16_t* dex_pc_ptr,
                                uint32_t inst_data,
                                Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
  mirror::Object* obj = AllocArrayFromCode<false, true>(
      dex::TypeIndex(inst->VRegC_22c()), length, shadow_frame->GetMethod(), self,
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(obj == nullptr)) {
    return false;
  }
  shadow_frame->SetVRegReference(inst->VRegA_22c(inst_data), obj);
  return true;
}

extern "C" size_t MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(self->IsExceptionPending());
  const instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  uint32_t found_dex_pc = FindNextInstructionFollowingException(self, *shadow_frame,
                                                                shadow_frame->GetDexPC(),
                                                                instrumentation);
  if (found_dex_pc == DexFile::kDexNoIndex) {
    return false;
  }
  // OK - we can deal with it. Update and continue.
  shadow_frame->SetDexPC(found_dex_pc);
  return true;
}

extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t inst_data = inst->Fetch16(0);
  if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
    self->AssertPendingException();
  } else {
    self->AssertNoPendingException();
  }
  if (kTraceExecutionEnabled) {
    uint32_t dex_pc = dex_pc_ptr - shadow_frame->GetCodeItem()->insns_;
    TraceExecution(*shadow_frame, inst, dex_pc);
  }
  if (kTestExportPC) {
    // Save invalid dex pc to force segfault if improperly used.
    shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(kExportPCPoison));
  }
}

extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "DivideByZero: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "ArrayIndex: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NegativeArraySize: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NoSuchMethod: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "ExceptionThrown: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NullObject: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  // The label matches the value logged: whether an exception is pending.
  LOG(INFO) << "Fallback: " << inst->Opcode(inst_data) << ", Exception Pending?: "
            << self->IsExceptionPending();
}

extern "C" void MterpLogOSR(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "OSR: " << inst->Opcode(inst_data) << ", offset = " << offset;
}

extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  if (flags & kCheckpointRequest) {
    LOG(INFO) << "Checkpoint fallback: " << inst->Opcode(inst_data);
  } else if (flags & kSuspendRequest) {
    LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
  } else if (flags & kEmptyCheckpointRequest) {
    LOG(INFO) << "Empty checkpoint fallback: " << inst->Opcode(inst_data);
  }
}

extern "C" size_t MterpSuspendCheck(Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  self->AllowThreadSuspension();
  return MterpShouldSwitchInterpreters();
}

extern "C" ssize_t artSet8InstanceFromMterp(uint32_t field_idx,
                                            mirror::Object* obj,
                                            uint8_t new_value,
                                            ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    Primitive::Type type = field->GetTypeAsPrimitiveType();
    if (type == Primitive::kPrimBoolean) {
      field->SetBoolean<false>(obj, new_value);
    } else {
      DCHECK_EQ(Primitive::kPrimByte, type);
      field->SetByte<false>(obj, new_value);
    }
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" ssize_t artSet16InstanceFromMterp(uint32_t field_idx,
                                             mirror::Object* obj,
                                             uint16_t new_value,
                                             ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                  sizeof(int16_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    Primitive::Type type = field->GetTypeAsPrimitiveType();
    if (type == Primitive::kPrimChar) {
      field->SetChar<false>(obj, new_value);
    } else {
      DCHECK_EQ(Primitive::kPrimShort, type);
      field->SetShort<false>(obj, new_value);
    }
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" ssize_t artSet32InstanceFromMterp(uint32_t field_idx,
                                             mirror::Object* obj,
                                             uint32_t new_value,
                                             ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                  sizeof(int32_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    field->Set32<false>(obj, new_value);
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" ssize_t artSet64InstanceFromMterp(uint32_t field_idx,
                                             mirror::Object* obj,
                                             uint64_t* new_value,
                                             ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                  sizeof(int64_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    field->Set64<false>(obj, *new_value);
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" ssize_t artSetObjInstanceFromMterp(uint32_t field_idx,
                                              mirror::Object* obj,
                                              mirror::Object* new_value,
                                              ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
                                  sizeof(mirror::HeapReference<mirror::Object>));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    field->SetObj<false>(obj, new_value);
    return 0;  // success
  }
  return -1;  // failure
}

template <typename return_type, Primitive::Type primitive_type>
ALWAYS_INLINE return_type MterpGetStatic(uint32_t field_idx,
                                         ArtMethod* referrer,
                                         Thread* self,
                                         return_type (ArtField::*func)(ObjPtr<mirror::Object>))
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return_type res = 0;  // On exception, the result will be ignored.
  ArtField* f =
      FindFieldFromCode<StaticPrimitiveRead, false>(field_idx,
                                                    referrer,
                                                    self,
                                                    primitive_type);
  if (LIKELY(f != nullptr)) {
    ObjPtr<mirror::Object> obj = f->GetDeclaringClass();
    res = (f->*func)(obj);
  }
  return res;
}

extern "C" int32_t MterpGetBooleanStatic(uint32_t field_idx,
                                         ArtMethod* referrer,
                                         Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx,
                                                          referrer,
                                                          self,
                                                          &ArtField::GetBoolean);
}

extern "C" int32_t MterpGetByteStatic(uint32_t field_idx,
                                      ArtMethod* referrer,
                                      Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<int8_t, Primitive::kPrimByte>(field_idx,
                                                      referrer,
                                                      self,
                                                      &ArtField::GetByte);
}

extern "C" uint32_t MterpGetCharStatic(uint32_t field_idx,
                                       ArtMethod* referrer,
                                       Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<uint16_t, Primitive::kPrimChar>(field_idx,
                                                        referrer,
                                                        self,
                                                        &ArtField::GetChar);
}

extern "C" int32_t MterpGetShortStatic(uint32_t field_idx,
                                       ArtMethod* referrer,
                                       Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<int16_t, Primitive::kPrimShort>(field_idx,
                                                        referrer,
                                                        self,
                                                        &ArtField::GetShort);
}

extern "C" mirror::Object* MterpGetObjStatic(uint32_t field_idx,
                                             ArtMethod* referrer,
                                             Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<ObjPtr<mirror::Object>, Primitive::kPrimNot>(field_idx,
                                                                     referrer,
                                                                     self,
                                                                     &ArtField::GetObject).Ptr();
}

extern "C" int32_t MterpGet32Static(uint32_t field_idx,
                                    ArtMethod* referrer,
                                    Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<int32_t, Primitive::kPrimInt>(field_idx,
                                                      referrer,
                                                      self,
                                                      &ArtField::GetInt);
}

extern "C" int64_t MterpGet64Static(uint32_t field_idx, ArtMethod* referrer, Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpGetStatic<int64_t, Primitive::kPrimLong>(field_idx,
                                                       referrer,
                                                       self,
                                                       &ArtField::GetLong);
}

template <typename field_type, Primitive::Type primitive_type>
int MterpSetStatic(uint32_t field_idx,
                   field_type new_value,
                   ArtMethod* referrer,
                   Thread* self,
                   void (ArtField::*func)(ObjPtr<mirror::Object>, field_type val))
    REQUIRES_SHARED(Locks::mutator_lock_) {
  int res = 0;  // Assume success (following quick_field_entrypoints conventions).
  ArtField* f =
      FindFieldFromCode<StaticPrimitiveWrite, false>(field_idx, referrer, self, primitive_type);
  if (LIKELY(f != nullptr)) {
    ObjPtr<mirror::Object> obj = f->GetDeclaringClass();
    (f->*func)(obj, new_value);
  } else {
    res = -1;  // Failure.
  }
  return res;
}

extern "C" int MterpSetBooleanStatic(uint32_t field_idx,
                                     uint8_t new_value,
                                     ArtMethod* referrer,
                                     Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpSetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx,
                                                          new_value,
                                                          referrer,
                                                          self,
                                                          &ArtField::SetBoolean<false>);
}

extern "C" int MterpSetByteStatic(uint32_t field_idx,
                                  int8_t new_value,
                                  ArtMethod* referrer,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpSetStatic<int8_t, Primitive::kPrimByte>(field_idx,
                                                      new_value,
                                                      referrer,
                                                      self,
                                                      &ArtField::SetByte<false>);
}

extern "C" int MterpSetCharStatic(uint32_t field_idx,
                                  uint16_t new_value,
                                  ArtMethod* referrer,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpSetStatic<uint16_t, Primitive::kPrimChar>(field_idx,
                                                        new_value,
                                                        referrer,
                                                        self,
                                                        &ArtField::SetChar<false>);
}

extern "C" int MterpSetShortStatic(uint32_t field_idx,
                                   int16_t new_value,
                                   ArtMethod* referrer,
                                   Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpSetStatic<int16_t, Primitive::kPrimShort>(field_idx,
                                                        new_value,
                                                        referrer,
                                                        self,
                                                        &ArtField::SetShort<false>);
}

extern "C" int MterpSet32Static(uint32_t field_idx,
                                int32_t new_value,
                                ArtMethod* referrer,
                                Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpSetStatic<int32_t, Primitive::kPrimInt>(field_idx,
                                                      new_value,
                                                      referrer,
                                                      self,
                                                      &ArtField::SetInt<false>);
}

extern "C" int MterpSet64Static(uint32_t field_idx,
                                int64_t* new_value,
                                ArtMethod* referrer,
                                Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpSetStatic<int64_t, Primitive::kPrimLong>(field_idx,
                                                       *new_value,
                                                       referrer,
                                                       self,
                                                       &ArtField::SetLong<false>);
}

extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr,
                                                  int32_t index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (UNLIKELY(arr == nullptr)) {
    ThrowNullPointerExceptionFromInterpreter();
    return nullptr;
  }
  mirror::ObjectArray<mirror::Object>* array = arr->AsObjectArray<mirror::Object>();
  if (LIKELY(array->CheckIsValidIndex(index))) {
    return array->GetWithoutChecks(index);
  } else {
    return nullptr;
  }
}

extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj,
                                                  uint32_t field_offset)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (UNLIKELY(obj == nullptr)) {
    ThrowNullPointerExceptionFromInterpreter();
    return nullptr;
  }
  return obj->GetFieldObject<mirror::Object>(MemberOffset(field_offset));
}

/*
 * Create a hotness_countdown based on the current method hotness_count and profiling
 * mode. In short, determine how many hotness events to count before reporting back
 * to the full instrumentation via MterpAddHotnessBatch. Called once on entry to the method,
 * and regenerated following batch updates.
 */
extern "C" ssize_t MterpSetUpHotnessCountdown(ArtMethod* method, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  uint16_t hotness_count = method->GetCounter();
  int32_t countdown_value = jit::kJitHotnessDisabled;
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    int32_t warm_threshold = jit->WarmMethodThreshold();
    int32_t hot_threshold = jit->HotMethodThreshold();
    int32_t osr_threshold = jit->OSRMethodThreshold();
    // Count down to whichever profiling threshold the method will cross next.
    if (hotness_count < warm_threshold) {
      countdown_value = warm_threshold - hotness_count;
    } else if (hotness_count < hot_threshold) {
      countdown_value = hot_threshold - hotness_count;
    } else if (hotness_count < osr_threshold) {
      countdown_value = osr_threshold - hotness_count;
    } else {
      countdown_value = jit::kJitCheckForOSR;
    }
    if (jit::Jit::ShouldUsePriorityThreadWeight()) {
      int32_t priority_thread_weight = jit->PriorityThreadWeight();
      countdown_value = std::min(countdown_value, countdown_value / priority_thread_weight);
    }
  }
  /*
   * The actual hotness threshold may exceed the range of our int16_t countdown value. This is
   * not a problem, though. We can just break it down into smaller chunks.
   */
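  /*
   * Worked example (hypothetical numbers): with 50000 events still to go, the
   * countdown below is clamped to int16_t max (32767); once that batch drains,
   * MterpAddHotnessBatch reports it and this function issues a fresh countdown
   * for the remaining 50000 - 32767 = 17233 events.
   */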
  countdown_value = std::min(countdown_value,
                             static_cast<int32_t>(std::numeric_limits<int16_t>::max()));
  shadow_frame->SetCachedHotnessCountdown(countdown_value);
  shadow_frame->SetHotnessCountdown(countdown_value);
  return countdown_value;
}
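
/*
 * Worked example for the threshold ladder above (hypothetical thresholds):
 * with warm/hot/OSR thresholds of 5000/10000/60000, a method whose
 * hotness_count has reached 7000 gets a countdown of 10000 - 7000 = 3000
 * events until the next report.
 */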

/*
 * Report a batch of hotness events to the instrumentation and then return the new
 * countdown value for the next time we should report.
 */
extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method,
                                        ShadowFrame* shadow_frame,
                                        Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
    jit->AddSamples(self, method, count, /*with_backedges*/ true);
  }
  return MterpSetUpHotnessCountdown(method, shadow_frame);
}

extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self,
                                                 ShadowFrame* shadow_frame,
                                                 int32_t offset)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  int16_t osr_countdown = shadow_frame->GetCachedHotnessCountdown() - 1;
  bool did_osr = false;
  /*
   * To reduce the cost of polling the compiler to determine whether the requested OSR
   * compilation has completed, only check every Nth time. NOTE: the "osr_countdown <= 0"
   * condition is satisfied either by the decrement above or by the initial setting of
   * the cached countdown field to kJitCheckForOSR, which elsewhere is asserted to be -1.
   */
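  /*
   * Illustrative flow (the threshold value is an assumption): if
   * kJitRecheckOSRThreshold were 100, a hot loop would fall through this
   * check for 99 backward branches and poll the compiler only on the 100th,
   * resetting the countdown each time it does.
   */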
  if (osr_countdown <= 0) {
    ArtMethod* method = shadow_frame->GetMethod();
    JValue* result = shadow_frame->GetResultRegister();
    uint32_t dex_pc = shadow_frame->GetDexPC();
    jit::Jit* jit = Runtime::Current()->GetJit();
    osr_countdown = jit::Jit::kJitRecheckOSRThreshold;
    if (offset <= 0) {
      // Keep updating hotness in case a compilation request was dropped. Eventually it will retry.
      jit->AddSamples(self, method, osr_countdown, /*with_backedges*/ true);
    }
    did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
  }
  shadow_frame->SetCachedHotnessCountdown(osr_countdown);
  return did_osr;
}

}  // namespace interpreter
}  // namespace art