/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Mterp entry point and support functions.
 */
#include "mterp.h"

#include "base/quasi_atomic.h"
#include "debugger.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/interpreter_intrinsics.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"

namespace art {
namespace interpreter {
/*
 * Verify some constants used by the mterp interpreter.
 */
void CheckMterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the byte limit.  This won't tell
   * which one did, but if any one is too big the total size will
   * overflow.
   */
  const int width = kMterpHandlerSize;
  int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
                    (uintptr_t) artMterpAsmInstructionStart;
  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
      LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
                 << " (did an instruction handler exceed " << width << " bytes?)";
  }
}

void InitMterpTls(Thread* self) {
  self->SetMterpCurrentIBase(artMterpAsmInstructionStart);
}

/*
 * Find the matching case.  Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the sparse-switch
 * instruction).
 */
extern "C" ssize_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal) {
  const int kInstrLen = 3;
  uint16_t size;
  const int32_t* keys;
  const int32_t* entries;

  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
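  /*
   * Worked example (illustrative values only, not taken from any particular dex file):
   * a payload with keys {3, 17} and relative targets {+0x05, +0x0b} would occupy the
   * 16-bit code units
   *   0x0200, 0x0002, 0x0003, 0x0000, 0x0011, 0x0000, 0x0005, 0x0000, 0x000b, 0x0000
   * on a little-endian target: ident, size=2, two sorted 32-bit keys, two 32-bit targets.
   */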

  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));

  size = *switchData++;

  /* The keys are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  keys = reinterpret_cast<const int32_t*>(switchData);

  /* The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  entries = keys + size;

  /*
   * Binary-search through the array of keys, which are guaranteed to
   * be sorted low-to-high.
   */
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;

    int32_t foundVal = keys[mid];
    if (testVal < foundVal) {
      hi = mid - 1;
    } else if (testVal > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  return kInstrLen;
}

extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal) {
  const int kInstrLen = 3;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
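  /*
   * Worked example (illustrative values only): a payload with first_key = 10 and relative
   * targets {+0x05, +0x0b} would occupy the 16-bit code units
   *   0x0100, 0x0002, 0x000a, 0x0000, 0x0005, 0x0000, 0x000b, 0x0000
   * on a little-endian target: ident, size=2, 32-bit first_key, then two 32-bit targets.
   */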
  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));

  uint16_t size = *switchData++;

  int32_t firstKey = *switchData++;
  firstKey |= (*switchData++) << 16;

  int index = testVal - firstKey;
  if (index < 0 || index >= size) {
    return kInstrLen;
  }

  /*
   * The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
  return entries[index];
}

bool CanUseMterp()
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Runtime* const runtime = Runtime::Current();
  return
      runtime->IsStarted() &&
      !runtime->IsAotCompiler() &&
      !runtime->GetInstrumentation()->IsActive() &&
      // Mterp only knows how to deal with the normal exits. It cannot handle any of the
      // non-standard force-returns.
      !runtime->AreNonStandardExitsEnabled() &&
      // If an async exception has been thrown, we need to go to the switch interpreter; mterp
      // doesn't know how to deal with these, so we could end up never handling one if we are
      // stuck in an infinite loop.
      !runtime->AreAsyncExceptionsThrown() &&
      (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
}


extern "C" size_t MterpInvokeVirtual(Thread* self,
                                     ShadowFrame* shadow_frame,
                                     uint16_t* dex_pc_ptr,
                                     uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeSuper(Thread* self,
                                   ShadowFrame* shadow_frame,
                                   uint16_t* dex_pc_ptr,
                                   uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kSuper, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeInterface(Thread* self,
                                       ShadowFrame* shadow_frame,
                                       uint16_t* dex_pc_ptr,
                                       uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kInterface, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeDirect(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    uint16_t* dex_pc_ptr,
                                    uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kDirect, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeStatic(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    uint16_t* dex_pc_ptr,
                                    uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kStatic, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeCustom(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    uint16_t* dex_pc_ptr,
                                    uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokeCustom</* is_range= */ false>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokePolymorphic(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokePolymorphic</* is_range= */ false>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeVirtualRange(Thread* self,
                                          ShadowFrame* shadow_frame,
                                          uint16_t* dex_pc_ptr,
                                          uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeSuperRange(Thread* self,
                                        ShadowFrame* shadow_frame,
                                        uint16_t* dex_pc_ptr,
                                        uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kSuper, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeInterfaceRange(Thread* self,
                                            ShadowFrame* shadow_frame,
                                            uint16_t* dex_pc_ptr,
                                            uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kInterface, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeDirectRange(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kDirect, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeStaticRange(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kStatic, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeCustomRange(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokeCustom</*is_range=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokePolymorphicRange(Thread* self,
                                              ShadowFrame* shadow_frame,
                                              uint16_t* dex_pc_ptr,
                                              uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokePolymorphic</* is_range= */ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeVirtualQuick(Thread* self,
                                          ShadowFrame* shadow_frame,
                                          uint16_t* dex_pc_ptr,
                                          uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true,
      /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeVirtualQuickRange(Thread* self,
                                               ShadowFrame* shadow_frame,
                                               uint16_t* dex_pc_ptr,
                                               uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true,
      /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" void MterpThreadFenceForConstructor() {
  QuasiAtomic::ThreadFenceForConstructor();
}

extern "C" size_t MterpConstString(uint32_t index,
                                   uint32_t tgt_vreg,
                                   ShadowFrame* shadow_frame,
                                   Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::String> s = ResolveString(self, *shadow_frame, dex::StringIndex(index));
  if (UNLIKELY(s == nullptr)) {
    return 1u;
  }
  shadow_frame->SetVRegReference(tgt_vreg, s);
  return 0u;
}

extern "C" size_t MterpConstClass(uint32_t index,
                                  uint32_t tgt_vreg,
                                  ShadowFrame* shadow_frame,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                   shadow_frame->GetMethod(),
                                                   self,
                                                   /* can_run_clinit= */ false,
                                                   /* verify_access= */ false);
  if (UNLIKELY(c == nullptr)) {
    return 1u;
  }
  shadow_frame->SetVRegReference(tgt_vreg, c);
  return 0u;
}

extern "C" size_t MterpConstMethodHandle(uint32_t index,
                                         uint32_t tgt_vreg,
                                         ShadowFrame* shadow_frame,
                                         Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::MethodHandle> mh = ResolveMethodHandle(self, index, shadow_frame->GetMethod());
  if (UNLIKELY(mh == nullptr)) {
    return 1u;
  }
  shadow_frame->SetVRegReference(tgt_vreg, mh);
  return 0u;
}

extern "C" size_t MterpConstMethodType(uint32_t index,
                                       uint32_t tgt_vreg,
                                       ShadowFrame* shadow_frame,
                                       Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::MethodType> mt =
      ResolveMethodType(self, dex::ProtoIndex(index), shadow_frame->GetMethod());
  if (UNLIKELY(mt == nullptr)) {
    return 1u;
  }
  shadow_frame->SetVRegReference(tgt_vreg, mt);
  return 0u;
}

extern "C" size_t MterpCheckCast(uint32_t index,
                                 StackReference<mirror::Object>* vreg_addr,
                                 art::ArtMethod* method,
                                 Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                   method,
                                                   self,
                                                   false,
                                                   false);
  if (UNLIKELY(c == nullptr)) {
    return 1u;
  }
  // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
  ObjPtr<mirror::Object> obj = vreg_addr->AsMirrorPtr();
  if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
    ThrowClassCastException(c, obj->GetClass());
    return 1u;
  }
  return 0u;
}

extern "C" size_t MterpInstanceOf(uint32_t index,
                                  StackReference<mirror::Object>* vreg_addr,
                                  art::ArtMethod* method,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                   method,
                                                   self,
                                                   false,
                                                   false);
  if (UNLIKELY(c == nullptr)) {
    return 0u;  // Caller will check for pending exception.  Return value unimportant.
  }
  // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
  ObjPtr<mirror::Object> obj = vreg_addr->AsMirrorPtr();
  return (obj != nullptr) && obj->InstanceOf(c) ? 1u : 0u;
}

extern "C" size_t MterpFillArrayData(mirror::Object* obj,
                                     const Instruction::ArrayDataPayload* payload)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return FillArrayData(obj, payload) ? 1u : 0u;
}

extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  ObjPtr<mirror::Object> obj = nullptr;
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
                                                   shadow_frame->GetMethod(),
                                                   self,
                                                   /* can_run_clinit= */ false,
                                                   /* verify_access= */ false);
  if (LIKELY(c != nullptr)) {
    if (UNLIKELY(c->IsStringClass())) {
      gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
      obj = mirror::String::AllocEmptyString(self, allocator_type);
    } else {
      obj = AllocObjectFromCode(c, self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
    }
  }
  if (UNLIKELY(obj == nullptr)) {
    return 0u;
  }
  obj->GetClass()->AssertInitializedOrInitializingInThread(self);
  shadow_frame->SetVRegReference(inst->VRegA_21c(inst_data), obj);
  return 1u;
}

extern "C" size_t MterpIputObjectQuick(ShadowFrame* shadow_frame,
                                       uint16_t* dex_pc_ptr,
                                       uint32_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data) ? 1u : 0u;
}

extern "C" size_t MterpAputObject(ShadowFrame* shadow_frame,
                                  uint16_t* dex_pc_ptr,
                                  uint32_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  ObjPtr<mirror::Object> a = shadow_frame->GetVRegReference(inst->VRegB_23x());
  if (UNLIKELY(a == nullptr)) {
    return 0u;
  }
  int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
  ObjPtr<mirror::Object> val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
  ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
  if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
    array->SetWithoutChecks<false>(index, val);
    return 1u;
  }
  return 0u;
}

extern "C" size_t MterpFilledNewArray(ShadowFrame* shadow_frame,
                                      uint16_t* dex_pc_ptr,
                                      Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
                                               shadow_frame->GetResultRegister()) ? 1u : 0u;
}

extern "C" size_t MterpFilledNewArrayRange(ShadowFrame* shadow_frame,
                                           uint16_t* dex_pc_ptr,
                                           Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
                                              shadow_frame->GetResultRegister()) ? 1u : 0u;
}

extern "C" size_t MterpNewArray(ShadowFrame* shadow_frame,
                                uint16_t* dex_pc_ptr,
                                uint32_t inst_data, Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
  ObjPtr<mirror::Object> obj = AllocArrayFromCode</*kAccessCheck=*/ false>(
      dex::TypeIndex(inst->VRegC_22c()), length, shadow_frame->GetMethod(), self,
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(obj == nullptr)) {
    return 0u;
  }
  shadow_frame->SetVRegReference(inst->VRegA_22c(inst_data), obj);
  return 1u;
}

extern "C" size_t MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(self->IsExceptionPending());
  const instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  return MoveToExceptionHandler(self, *shadow_frame, instrumentation) ? 1u : 0u;
}

struct MterpCheckHelper {
  DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
};
DEFINE_RUNTIME_DEBUG_FLAG(MterpCheckHelper, kSlowMode);

extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Check that we are using the right interpreter.
  if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
    // The flag might be currently being updated on all threads. Retry with lock.
    MutexLock tll_mu(self, *Locks::thread_list_lock_);
    DCHECK_EQ(self->UseMterp(), CanUseMterp());
  }
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t inst_data = inst->Fetch16(0);
  if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
    self->AssertPendingException();
  } else {
    self->AssertNoPendingException();
  }
  if (kTraceExecutionEnabled) {
    uint32_t dex_pc = dex_pc_ptr - shadow_frame->GetDexInstructions();
    TraceExecution(*shadow_frame, inst, dex_pc);
  }
  if (kTestExportPC) {
    // Save invalid dex pc to force segfault if improperly used.
    shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(kExportPCPoison));
  }
  if (MterpCheckHelper::kSlowMode) {
    shadow_frame->CheckConsistentVRegs();
  }
}

extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "DivideByZero: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "ArrayIndex: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NegativeArraySize: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NoSuchMethod: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "ExceptionThrown: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NullObject: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "Fallback: " << inst->Opcode(inst_data) << ", Exception Pending?: "
            << self->IsExceptionPending();
}

extern "C" void MterpLogOSR(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "OSR: " << inst->Opcode(inst_data) << ", offset = " << offset;
}

extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  if (flags & kCheckpointRequest) {
    LOG(INFO) << "Checkpoint fallback: " << inst->Opcode(inst_data);
  } else if (flags & kSuspendRequest) {
    LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
  } else if (flags & kEmptyCheckpointRequest) {
    LOG(INFO) << "Empty checkpoint fallback: " << inst->Opcode(inst_data);
  }
}

extern "C" size_t MterpSuspendCheck(Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  self->AllowThreadSuspension();
  return !self->UseMterp();
}

// Execute a single field access instruction (get/put, static/instance).
// The template arguments reduce this to a fairly small amount of code.
// It requires the target object and field to be already resolved.
template<typename PrimType, FindFieldType kAccessType>
ALWAYS_INLINE void MterpFieldAccess(Instruction* inst,
                                    uint16_t inst_data,
                                    ShadowFrame* shadow_frame,
                                    ObjPtr<mirror::Object> obj,
                                    MemberOffset offset,
                                    bool is_volatile)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  static_assert(std::is_integral<PrimType>::value, "Unexpected primitive type");
  constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
  constexpr bool kIsPrimitive = (kAccessType & FindFieldFlags::PrimitiveBit) != 0;
  constexpr bool kIsRead = (kAccessType & FindFieldFlags::ReadBit) != 0;

  uint16_t vRegA = kIsStatic ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
  if (kIsPrimitive) {
    if (kIsRead) {
      PrimType value = UNLIKELY(is_volatile)
          ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset)
          : obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset);
      if (sizeof(PrimType) == sizeof(uint64_t)) {
        shadow_frame->SetVRegLong(vRegA, value);  // Set two consecutive registers.
      } else {
        shadow_frame->SetVReg(vRegA, static_cast<int32_t>(value));  // Sign/zero extend.
      }
    } else {  // Write.
      uint64_t value = (sizeof(PrimType) == sizeof(uint64_t))
          ? shadow_frame->GetVRegLong(vRegA)
          : shadow_frame->GetVReg(vRegA);
      if (UNLIKELY(is_volatile)) {
        obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset, value);
      } else {
        obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset, value);
      }
    }
  } else {  // Object.
    if (kIsRead) {
      ObjPtr<mirror::Object> value = UNLIKELY(is_volatile)
          ? obj->GetFieldObjectVolatile<mirror::Object>(offset)
          : obj->GetFieldObject<mirror::Object>(offset);
      shadow_frame->SetVRegReference(vRegA, value);
    } else {  // Write.
      ObjPtr<mirror::Object> value = shadow_frame->GetVRegReference(vRegA);
      if (UNLIKELY(is_volatile)) {
        obj->SetFieldObjectVolatile</*kTransactionActive=*/ false>(offset, value);
      } else {
        obj->SetFieldObject</*kTransactionActive=*/ false>(offset, value);
      }
    }
  }
}

template<typename PrimType, FindFieldType kAccessType>
NO_INLINE bool MterpFieldAccessSlow(Instruction* inst,
                                    uint16_t inst_data,
                                    ShadowFrame* shadow_frame,
                                    Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
  constexpr bool kIsRead = (kAccessType & FindFieldFlags::ReadBit) != 0;

  // Update the dex pc in shadow frame, just in case anything throws.
  shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(inst));
  ArtMethod* referrer = shadow_frame->GetMethod();
  uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
  ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
      field_idx, referrer, self, sizeof(PrimType));
  if (UNLIKELY(field == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return false;
  }
  ObjPtr<mirror::Object> obj = kIsStatic
      ? field->GetDeclaringClass().Ptr()
      : shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data));
  if (UNLIKELY(obj == nullptr)) {
    ThrowNullPointerExceptionForFieldAccess(field, kIsRead);
    return false;
  }
  MterpFieldAccess<PrimType, kAccessType>(
      inst, inst_data, shadow_frame, obj, field->GetOffset(), field->IsVolatile());
  return true;
}

// This method is called from assembly to handle field access instructions.
//
// This method is fairly hot.  It is long, but it has been carefully optimized.
// It contains only fully inlined methods -> no spills -> no prologue/epilogue.
template<typename PrimType, FindFieldType kAccessType>
ALWAYS_INLINE bool MterpFieldAccessFast(Instruction* inst,
                                        uint16_t inst_data,
                                        ShadowFrame* shadow_frame,
                                        Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;

  // Try to find the field in small thread-local cache first.
  InterpreterCache* tls_cache = self->GetInterpreterCache();
  size_t tls_value;
  if (LIKELY(tls_cache->Get(inst, &tls_value))) {
    // The meaning of the cache value is opcode-specific.
    // It is ArtField* for static fields and the raw offset for instance fields.
    size_t offset = kIsStatic
        ? reinterpret_cast<ArtField*>(tls_value)->GetOffset().SizeValue()
        : tls_value;
    if (kIsDebugBuild) {
      uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
      ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
          field_idx, shadow_frame->GetMethod(), self, sizeof(PrimType));
      DCHECK_EQ(offset, field->GetOffset().SizeValue());
    }
    ObjPtr<mirror::Object> obj = kIsStatic
        ? reinterpret_cast<ArtField*>(tls_value)->GetDeclaringClass()
        : ObjPtr<mirror::Object>(shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data)));
    if (LIKELY(obj != nullptr)) {
      MterpFieldAccess<PrimType, kAccessType>(
          inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile= */ false);
      return true;
    }
  }

  // This effectively inlines the fast path from ArtMethod::GetDexCache.
  ArtMethod* referrer = shadow_frame->GetMethod();
  if (LIKELY(!referrer->IsObsolete())) {
    // Avoid read barriers, since we need only the pointer to the native (non-movable)
    // DexCache field array which we can get even through from-space objects.
    ObjPtr<mirror::Class> klass = referrer->GetDeclaringClass<kWithoutReadBarrier>();
    ObjPtr<mirror::DexCache> dex_cache =
        klass->GetDexCache<kDefaultVerifyFlags, kWithoutReadBarrier>();

    // Try to find the desired field in DexCache.
    uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
    ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
    if (LIKELY(field != nullptr)) {
      bool visibly_initialized = !kIsStatic || field->GetDeclaringClass()->IsVisiblyInitialized();
      if (LIKELY(visibly_initialized)) {
        DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks= */ false>(
            field_idx, referrer, self, sizeof(PrimType))));
        ObjPtr<mirror::Object> obj = kIsStatic
            ? field->GetDeclaringClass().Ptr()
            : shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data));
        if (LIKELY(kIsStatic || obj != nullptr)) {
          // Only non-volatile fields are allowed in the thread-local cache.
          if (LIKELY(!field->IsVolatile())) {
            if (kIsStatic) {
              tls_cache->Set(inst, reinterpret_cast<uintptr_t>(field));
            } else {
              tls_cache->Set(inst, field->GetOffset().SizeValue());
            }
          }
          MterpFieldAccess<PrimType, kAccessType>(
              inst, inst_data, shadow_frame, obj, field->GetOffset(), field->IsVolatile());
          return true;
        }
      }
    }
  }

  // Slow path. Called last, and with identical arguments, so that it becomes a
  // single-instruction tail call.
  return MterpFieldAccessSlow<PrimType, kAccessType>(inst, inst_data, shadow_frame, self);
}

#define MTERP_FIELD_ACCESSOR(Name, PrimType, AccessType)                                          \
extern "C" bool Name(Instruction* inst, uint16_t inst_data, ShadowFrame* sf, Thread* self)        \
    REQUIRES_SHARED(Locks::mutator_lock_) {                                                       \
  return MterpFieldAccessFast<PrimType, AccessType>(inst, inst_data, sf, self);                   \
}

#define MTERP_FIELD_ACCESSORS_FOR_TYPE(Suffix, PrimType, Kind)                                    \
  MTERP_FIELD_ACCESSOR(MterpIGet##Suffix, PrimType, Instance##Kind##Read)                         \
  MTERP_FIELD_ACCESSOR(MterpIPut##Suffix, PrimType, Instance##Kind##Write)                        \
  MTERP_FIELD_ACCESSOR(MterpSGet##Suffix, PrimType, Static##Kind##Read)                           \
  MTERP_FIELD_ACCESSOR(MterpSPut##Suffix, PrimType, Static##Kind##Write)

MTERP_FIELD_ACCESSORS_FOR_TYPE(I8, int8_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(U8, uint8_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(I16, int16_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(U16, uint16_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(U32, uint32_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(U64, uint64_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(Obj, uint32_t, Object)

// Check that the primitive type for the Obj variant above is correct.
// It really must be a primitive type for the templates to compile.
// In the case of objects, it is only used to get the field size.
static_assert(kHeapReferenceSize == sizeof(uint32_t), "Unexpected kHeapReferenceSize");

#undef MTERP_FIELD_ACCESSORS_FOR_TYPE
#undef MTERP_FIELD_ACCESSOR

extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr,
                                                  int32_t index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (UNLIKELY(arr == nullptr)) {
    ThrowNullPointerExceptionFromInterpreter();
    return nullptr;
  }
  ObjPtr<mirror::ObjectArray<mirror::Object>> array = arr->AsObjectArray<mirror::Object>();
  if (LIKELY(array->CheckIsValidIndex(index))) {
    return array->GetWithoutChecks(index).Ptr();
  } else {
    return nullptr;
  }
}

extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj,
                                                  uint32_t field_offset)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (UNLIKELY(obj == nullptr)) {
    ThrowNullPointerExceptionFromInterpreter();
    return nullptr;
  }
  return obj->GetFieldObject<mirror::Object>(MemberOffset(field_offset));
}

/*
 * Create a hotness_countdown based on the current method hotness_count and profiling
 * mode.  In short, determine how many hotness events we hit before reporting back
 * to the full instrumentation via MterpAddHotnessBatch.  Called once on entry to the method,
 * and regenerated following batch updates.
 */
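/*
 * Worked example (threshold values below are hypothetical, purely for illustration): with
 * hotness_count = 30 and warm/hot/OSR thresholds of 100/1000/2000, the countdown would be
 * 100 - 30 = 70, so mterp would run 70 more hotness events before reporting back via
 * MterpAddHotnessBatch.
 */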
extern "C" ssize_t MterpSetUpHotnessCountdown(ArtMethod* method,
                                              ShadowFrame* shadow_frame,
                                              Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  uint16_t hotness_count = method->GetCounter();
  int32_t countdown_value = jit::kJitHotnessDisabled;
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    int32_t warm_threshold = jit->WarmMethodThreshold();
    int32_t hot_threshold = jit->HotMethodThreshold();
    int32_t osr_threshold = jit->OSRMethodThreshold();
    if (hotness_count < warm_threshold) {
      countdown_value = warm_threshold - hotness_count;
    } else if (hotness_count < hot_threshold) {
      countdown_value = hot_threshold - hotness_count;
    } else if (hotness_count < osr_threshold) {
      countdown_value = osr_threshold - hotness_count;
    } else {
      countdown_value = jit::kJitCheckForOSR;
    }
    if (jit::Jit::ShouldUsePriorityThreadWeight(self)) {
      int32_t priority_thread_weight = jit->PriorityThreadWeight();
      countdown_value = std::min(countdown_value, countdown_value / priority_thread_weight);
    }
  }
  /*
   * The actual hotness threshold may exceed the range of our int16_t countdown value.  This is
   * not a problem, though.  We can just break it down into smaller chunks.
   */
  countdown_value = std::min(countdown_value,
                             static_cast<int32_t>(std::numeric_limits<int16_t>::max()));
  shadow_frame->SetCachedHotnessCountdown(countdown_value);
  shadow_frame->SetHotnessCountdown(countdown_value);
  return countdown_value;
}

/*
 * Report a batch of hotness events to the instrumentation and then return the new
 * countdown value to the next time we should report.
 */
extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method,
                                        ShadowFrame* shadow_frame,
                                        Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
    jit->AddSamples(self, method, count, /*with_backedges=*/ true);
  }
  return MterpSetUpHotnessCountdown(method, shadow_frame, self);
}

extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self,
                                                 ShadowFrame* shadow_frame,
                                                 int32_t offset)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  int16_t osr_countdown = shadow_frame->GetCachedHotnessCountdown() - 1;
  bool did_osr = false;
  /*
   * To reduce the cost of polling the compiler to determine whether the requested OSR
   * compilation has completed, only check every Nth time.  NOTE: the "osr_countdown <= 0"
   * condition is satisfied either by the decrement above or by the initial setting of
   * the cached countdown field to kJitCheckForOSR, which elsewhere is asserted to be -1.
   */
  if (osr_countdown <= 0) {
    ArtMethod* method = shadow_frame->GetMethod();
    JValue* result = shadow_frame->GetResultRegister();
    uint32_t dex_pc = shadow_frame->GetDexPC();
    jit::Jit* jit = Runtime::Current()->GetJit();
    osr_countdown = jit::Jit::kJitRecheckOSRThreshold;
    if (offset <= 0) {
      // Keep updating hotness in case a compilation request was dropped.  Eventually it will retry.
      jit->AddSamples(self, method, osr_countdown, /*with_backedges=*/ true);
    }
    did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
  }
  shadow_frame->SetCachedHotnessCountdown(osr_countdown);
  return did_osr ? 1u : 0u;
}

}  // namespace interpreter
}  // namespace art