/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Mterp entry point and support functions.
 */
#include "nterp.h"

#include "arch/instruction_set.h"
#include "base/quasi_atomic.h"
#include "class_linker-inl.h"
#include "dex/dex_instruction_utils.h"
#include "debugger.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_cache-inl.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"
#include "nterp_helpers.h"

namespace art HIDDEN {
namespace interpreter {

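// Nterp only exists for ISAs with a hand-written assembly implementation, and
// only for GC read-barrier configurations that the assembly code supports.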
bool IsNterpSupported() {
  switch (kRuntimeISA) {
    case InstructionSet::kArm:
    case InstructionSet::kThumb2:
    case InstructionSet::kArm64:
      return kReserveMarkingRegister && !kUseTableLookupReadBarrier;
    case InstructionSet::kRiscv64:
      return true;
    case InstructionSet::kX86:
    case InstructionSet::kX86_64:
      return !kUseTableLookupReadBarrier;
    default:
      return false;
  }
}

bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
  Runtime* runtime = Runtime::Current();
  instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
  // If the runtime is interpreter only, we currently don't use nterp, as some
  // parts of the runtime (like instrumentation) assume an interpreter-only
  // runtime is always running the switch-like interpreter.
  return IsNterpSupported() && !runtime->IsJavaDebuggable() && !instr->EntryExitStubsInstalled() &&
      !instr->InterpretOnly() && !runtime->IsAotCompiler() &&
      !instr->NeedsSlowInterpreterForListeners() &&
      // If an async exception has been thrown, we need to go to the switch
      // interpreter: nterp doesn't know how to deal with these, so we could
      // end up never handling the exception if we are in an infinite loop.
      !runtime->AreAsyncExceptionsThrown() &&
      (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
}

// The entrypoint for nterp, which ArtMethods can directly point to.
extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);
extern "C" void EndExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);

const void* GetNterpEntryPoint() {
  return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
}

ArrayRef<const uint8_t> NterpImpl() {
  const uint8_t* entry_point = reinterpret_cast<const uint8_t*>(ExecuteNterpImpl);
  size_t size = reinterpret_cast<const uint8_t*>(EndExecuteNterpImpl) - entry_point;
  const uint8_t* code = reinterpret_cast<const uint8_t*>(EntryPointToCodePointer(entry_point));
  return ArrayRef<const uint8_t>(code, size);
}

// Another entrypoint, which does a clinit check at entry.
extern "C" void ExecuteNterpWithClinitImpl() REQUIRES_SHARED(Locks::mutator_lock_);
extern "C" void EndExecuteNterpWithClinitImpl() REQUIRES_SHARED(Locks::mutator_lock_);

const void* GetNterpWithClinitEntryPoint() {
  return reinterpret_cast<const void*>(interpreter::ExecuteNterpWithClinitImpl);
}

ArrayRef<const uint8_t> NterpWithClinitImpl() {
  const uint8_t* entry_point = reinterpret_cast<const uint8_t*>(ExecuteNterpWithClinitImpl);
  size_t size = reinterpret_cast<const uint8_t*>(EndExecuteNterpWithClinitImpl) - entry_point;
  const uint8_t* code = reinterpret_cast<const uint8_t*>(EntryPointToCodePointer(entry_point));
  return ArrayRef<const uint8_t>(code, size);
}

/*
 * Verify some constants used by the nterp interpreter.
 */
void CheckNterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the byte limit. This won't tell us
   * which one did, but if any one is too big the total size will
   * overflow.
   */
  const int width = kNterpHandlerSize;
  ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
                          reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
    LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
               << " (did an instruction handler exceed " << width << " bytes?)";
  }
}

inline void UpdateHotness(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  // The hotness we will add to a method when we perform a
  // field/method/class/string lookup.
  Runtime* runtime = Runtime::Current();
  bool increase_hotness_for_ui = runtime->GetStartupCompleted() &&
      runtime->InJankPerceptibleProcessState() &&
      Thread::Current()->IsJitSensitiveThread();
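  // Jank-perceptible (UI) threads get a much larger increment so the methods
  // they execute become hot sooner.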
  method->UpdateCounter(increase_hotness_for_ui ? 0x6ff : 0xf);
}

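// Record a resolved value in the thread-local interpreter cache, keyed by the
// dex PC of the instruction, so later executions of that instruction can skip
// the runtime lookup.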
template<typename T>
inline void UpdateCache(Thread* self, const uint16_t* dex_pc_ptr, T value) {
  self->GetInterpreterCache()->Set(self, dex_pc_ptr, value);
}

template<typename T>
inline void UpdateCache(Thread* self, const uint16_t* dex_pc_ptr, T* value) {
  UpdateCache(self, dex_pc_ptr, reinterpret_cast<size_t>(value));
}

#ifdef __arm__

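// Stores the floating-point arguments described by `shorty` from the VFP
// argument registers (s0-s15, with doubles co-allocated in even/odd pairs as
// in the AAPCS hard-float convention), or from `stack_args` once those are
// exhausted, into the `registers` array.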
extern "C" void NterpStoreArm32Fprs(const char* shorty,
                                    uint32_t* registers,
                                    uint32_t* stack_args,
                                    const uint32_t* fprs) {
  // Note: `shorty` has already had the return type removed.
  ScopedAssertNoThreadSuspension sants("In nterp");
  uint32_t arg_index = 0;
  uint32_t fpr_double_index = 0;
  uint32_t fpr_index = 0;
  for (uint32_t shorty_index = 0; shorty[shorty_index] != '\0'; ++shorty_index) {
    char arg_type = shorty[shorty_index];
    switch (arg_type) {
      case 'D': {
        // Doubles should not overlap with floats.
        fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
        if (fpr_double_index < 16) {
          registers[arg_index] = fprs[fpr_double_index++];
          registers[arg_index + 1] = fprs[fpr_double_index++];
        } else {
          registers[arg_index] = stack_args[arg_index];
          registers[arg_index + 1] = stack_args[arg_index + 1];
        }
        arg_index += 2;
        break;
      }
      case 'F': {
        if (fpr_index % 2 == 0) {
          fpr_index = std::max(fpr_double_index, fpr_index);
        }
        if (fpr_index < 16) {
          registers[arg_index] = fprs[fpr_index++];
        } else {
          registers[arg_index] = stack_args[arg_index];
        }
        arg_index++;
        break;
      }
      case 'J': {
        arg_index += 2;
        break;
      }
      default: {
        arg_index++;
        break;
      }
    }
  }
}

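// The reverse direction: distributes floating-point arguments from the
// caller's dex registers into the VFP argument registers and the outgoing
// stack area, again following the AAPCS VFP co-allocation rules for floats
// and doubles.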
extern "C" void NterpSetupArm32Fprs(const char* shorty,
                                    uint32_t dex_register,
                                    uint32_t stack_index,
                                    uint32_t* fprs,
                                    uint32_t* registers,
                                    uint32_t* stack_args) {
  // Note: `shorty` has already had the return type removed.
  ScopedAssertNoThreadSuspension sants("In nterp");
  uint32_t fpr_double_index = 0;
  uint32_t fpr_index = 0;
  for (uint32_t shorty_index = 0; shorty[shorty_index] != '\0'; ++shorty_index) {
    char arg_type = shorty[shorty_index];
    switch (arg_type) {
      case 'D': {
        // Doubles should not overlap with floats.
        fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
        if (fpr_double_index < 16) {
          fprs[fpr_double_index++] = registers[dex_register++];
          fprs[fpr_double_index++] = registers[dex_register++];
          stack_index += 2;
        } else {
          stack_args[stack_index++] = registers[dex_register++];
          stack_args[stack_index++] = registers[dex_register++];
        }
        break;
      }
      case 'F': {
        if (fpr_index % 2 == 0) {
          fpr_index = std::max(fpr_double_index, fpr_index);
        }
        if (fpr_index < 16) {
          fprs[fpr_index++] = registers[dex_register++];
          stack_index++;
        } else {
          stack_args[stack_index++] = registers[dex_register++];
        }
        break;
      }
      case 'J': {
        stack_index += 2;
        dex_register += 2;
        break;
      }
      default: {
        stack_index++;
        dex_register++;
        break;
      }
    }
  }
}

#endif

extern "C" const dex::CodeItem* NterpGetCodeItem(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetCodeItem();
}

extern "C" const char* NterpGetShorty(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty();
}

extern "C" const char* NterpGetShortyFromMethodId(ArtMethod* caller, uint32_t method_index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return caller->GetDexFile()->GetMethodShorty(method_index);
}

extern "C" const char* NterpGetShortyFromInvokePolymorphic(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  dex::ProtoIndex proto_idx(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC
                                ? inst->VRegH_45cc()
                                : inst->VRegH_4rcc());
  return caller->GetDexFile()->GetShorty(proto_idx);
}

extern "C" const char* NterpGetShortyFromInvokeCustom(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t call_site_index = (inst->Opcode() == Instruction::INVOKE_CUSTOM
                                  ? inst->VRegB_35c()
                                  : inst->VRegB_3rc());
  const DexFile* dex_file = caller->GetDexFile();
  dex::ProtoIndex proto_idx = dex_file->GetProtoIndexForCallSite(call_site_index);
  return dex_file->GetShorty(proto_idx);
}

static constexpr uint8_t kInvalidInvokeType = 255u;
static_assert(static_cast<uint8_t>(kMaxInvokeType) < kInvalidInvokeType);

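// Maps an invoke opcode to its InvokeType, or to kInvalidInvokeType for
// opcodes that are not invokes. Used to build the constexpr lookup table below.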
static constexpr uint8_t GetOpcodeInvokeType(uint8_t opcode) {
  switch (opcode) {
    case Instruction::INVOKE_DIRECT:
    case Instruction::INVOKE_DIRECT_RANGE:
      return static_cast<uint8_t>(kDirect);
    case Instruction::INVOKE_INTERFACE:
    case Instruction::INVOKE_INTERFACE_RANGE:
      return static_cast<uint8_t>(kInterface);
    case Instruction::INVOKE_STATIC:
    case Instruction::INVOKE_STATIC_RANGE:
      return static_cast<uint8_t>(kStatic);
    case Instruction::INVOKE_SUPER:
    case Instruction::INVOKE_SUPER_RANGE:
      return static_cast<uint8_t>(kSuper);
    case Instruction::INVOKE_VIRTUAL:
    case Instruction::INVOKE_VIRTUAL_RANGE:
      return static_cast<uint8_t>(kVirtual);

    default:
      return kInvalidInvokeType;
  }
}

static constexpr std::array<uint8_t, 256u> GenerateOpcodeInvokeTypes() {
  std::array<uint8_t, 256u> opcode_invoke_types{};
  for (size_t opcode = 0u; opcode != opcode_invoke_types.size(); ++opcode) {
    opcode_invoke_types[opcode] = GetOpcodeInvokeType(opcode);
  }
  return opcode_invoke_types;
}

static constexpr std::array<uint8_t, 256u> kOpcodeInvokeTypes = GenerateOpcodeInvokeTypes();

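// Resolves the method referenced by the invoke instruction at `dex_pc_ptr` and
// returns a value encoded for the nterp fast path:
//  - invoke-virtual: the method's vtable index;
//  - invoke-interface on a j.l.Object method: the vtable index shifted left by
//    16, with bit 0 set;
//  - invoke-interface on a default method: the ArtMethod pointer with bit 1 set;
//  - a String.<init> call: the matching StringFactory ArtMethod pointer with
//    bit 0 set (not cached);
//  - otherwise: the resolved ArtMethod pointer.
// Returns 0 and leaves an exception pending on failure.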
FLATTEN
extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, const uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  Instruction::Code opcode = inst->Opcode();
  DCHECK(IsUint<8>(static_cast<std::underlying_type_t<Instruction::Code>>(opcode)));
  uint8_t raw_invoke_type = kOpcodeInvokeTypes[opcode];
  DCHECK_LE(raw_invoke_type, kMaxInvokeType);
  InvokeType invoke_type = static_cast<InvokeType>(raw_invoke_type);

  // In release mode, this is just a simple load.
  // In debug mode, this checks that we're using the correct instruction format.
  uint16_t method_index =
      (opcode >= Instruction::INVOKE_VIRTUAL_RANGE) ? inst->VRegB_3rc() : inst->VRegB_35c();

  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtMethod* resolved_method = caller->SkipAccessChecks()
      ? class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
            self, method_index, caller, invoke_type)
      : class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
            self, method_index, caller, invoke_type);
  if (resolved_method == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }

  if (invoke_type == kSuper) {
    resolved_method = caller->SkipAccessChecks()
        ? FindSuperMethodToCall</*access_check=*/false>(method_index, resolved_method, caller, self)
        : FindSuperMethodToCall</*access_check=*/true>(method_index, resolved_method, caller, self);
    if (resolved_method == nullptr) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
  }

  if (invoke_type == kInterface) {
    size_t result = 0u;
    if (resolved_method->GetDeclaringClass()->IsObjectClass()) {
      // Set the low bit to notify the interpreter it should do a vtable call.
      DCHECK_LT(resolved_method->GetMethodIndex(), 0x10000);
      result = (resolved_method->GetMethodIndex() << 16) | 1U;
    } else {
      DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
      DCHECK(!resolved_method->IsCopied());
      if (!resolved_method->IsAbstract()) {
        // Set the second bit to notify the interpreter this is a default
        // method.
        result = reinterpret_cast<size_t>(resolved_method) | 2U;
      } else {
        result = reinterpret_cast<size_t>(resolved_method);
      }
    }
    UpdateCache(self, dex_pc_ptr, result);
    return result;
  } else if (resolved_method->IsStringConstructor()) {
    CHECK_NE(invoke_type, kSuper);
    resolved_method = WellKnownClasses::StringInitToStringFactory(resolved_method);
    // Or the result with 1 to tell nterp this is a string init method. We
    // also don't cache the result, as we don't want nterp's fast path to
    // always check for it, and we expect many more regular calls than string
    // init calls.
    return reinterpret_cast<size_t>(resolved_method) | 1;
  } else if (invoke_type == kVirtual) {
    UpdateCache(self, dex_pc_ptr, resolved_method->GetMethodIndex());
    return resolved_method->GetMethodIndex();
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_method);
    return reinterpret_cast<size_t>(resolved_method);
  }
}

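// Resolves the static field referenced by the sget/sput instruction at
// `dex_pc_ptr`, ensuring its declaring class is at least initializing.
// Returns the ArtField pointer, with the low bit set (and no caching) for
// volatile fields, or 0 with a pending exception on failure.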
extern "C" size_t NterpGetStaticField(Thread* self,
                                      ArtMethod* caller,
                                      const uint16_t* dex_pc_ptr,
                                      size_t resolve_field_type)  // Resolve if not zero.
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegB_21c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  Instruction::Code opcode = inst->Opcode();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /*is_static=*/ true,
      /*is_put=*/ IsInstructionSPut(opcode),
      resolve_field_type);

  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (UNLIKELY(!resolved_field->GetDeclaringClass()->IsVisiblyInitialized())) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_class(hs.NewHandle(resolved_field->GetDeclaringClass()));
    if (UNLIKELY(!class_linker->EnsureInitialized(
            self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
    DCHECK(h_class->IsInitializing());
  }
  if (resolved_field->IsVolatile()) {
    // Or the result with 1 to tell nterp this is a volatile field. We
    // also don't cache the result, as we don't want nterp's fast path to
    // always check for it.
    return reinterpret_cast<size_t>(resolved_field) | 1;
  } else {
    // For sput-object, try to resolve the field type even if we were not requested to.
    // Only if the field type is successfully resolved can we update the cache. If we
    // fail to resolve the type, we clear the exception to keep interpreter
    // semantics of not throwing when null is stored.
    if (opcode == Instruction::SPUT_OBJECT &&
        resolve_field_type == 0 &&
        resolved_field->ResolveType() == nullptr) {
      DCHECK(self->IsExceptionPending());
      self->ClearException();
    } else {
      UpdateCache(self, dex_pc_ptr, resolved_field);
    }
    return reinterpret_cast<size_t>(resolved_field);
  }
}

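// Resolves the instance field referenced by the iget/iput instruction at
// `dex_pc_ptr`. Returns its offset, negated (and not cached) for volatile
// fields, or 0 with a pending exception on failure.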
extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
                                                ArtMethod* caller,
                                                const uint16_t* dex_pc_ptr,
                                                size_t resolve_field_type)  // Resolve if not zero.
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegC_22c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  Instruction::Code opcode = inst->Opcode();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /*is_static=*/ false,
      /*is_put=*/ IsInstructionIPut(opcode),
      resolve_field_type);
  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (resolved_field->IsVolatile()) {
    // Don't cache volatile fields, and return a negative offset as a marker
    // that the field is volatile.
    return -resolved_field->GetOffset().Uint32Value();
  }
  // For iput-object, try to resolve the field type even if we were not requested to.
  // Only if the field type is successfully resolved can we update the cache. If we
  // fail to resolve the type, we clear the exception to keep interpreter
  // semantics of not throwing when null is stored.
  if (opcode == Instruction::IPUT_OBJECT &&
      resolve_field_type == 0 &&
      resolved_field->ResolveType() == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
  }
  return resolved_field->GetOffset().Uint32Value();
}

extern "C" mirror::Object* NterpGetClass(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  Instruction::Code opcode = inst->Opcode();
  DCHECK(opcode == Instruction::CHECK_CAST ||
         opcode == Instruction::INSTANCE_OF ||
         opcode == Instruction::CONST_CLASS ||
         opcode == Instruction::NEW_ARRAY);

  // In release mode, this is just a simple load.
  // In debug mode, this checks that we're using the correct instruction format.
  dex::TypeIndex index = dex::TypeIndex(
      (opcode == Instruction::CHECK_CAST || opcode == Instruction::CONST_CLASS)
          ? inst->VRegB_21c()
          : inst->VRegC_22c());

  ObjPtr<mirror::Class> c =
      ResolveVerifyAndClinit(index,
                             caller,
                             self,
                             /* can_run_clinit= */ false,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(c == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }

  UpdateCache(self, dex_pc_ptr, c.Ptr());
  return c.Ptr();
}

extern "C" mirror::Object* NterpAllocateObject(Thread* self,
                                               ArtMethod* caller,
                                               uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  DCHECK_EQ(inst->Opcode(), Instruction::NEW_INSTANCE);
  dex::TypeIndex index = dex::TypeIndex(inst->VRegB_21c());
  ObjPtr<mirror::Class> c =
      ResolveVerifyAndClinit(index,
                             caller,
                             self,
                             /* can_run_clinit= */ false,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(c == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }

  gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
  if (UNLIKELY(c->IsStringClass())) {
    // We don't cache the class for strings as we need to special case their
    // allocation.
    return mirror::String::AllocEmptyString(self, allocator_type).Ptr();
  } else {
    if (!c->IsFinalizable() && c->IsInstantiable()) {
      // Cache non-finalizable classes for next calls.
      UpdateCache(self, dex_pc_ptr, c.Ptr());
    }
    return AllocObjectFromCode(c, self, allocator_type).Ptr();
  }
}

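// Resolves the object referenced by a const-string, const-string/jumbo,
// const-method-handle, or const-method-type instruction. Only strings are
// cached.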
extern "C" mirror::Object* NterpLoadObject(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  switch (inst->Opcode()) {
    case Instruction::CONST_STRING:
    case Instruction::CONST_STRING_JUMBO: {
      UpdateHotness(caller);
      dex::StringIndex string_index(
          (inst->Opcode() == Instruction::CONST_STRING)
              ? inst->VRegB_21c()
              : inst->VRegB_31c());
      ObjPtr<mirror::String> str = class_linker->ResolveString(string_index, caller);
      if (str == nullptr) {
        DCHECK(self->IsExceptionPending());
        return nullptr;
      }
      UpdateCache(self, dex_pc_ptr, str.Ptr());
      return str.Ptr();
    }
    case Instruction::CONST_METHOD_HANDLE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodHandle(self, inst->VRegB_21c(), caller).Ptr();
    }
    case Instruction::CONST_METHOD_TYPE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodType(
          self, dex::ProtoIndex(inst->VRegB_21c()), caller).Ptr();
    }
    default:
      LOG(FATAL) << "Unreachable";
  }
  return nullptr;
}

extern "C" void NterpUnimplemented() {
  LOG(FATAL) << "Unimplemented";
}

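// Shared implementation of filled-new-array and filled-new-array/range:
// resolves the array class, rejects primitive component types other than int,
// allocates the array, and copies the argument registers into it.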
static mirror::Object* DoFilledNewArray(Thread* self,
                                        ArtMethod* caller,
                                        uint16_t* dex_pc_ptr,
                                        uint32_t* regs,
                                        bool is_range)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  if (kIsDebugBuild) {
    if (is_range) {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
    } else {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY);
    }
  }
  const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
  DCHECK_GE(length, 0);
  if (!is_range) {
    // Check that FILLED_NEW_ARRAY's length does not exceed 5 arguments.
    DCHECK_LE(length, 5);
  }
  uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
  ObjPtr<mirror::Class> array_class =
      ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
                             caller,
                             self,
                             /* can_run_clinit= */ true,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(array_class == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }
  DCHECK(array_class->IsArrayClass());
  ObjPtr<mirror::Class> component_class = array_class->GetComponentType();
  const bool is_primitive_int_component = component_class->IsPrimitiveInt();
  if (UNLIKELY(component_class->IsPrimitive() && !is_primitive_int_component)) {
    if (component_class->IsPrimitiveLong() || component_class->IsPrimitiveDouble()) {
      ThrowRuntimeException("Bad filled array request for type %s",
                            component_class->PrettyDescriptor().c_str());
    } else {
      self->ThrowNewExceptionF(
          "Ljava/lang/InternalError;",
          "Found type %s; filled-new-array not implemented for anything but 'int'",
          component_class->PrettyDescriptor().c_str());
    }
    return nullptr;
  }
  ObjPtr<mirror::Object> new_array = mirror::Array::Alloc(
      self,
      array_class,
      length,
      array_class->GetComponentSizeShift(),
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(new_array == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  uint32_t arg[Instruction::kMaxVarArgRegs];  // Only used in filled-new-array.
  uint32_t vregC = 0;  // Only used in filled-new-array-range.
  if (is_range) {
    vregC = inst->VRegC_3rc();
  } else {
    inst->GetVarArgs(arg);
  }
  for (int32_t i = 0; i < length; ++i) {
    size_t src_reg = is_range ? vregC + i : arg[i];
    if (is_primitive_int_component) {
      new_array->AsIntArray()->SetWithoutChecks</* kTransactionActive= */ false>(i, regs[src_reg]);
    } else {
      new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks</* kTransactionActive= */ false>(
          i, reinterpret_cast<mirror::Object*>(regs[src_reg]));
    }
  }
  return new_array.Ptr();
}

extern "C" mirror::Object* NterpFilledNewArray(Thread* self,
                                               ArtMethod* caller,
                                               uint32_t* registers,
                                               uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ false);
}

extern "C" mirror::Object* NterpFilledNewArrayRange(Thread* self,
                                                    ArtMethod* caller,
                                                    uint32_t* registers,
                                                    uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ true);
}

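// Called from nterp when a method is considered hot, either on entry
// (dex_pc_ptr == nullptr) or on a back edge. Returns OSR data if on-stack
// replacement is possible; otherwise it may enqueue a JIT compilation and
// returns nullptr.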
extern "C" jit::OsrData* NterpHotMethod(ArtMethod* method, uint16_t* dex_pc_ptr, uint32_t* vregs)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // It is important that this method does not suspend: it can be called on
  // method entry, and async deoptimization does not expect runtime methods
  // other than the suspend entrypoint before executing the first instruction
  // of a Java method.
  ScopedAssertNoThreadSuspension sants("In nterp");
  Runtime* runtime = Runtime::Current();
  if (method->IsMemorySharedMethod()) {
    DCHECK_EQ(Thread::Current()->GetSharedMethodHotness(), 0u);
    Thread::Current()->ResetSharedMethodHotness();
  } else {
    // Move the counter to the initial threshold in case we have to re-JIT it.
    method->ResetCounter(runtime->GetJITOptions()->GetWarmupThreshold());
    // Mark the method as warm for the profile saver.
    method->SetPreviouslyWarm();
  }
  jit::Jit* jit = runtime->GetJit();
  if (jit != nullptr && jit->UseJitCompilation()) {
    // Nterp passes null on entry where we don't want to OSR.
    if (dex_pc_ptr != nullptr) {
      // This could be a loop back edge; check if we can OSR.
      CodeItemInstructionAccessor accessor(method->DexInstructions());
      uint32_t dex_pc = dex_pc_ptr - accessor.Insns();
      jit::OsrData* osr_data = jit->PrepareForOsr(
          method->GetInterfaceMethodIfProxy(kRuntimePointerSize), dex_pc, vregs);
      if (osr_data != nullptr) {
        return osr_data;
      }
    }
    jit->MaybeEnqueueCompilation(method, Thread::Current());
  }
  return nullptr;
}

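/*
 * Find the matching case. Returns the offset to the handler instructions.
 *
 * Returns 3 if the value is outside the table's range (it's the size of the
 * packed-switch instruction).
 */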
extern "C" ssize_t NterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const int kInstrLen = 3;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));

  uint16_t size = *switchData++;

  int32_t firstKey = *switchData++;
  firstKey |= (*switchData++) << 16;

  int index = testVal - firstKey;
  if (index < 0 || index >= size) {
    return kInstrLen;
  }

  /*
   * The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
  return entries[index];
}

/*
 * Find the matching case. Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the sparse-switch
 * instruction).
 */
extern "C" ssize_t NterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const int kInstrLen = 3;
  uint16_t size;
  const int32_t* keys;
  const int32_t* entries;

  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */

  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));

  size = *switchData++;

  /* The keys are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  keys = reinterpret_cast<const int32_t*>(switchData);

  /* The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  entries = keys + size;

  /*
   * Binary-search through the array of keys, which are guaranteed to
   * be sorted low-to-high.
   */
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;

    int32_t foundVal = keys[mid];
    if (testVal < foundVal) {
      hi = mid - 1;
    } else if (testVal > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  return kInstrLen;
}

extern "C" void NterpFree(void* val) {
  free(val);
}

}  // namespace interpreter
}  // namespace art