1 /*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
18 #define ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
19
20 #include "interpreter.h"
21
22 #include <math.h>
23
24 #include <iostream>
25 #include <sstream>
26
27 #include "art_field-inl.h"
28 #include "art_method-inl.h"
29 #include "base/logging.h"
30 #include "base/macros.h"
31 #include "class_linker-inl.h"
32 #include "common_throws.h"
33 #include "dex_file-inl.h"
34 #include "dex_instruction-inl.h"
35 #include "entrypoints/entrypoint_utils-inl.h"
36 #include "handle_scope-inl.h"
37 #include "jit/jit.h"
38 #include "lambda/art_lambda_method.h"
39 #include "lambda/box_table.h"
40 #include "lambda/closure.h"
41 #include "lambda/closure_builder-inl.h"
42 #include "lambda/leaking_allocator.h"
43 #include "lambda/shorty_field_type.h"
44 #include "mirror/class-inl.h"
45 #include "mirror/method.h"
46 #include "mirror/object-inl.h"
47 #include "mirror/object_array-inl.h"
48 #include "mirror/string-inl.h"
49 #include "stack.h"
50 #include "thread.h"
51 #include "well_known_classes.h"
52
53 using ::art::ArtMethod;
54 using ::art::mirror::Array;
55 using ::art::mirror::BooleanArray;
56 using ::art::mirror::ByteArray;
57 using ::art::mirror::CharArray;
58 using ::art::mirror::Class;
59 using ::art::mirror::ClassLoader;
60 using ::art::mirror::IntArray;
61 using ::art::mirror::LongArray;
62 using ::art::mirror::Object;
63 using ::art::mirror::ObjectArray;
64 using ::art::mirror::ShortArray;
65 using ::art::mirror::String;
66 using ::art::mirror::Throwable;
67
68 namespace art {
69 namespace interpreter {
70
71 // External references to all interpreter implementations.
72
73 template<bool do_access_check, bool transaction_active>
74 extern JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
75 ShadowFrame& shadow_frame, JValue result_register,
76 bool interpret_one_instruction);
77
78 template<bool do_access_check, bool transaction_active>
79 extern JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item,
80 ShadowFrame& shadow_frame, JValue result_register);
81
82 // Mterp does not support transactions or access check, thus no templated versions.
83 extern "C" bool ExecuteMterpImpl(Thread* self, const DexFile::CodeItem* code_item,
84 ShadowFrame* shadow_frame, JValue* result_register);
85
86 void ThrowNullPointerExceptionFromInterpreter()
87 SHARED_REQUIRES(Locks::mutator_lock_);
88
89 template <bool kMonitorCounting>
DoMonitorEnter(Thread * self,ShadowFrame * frame,Object * ref)90 static inline void DoMonitorEnter(Thread* self,
91 ShadowFrame* frame,
92 Object* ref)
93 NO_THREAD_SAFETY_ANALYSIS
94 REQUIRES(!Roles::uninterruptible_) {
95 StackHandleScope<1> hs(self);
96 Handle<Object> h_ref(hs.NewHandle(ref));
97 h_ref->MonitorEnter(self);
98 if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
99 frame->GetLockCountData().AddMonitor(self, h_ref.Get());
100 }
101 }
102
103 template <bool kMonitorCounting>
DoMonitorExit(Thread * self,ShadowFrame * frame,Object * ref)104 static inline void DoMonitorExit(Thread* self,
105 ShadowFrame* frame,
106 Object* ref)
107 NO_THREAD_SAFETY_ANALYSIS
108 REQUIRES(!Roles::uninterruptible_) {
109 StackHandleScope<1> hs(self);
110 Handle<Object> h_ref(hs.NewHandle(ref));
111 h_ref->MonitorExit(self);
112 if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
113 frame->GetLockCountData().RemoveMonitorOrThrow(self, h_ref.Get());
114 }
115 }
116
117 template <bool kMonitorCounting>
DoMonitorCheckOnExit(Thread * self,ShadowFrame * frame)118 static inline bool DoMonitorCheckOnExit(Thread* self, ShadowFrame* frame)
119 NO_THREAD_SAFETY_ANALYSIS
120 REQUIRES(!Roles::uninterruptible_) {
121 if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
122 return frame->GetLockCountData().CheckAllMonitorsReleasedOrThrow(self);
123 }
124 return true;
125 }
126
127 void AbortTransactionF(Thread* self, const char* fmt, ...)
128 __attribute__((__format__(__printf__, 2, 3)))
129 SHARED_REQUIRES(Locks::mutator_lock_);
130
131 void AbortTransactionV(Thread* self, const char* fmt, va_list args)
132 SHARED_REQUIRES(Locks::mutator_lock_);
133
134 void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
135 SHARED_REQUIRES(Locks::mutator_lock_);
136
137 // Invokes the given method. This is part of the invocation support and is used by DoInvoke and
138 // DoInvokeVirtualQuick functions.
139 // Returns true on success, otherwise throws an exception and returns false.
140 template<bool is_range, bool do_assignability_check>
141 bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
142 const Instruction* inst, uint16_t inst_data, JValue* result);
143
144 // Invokes the given lambda closure. This is part of the invocation support and is used by
145 // DoLambdaInvoke functions.
146 // Returns true on success, otherwise throws an exception and returns false.
147 template<bool is_range, bool do_assignability_check>
148 bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
149 const Instruction* inst, uint16_t inst_data, JValue* result);
150
151 // Validates that the art method corresponding to a lambda method target
152 // is semantically valid:
153 //
154 // Must be ACC_STATIC and ACC_LAMBDA. Must be a concrete managed implementation
155 // (i.e. not native, not proxy, not abstract, ...).
156 //
157 // If the validation fails, return false and raise an exception.
IsValidLambdaTargetOrThrow(ArtMethod * called_method)158 static inline bool IsValidLambdaTargetOrThrow(ArtMethod* called_method)
159 SHARED_REQUIRES(Locks::mutator_lock_) {
160 bool success = false;
161
162 if (UNLIKELY(called_method == nullptr)) {
163 // The shadow frame should already be pushed, so we don't need to update it.
164 } else if (UNLIKELY(!called_method->IsInvokable())) {
165 called_method->ThrowInvocationTimeError();
166 // We got an error.
167 // TODO(iam): Also handle the case when the method is non-static, what error do we throw?
168 // TODO(iam): Also make sure that ACC_LAMBDA is set.
169 } else if (UNLIKELY(called_method->GetCodeItem() == nullptr)) {
170 // Method could be native, proxy method, etc. Lambda targets have to be concrete impls,
171 // so don't allow this.
172 } else {
173 success = true;
174 }
175
176 return success;
177 }
178
179 // Write out the 'Closure*' into vreg and vreg+1, as if it was a jlong.
WriteLambdaClosureIntoVRegs(ShadowFrame & shadow_frame,const lambda::Closure & lambda_closure,uint32_t vreg)180 static inline void WriteLambdaClosureIntoVRegs(ShadowFrame& shadow_frame,
181 const lambda::Closure& lambda_closure,
182 uint32_t vreg) {
183 // Split the method into a lo and hi 32 bits so we can encode them into 2 virtual registers.
184 uint32_t closure_lo = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&lambda_closure));
185 uint32_t closure_hi = static_cast<uint32_t>(reinterpret_cast<uint64_t>(&lambda_closure)
186 >> BitSizeOf<uint32_t>());
187 // Use uint64_t instead of uintptr_t to allow shifting past the max on 32-bit.
188 static_assert(sizeof(uint64_t) >= sizeof(uintptr_t), "Impossible");
189
190 DCHECK_NE(closure_lo | closure_hi, 0u);
191
192 shadow_frame.SetVReg(vreg, closure_lo);
193 shadow_frame.SetVReg(vreg + 1, closure_hi);
194 }
195
196 // Handles create-lambda instructions.
197 // Returns true on success, otherwise throws an exception and returns false.
198 // (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
199 //
200 // The closure must be allocated big enough to hold the data, and should not be
201 // pre-initialized. It is initialized with the actual captured variables as a side-effect,
202 // although this should be unimportant to the caller since this function also handles storing it to
203 // the ShadowFrame.
204 //
205 // As a work-in-progress implementation, this shoves the ArtMethod object corresponding
206 // to the target dex method index into the target register vA and vA + 1.
template<bool do_access_check>
static inline bool DoCreateLambda(Thread* self,
                                  const Instruction* inst,
                                  /*inout*/ShadowFrame& shadow_frame,
                                  /*inout*/lambda::ClosureBuilder* closure_builder,
                                  /*inout*/lambda::Closure* uninitialized_closure) {
  DCHECK(closure_builder != nullptr);
  DCHECK(uninitialized_closure != nullptr);
  DCHECK_ALIGNED(uninitialized_closure, alignof(lambda::Closure));

  using lambda::ArtLambdaMethod;
  using lambda::LeakingAllocator;

  /*
   * create-lambda is opcode 0x21c
   * - vA is the target register where the closure will be stored into
   *   (also stores into vA + 1)
   * - vB is the method index which will be the target for a later invoke-lambda
   */
  const uint32_t method_idx = inst->VRegB_21c();
  mirror::Object* receiver = nullptr;  // Always static. (see 'kStatic')
  ArtMethod* sf_method = shadow_frame.GetMethod();
  ArtMethod* const called_method = FindMethodFromCode<kStatic, do_access_check>(
      method_idx, &receiver, sf_method, self);

  uint32_t vreg_dest_closure = inst->VRegA_21c();

  if (UNLIKELY(!IsValidLambdaTargetOrThrow(called_method))) {
    CHECK(self->IsExceptionPending());
    // Zero out the destination vreg pair so no stale closure bits survive.
    shadow_frame.SetVReg(vreg_dest_closure, 0u);
    shadow_frame.SetVReg(vreg_dest_closure + 1, 0u);
    return false;
  }

  ArtLambdaMethod* initialized_lambda_method;
  // Initialize the ArtLambdaMethod with the right data.
  {
    // Allocate enough memory to store a well-aligned ArtLambdaMethod.
    // This is not the final type yet since the data starts out uninitialized.
    LeakingAllocator::AlignedMemoryStorage<ArtLambdaMethod>* uninitialized_lambda_method =
        LeakingAllocator::AllocateMemory<ArtLambdaMethod>(self);

    std::string captured_variables_shorty = closure_builder->GetCapturedVariableShortyTypes();
    std::string captured_variables_long_type_desc;

    // Synthesize a long type descriptor from the short one.
    for (char shorty : captured_variables_shorty) {
      lambda::ShortyFieldType shorty_field_type(shorty);
      if (shorty_field_type.IsObject()) {
        // Not the true type, but good enough until we implement verifier support.
        captured_variables_long_type_desc += "Ljava/lang/Object;";
        UNIMPLEMENTED(FATAL) << "create-lambda with an object captured variable";
      } else if (shorty_field_type.IsLambda()) {
        // Not the true type, but good enough until we implement verifier support.
        captured_variables_long_type_desc += "Ljava/lang/Runnable;";
        UNIMPLEMENTED(FATAL) << "create-lambda with a lambda captured variable";
      } else {
        // The primitive types have the same length shorty or not, so this is always correct.
        DCHECK(shorty_field_type.IsPrimitive());
        captured_variables_long_type_desc += shorty_field_type;
      }
    }

    // Copy strings to dynamically allocated storage. This leaks, but that's ok. Fix it later.
    // TODO: Strings need to come from the DexFile, so they won't need their own allocations.
    char* captured_variables_type_desc = LeakingAllocator::MakeFlexibleInstance<char>(
        self,
        captured_variables_long_type_desc.size() + 1);
    strcpy(captured_variables_type_desc, captured_variables_long_type_desc.c_str());
    char* captured_variables_shorty_copy = LeakingAllocator::MakeFlexibleInstance<char>(
        self,
        captured_variables_shorty.size() + 1);
    strcpy(captured_variables_shorty_copy, captured_variables_shorty.c_str());

    // After initialization, the object at the storage is well-typed. Use strong type going forward.
    // Placement-new into the leaked storage; the object intentionally outlives this scope.
    initialized_lambda_method =
        new (uninitialized_lambda_method) ArtLambdaMethod(called_method,
                                                          captured_variables_type_desc,
                                                          captured_variables_shorty_copy,
                                                          true);  // innate lambda
  }

  // Write all the closure captured variables and the closure header into the closure.
  lambda::Closure* initialized_closure =
      closure_builder->CreateInPlace(uninitialized_closure, initialized_lambda_method);

  // Publish the closure pointer to the destination vreg pair (vA, vA + 1).
  WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, *initialized_closure, vreg_dest_closure);
  return true;
}
296
297 // Reads out the 'ArtMethod*' stored inside of vreg and vreg+1
298 //
299 // Validates that the art method points to a valid lambda function, otherwise throws
300 // an exception and returns null.
301 // (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
ReadLambdaClosureFromVRegsOrThrow(ShadowFrame & shadow_frame,uint32_t vreg)302 static inline lambda::Closure* ReadLambdaClosureFromVRegsOrThrow(ShadowFrame& shadow_frame,
303 uint32_t vreg)
304 SHARED_REQUIRES(Locks::mutator_lock_) {
305 // Lambda closures take up a consecutive pair of 2 virtual registers.
306 // On 32-bit the high bits are always 0.
307 uint32_t vc_value_lo = shadow_frame.GetVReg(vreg);
308 uint32_t vc_value_hi = shadow_frame.GetVReg(vreg + 1);
309
310 uint64_t vc_value_ptr = (static_cast<uint64_t>(vc_value_hi) << BitSizeOf<uint32_t>())
311 | vc_value_lo;
312
313 // Use uint64_t instead of uintptr_t to allow left-shifting past the max on 32-bit.
314 static_assert(sizeof(uint64_t) >= sizeof(uintptr_t), "Impossible");
315 lambda::Closure* const lambda_closure = reinterpret_cast<lambda::Closure*>(vc_value_ptr);
316 DCHECK_ALIGNED(lambda_closure, alignof(lambda::Closure));
317
318 // Guard against the user passing a null closure, which is odd but (sadly) semantically valid.
319 if (UNLIKELY(lambda_closure == nullptr)) {
320 ThrowNullPointerExceptionFromInterpreter();
321 return nullptr;
322 } else if (UNLIKELY(!IsValidLambdaTargetOrThrow(lambda_closure->GetTargetMethod()))) {
323 // Sanity check against data corruption.
324 return nullptr;
325 }
326
327 return lambda_closure;
328 }
329
330 // Forward declaration for lock annotations. See below for documentation.
331 template <bool do_access_check>
332 static inline const char* GetStringDataByDexStringIndexOrThrow(ShadowFrame& shadow_frame,
333 uint32_t string_idx)
334 SHARED_REQUIRES(Locks::mutator_lock_);
335
336 // Find the c-string data corresponding to a dex file's string index.
337 // Otherwise, returns null if not found and throws a VerifyError.
338 //
339 // Note that with do_access_check=false, we never return null because the verifier
340 // must guard against invalid string indices.
341 // (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
template <bool do_access_check>
static inline const char* GetStringDataByDexStringIndexOrThrow(ShadowFrame& shadow_frame,
                                                               uint32_t string_idx) {
  ArtMethod* method = shadow_frame.GetMethod();
  const DexFile* dex_file = method->GetDexFile();

  mirror::Class* declaring_class = method->GetDeclaringClass();
  if (!do_access_check) {
    // MethodVerifier refuses methods with string_idx out of bounds.
    DCHECK_LT(string_idx, declaring_class->GetDexCache()->NumStrings());
  } else {
    // Access checks enabled: perform string index bounds ourselves.
    if (string_idx >= dex_file->GetHeader().string_ids_size_) {
      ThrowVerifyError(declaring_class, "String index '%" PRIu32 "' out of bounds",
                       string_idx);
      return nullptr;
    }
  }

  const char* type_string = dex_file->StringDataByIdx(string_idx);

  if (UNLIKELY(type_string == nullptr)) {
    // A null result here should be impossible: the verifier (when
    // do_access_check=false) or the bounds check above (when true) should
    // have rejected the index. Exactly one of the two CHECK_EQs below fails
    // for any value of do_access_check, aborting with the message that names
    // the guard that should have caught it. Thus this function never returns
    // null in the do_access_check=false instantiation.
    CHECK_EQ(false, do_access_check)
        << " verifier should've caught invalid string index " << string_idx;
    CHECK_EQ(true, do_access_check)
        << " string idx size check should've caught invalid string index " << string_idx;
  }

  return type_string;
}
372
373 // Handles capture-variable instructions.
374 // Returns true on success, otherwise throws an exception and returns false.
375 // (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
template<bool do_access_check>
static inline bool DoCaptureVariable(Thread* self,
                                     const Instruction* inst,
                                     /*inout*/ShadowFrame& shadow_frame,
                                     /*inout*/lambda::ClosureBuilder* closure_builder) {
  DCHECK(closure_builder != nullptr);
  using lambda::ShortyFieldType;
  /*
   * capture-variable is opcode 0xf6, fmt 0x21c
   * - vA is the source register of the variable that will be captured
   * - vB is the string ID of the variable's type that will be captured
   */
  const uint32_t source_vreg = inst->VRegA_21c();
  const uint32_t string_idx = inst->VRegB_21c();
  // TODO: this should be a proper [type id] instead of a [string ID] pointing to a type.

  const char* type_string = GetStringDataByDexStringIndexOrThrow<do_access_check>(shadow_frame,
                                                                                  string_idx);
  if (UNLIKELY(type_string == nullptr)) {
    CHECK(self->IsExceptionPending());
    return false;
  }

  // Only the first character of the descriptor is needed to classify the type.
  char type_first_letter = type_string[0];
  ShortyFieldType shorty_type;
  if (do_access_check &&
      UNLIKELY(!ShortyFieldType::MaybeCreate(type_first_letter, /*out*/&shorty_type))) {  // NOLINT: [whitespace/comma] [3]
    ThrowVerifyError(shadow_frame.GetMethod()->GetDeclaringClass(),
                     "capture-variable vB must be a valid type");
    return false;
  } else {
    // Already verified that the type is valid.
    shorty_type = ShortyFieldType(type_first_letter);
  }

  // Remember the count before capturing, to sanity-check below that exactly
  // one variable was appended.
  const size_t captured_variable_count = closure_builder->GetCaptureCount();

  // Note: types are specified explicitly so that the closure is packed tightly.
  // Narrow primitives are read from the vreg as uint32_t and truncated to the
  // declared capture type by the explicit template argument.
  switch (shorty_type) {
    case ShortyFieldType::kBoolean: {
      uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
      closure_builder->CaptureVariablePrimitive<bool>(primitive_narrow_value);
      break;
    }
    case ShortyFieldType::kByte: {
      uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
      closure_builder->CaptureVariablePrimitive<int8_t>(primitive_narrow_value);
      break;
    }
    case ShortyFieldType::kChar: {
      uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
      closure_builder->CaptureVariablePrimitive<uint16_t>(primitive_narrow_value);
      break;
    }
    case ShortyFieldType::kShort: {
      uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
      closure_builder->CaptureVariablePrimitive<int16_t>(primitive_narrow_value);
      break;
    }
    case ShortyFieldType::kInt: {
      uint32_t primitive_narrow_value = shadow_frame.GetVReg(source_vreg);
      closure_builder->CaptureVariablePrimitive<int32_t>(primitive_narrow_value);
      break;
    }
    case ShortyFieldType::kDouble: {
      closure_builder->CaptureVariablePrimitive(shadow_frame.GetVRegDouble(source_vreg));
      break;
    }
    case ShortyFieldType::kFloat: {
      closure_builder->CaptureVariablePrimitive(shadow_frame.GetVRegFloat(source_vreg));
      break;
    }
    case ShortyFieldType::kLambda: {
      UNIMPLEMENTED(FATAL) << " capture-variable with type kLambda";
      // TODO: Capturing lambdas recursively will be done at a later time.
      UNREACHABLE();
    }
    case ShortyFieldType::kLong: {
      closure_builder->CaptureVariablePrimitive(shadow_frame.GetVRegLong(source_vreg));
      break;
    }
    case ShortyFieldType::kObject: {
      closure_builder->CaptureVariableObject(shadow_frame.GetVRegReference(source_vreg));
      UNIMPLEMENTED(FATAL) << " capture-variable with type kObject";
      // TODO: finish implementing this. disabled for now since we can't track lambda refs for GC.
      UNREACHABLE();
    }

    default:
      LOG(FATAL) << "Invalid shorty type value " << shorty_type;
      UNREACHABLE();
  }

  // Exactly one capture must have been appended by the switch above.
  DCHECK_EQ(captured_variable_count + 1, closure_builder->GetCaptureCount());

  return true;
}
473
474 // Handles capture-variable instructions.
475 // Returns true on success, otherwise throws an exception and returns false.
476 // (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
477 template<bool do_access_check>
DoLiberateVariable(Thread * self,const Instruction * inst,size_t captured_variable_index,ShadowFrame & shadow_frame)478 static inline bool DoLiberateVariable(Thread* self,
479 const Instruction* inst,
480 size_t captured_variable_index,
481 /*inout*/ShadowFrame& shadow_frame) {
482 using lambda::ShortyFieldType;
483 /*
484 * liberate-variable is opcode 0xf7, fmt 0x22c
485 * - vA is the destination register
486 * - vB is the register with the lambda closure in it
487 * - vC is the string ID which needs to be a valid field type descriptor
488 */
489
490 const uint32_t dest_vreg = inst->VRegA_22c();
491 const uint32_t closure_vreg = inst->VRegB_22c();
492 const uint32_t string_idx = inst->VRegC_22c();
493 // TODO: this should be a proper [type id] instead of a [string ID] pointing to a type.
494
495
496 // Synthesize a long type descriptor from a shorty type descriptor list.
497 // TODO: Fix the dex encoding to contain the long and short type descriptors.
498 const char* type_string = GetStringDataByDexStringIndexOrThrow<do_access_check>(shadow_frame,
499 string_idx);
500 if (UNLIKELY(do_access_check && type_string == nullptr)) {
501 CHECK(self->IsExceptionPending());
502 shadow_frame.SetVReg(dest_vreg, 0);
503 return false;
504 }
505
506 char type_first_letter = type_string[0];
507 ShortyFieldType shorty_type;
508 if (do_access_check &&
509 UNLIKELY(!ShortyFieldType::MaybeCreate(type_first_letter, /*out*/&shorty_type))) { // NOLINT: [whitespace/comma] [3]
510 ThrowVerifyError(shadow_frame.GetMethod()->GetDeclaringClass(),
511 "liberate-variable vC must be a valid type");
512 shadow_frame.SetVReg(dest_vreg, 0);
513 return false;
514 } else {
515 // Already verified that the type is valid.
516 shorty_type = ShortyFieldType(type_first_letter);
517 }
518
519 // Check for closure being null *after* the type check.
520 // This way we can access the type info in case we fail later, to know how many vregs to clear.
521 const lambda::Closure* lambda_closure =
522 ReadLambdaClosureFromVRegsOrThrow(/*inout*/shadow_frame, closure_vreg);
523
524 // Failed lambda target runtime check, an exception was raised.
525 if (UNLIKELY(lambda_closure == nullptr)) {
526 CHECK(self->IsExceptionPending());
527
528 // Clear the destination vreg(s) to be safe.
529 shadow_frame.SetVReg(dest_vreg, 0);
530 if (shorty_type.IsPrimitiveWide() || shorty_type.IsLambda()) {
531 shadow_frame.SetVReg(dest_vreg + 1, 0);
532 }
533 return false;
534 }
535
536 if (do_access_check &&
537 UNLIKELY(captured_variable_index >= lambda_closure->GetNumberOfCapturedVariables())) {
538 ThrowVerifyError(shadow_frame.GetMethod()->GetDeclaringClass(),
539 "liberate-variable captured variable index %zu out of bounds",
540 lambda_closure->GetNumberOfCapturedVariables());
541 // Clear the destination vreg(s) to be safe.
542 shadow_frame.SetVReg(dest_vreg, 0);
543 if (shorty_type.IsPrimitiveWide() || shorty_type.IsLambda()) {
544 shadow_frame.SetVReg(dest_vreg + 1, 0);
545 }
546 return false;
547 }
548
549 // Verify that the runtime type of the captured-variable matches the requested dex type.
550 if (do_access_check) {
551 ShortyFieldType actual_type = lambda_closure->GetCapturedShortyType(captured_variable_index);
552 if (actual_type != shorty_type) {
553 ThrowVerifyError(shadow_frame.GetMethod()->GetDeclaringClass(),
554 "cannot liberate-variable of runtime type '%c' to dex type '%c'",
555 static_cast<char>(actual_type),
556 static_cast<char>(shorty_type));
557
558 shadow_frame.SetVReg(dest_vreg, 0);
559 if (shorty_type.IsPrimitiveWide() || shorty_type.IsLambda()) {
560 shadow_frame.SetVReg(dest_vreg + 1, 0);
561 }
562 return false;
563 }
564
565 if (actual_type.IsLambda() || actual_type.IsObject()) {
566 UNIMPLEMENTED(FATAL) << "liberate-variable type checks needs to "
567 << "parse full type descriptor for objects and lambdas";
568 }
569 }
570
571 // Unpack the captured variable from the closure into the correct type, then save it to the vreg.
572 if (shorty_type.IsPrimitiveNarrow()) {
573 uint32_t primitive_narrow_value =
574 lambda_closure->GetCapturedPrimitiveNarrow(captured_variable_index);
575 shadow_frame.SetVReg(dest_vreg, primitive_narrow_value);
576 } else if (shorty_type.IsPrimitiveWide()) {
577 uint64_t primitive_wide_value =
578 lambda_closure->GetCapturedPrimitiveWide(captured_variable_index);
579 shadow_frame.SetVRegLong(dest_vreg, static_cast<int64_t>(primitive_wide_value));
580 } else if (shorty_type.IsObject()) {
581 mirror::Object* unpacked_object =
582 lambda_closure->GetCapturedObject(captured_variable_index);
583 shadow_frame.SetVRegReference(dest_vreg, unpacked_object);
584
585 UNIMPLEMENTED(FATAL) << "liberate-variable cannot unpack objects yet";
586 } else if (shorty_type.IsLambda()) {
587 UNIMPLEMENTED(FATAL) << "liberate-variable cannot unpack lambdas yet";
588 } else {
589 LOG(FATAL) << "unreachable";
590 UNREACHABLE();
591 }
592
593 return true;
594 }
595
596 template<bool do_access_check>
DoInvokeLambda(Thread * self,ShadowFrame & shadow_frame,const Instruction * inst,uint16_t inst_data,JValue * result)597 static inline bool DoInvokeLambda(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
598 uint16_t inst_data, JValue* result) {
599 /*
600 * invoke-lambda is opcode 0x25
601 *
602 * - vC is the closure register (both vC and vC + 1 will be used to store the closure).
603 * - vB is the number of additional registers up to |{vD,vE,vF,vG}| (4)
604 * - the rest of the registers are always var-args
605 *
606 * - reading var-args for 0x25 gets us vD,vE,vF,vG (but not vB)
607 */
608 uint32_t vreg_closure = inst->VRegC_25x();
609 const lambda::Closure* lambda_closure =
610 ReadLambdaClosureFromVRegsOrThrow(shadow_frame, vreg_closure);
611
612 // Failed lambda target runtime check, an exception was raised.
613 if (UNLIKELY(lambda_closure == nullptr)) {
614 CHECK(self->IsExceptionPending());
615 result->SetJ(0);
616 return false;
617 }
618
619 ArtMethod* const called_method = lambda_closure->GetTargetMethod();
620 // Invoke a non-range lambda
621 return DoLambdaCall<false, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
622 result);
623 }
624
625 // Handles invoke-XXX/range instructions (other than invoke-lambda[-range]).
626 // Returns true on success, otherwise throws an exception and returns false.
template<InvokeType type, bool is_range, bool do_access_check>
static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
                            uint16_t inst_data, JValue* result) {
  // Decode method index and first-argument vreg from the range (3rc) or
  // non-range (35c) instruction format.
  const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
  const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
  // Static invokes have no receiver; otherwise vC holds the 'this' reference.
  // |receiver| is passed by address to FindMethodFromCode, so it may be
  // updated during resolution.
  Object* receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
  ArtMethod* sf_method = shadow_frame.GetMethod();
  ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
      method_idx, &receiver, sf_method, self);
  // The shadow frame should already be pushed, so we don't need to update it.
  if (UNLIKELY(called_method == nullptr)) {
    // Resolution failed; an exception is pending.
    CHECK(self->IsExceptionPending());
    result->SetJ(0);
    return false;
  } else if (UNLIKELY(!called_method->IsInvokable())) {
    // E.g. abstract method: raise the appropriate invocation-time error.
    called_method->ThrowInvocationTimeError();
    result->SetJ(0);
    return false;
  } else {
    // Notify the JIT: virtual/interface calls feed inline caches, and every
    // invoke counts as a sample toward compilation thresholds.
    jit::Jit* jit = Runtime::Current()->GetJit();
    if (jit != nullptr) {
      if (type == kVirtual || type == kInterface) {
        jit->InvokeVirtualOrInterface(
            self, receiver, sf_method, shadow_frame.GetDexPC(), called_method);
      }
      jit->AddSamples(self, sf_method, 1, /*with_backedges*/false);
    }
    // TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
    if (type == kVirtual || type == kInterface) {
      instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
      if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
        instrumentation->InvokeVirtualOrInterface(
            self, receiver, sf_method, shadow_frame.GetDexPC(), called_method);
      }
    }
    // Perform the actual call (argument marshalling + frame push).
    return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
                                             result);
  }
}
666
667 // Handles invoke-virtual-quick and invoke-virtual-quick-range instructions.
668 // Returns true on success, otherwise throws an exception and returns false.
template<bool is_range>
static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
                                        const Instruction* inst, uint16_t inst_data,
                                        JValue* result) {
  // Quickened invokes encode a vtable index (vB) instead of a method index;
  // the receiver is always in vC.
  const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
  Object* const receiver = shadow_frame.GetVRegReference(vregC);
  if (UNLIKELY(receiver == nullptr)) {
    // We lost the reference to the method index so we cannot get a more
    // precised exception message.
    ThrowNullPointerExceptionFromDexPC();
    return false;
  }
  const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
  // Quickening requires the class to carry an embedded vtable.
  CHECK(receiver->GetClass()->ShouldHaveEmbeddedImtAndVTable());
  // Dispatch through the receiver's embedded vtable slot.
  ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
      vtable_idx, sizeof(void*));
  if (UNLIKELY(called_method == nullptr)) {
    CHECK(self->IsExceptionPending());
    result->SetJ(0);
    return false;
  } else if (UNLIKELY(!called_method->IsInvokable())) {
    called_method->ThrowInvocationTimeError();
    result->SetJ(0);
    return false;
  } else {
    // Notify the JIT (inline caches + compilation-threshold samples).
    jit::Jit* jit = Runtime::Current()->GetJit();
    if (jit != nullptr) {
      jit->InvokeVirtualOrInterface(
          self, receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
      jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/false);
    }
    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
    // TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
    if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
      instrumentation->InvokeVirtualOrInterface(
          self, receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
    }
    // No need to check since we've been quickened.
    return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
  }
}
710
// Handles iget-XXX and sget-XXX instructions.
// Returns true on success, otherwise throws an exception and returns false.
// |find_type| selects instance vs. static field lookup; |do_access_check| enables
// verifier-style access checks. Definitions live out of line.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
                uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_);

// Handles iget-quick, iget-wide-quick and iget-object-quick instructions.
// Returns true on success, otherwise throws an exception and returns false.
// "Quick" variants carry a precomputed field offset, so no Thread* is needed for resolution.
template<Primitive::Type field_type>
bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_);

// Handles iput-XXX and sput-XXX instructions.
// Returns true on success, otherwise throws an exception and returns false.
// |transaction_active| selects the transactional write path (boot image compilation).
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
         bool transaction_active>
bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst,
                uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_);

// Handles iput-quick, iput-wide-quick and iput-object-quick instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<Primitive::Type field_type, bool transaction_active>
bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_);

736
// Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the
// java.lang.String class is initialized.
// Returns the resolved string, or null with a pending exception if String class
// initialization fails. (ResolveString itself may also return null on failure.)
static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  Class* java_lang_string_class = String::GetJavaLangString();
  if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    // Handle-protect the class across EnsureInitialized, which can run code and GC.
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_class(hs.NewHandle(java_lang_string_class));
    if (UNLIKELY(!class_linker->EnsureInitialized(self, h_class, true, true))) {
      DCHECK(self->IsExceptionPending());
      return nullptr;
    }
  }
  ArtMethod* method = shadow_frame.GetMethod();
  mirror::Class* declaring_class = method->GetDeclaringClass();
  // MethodVerifier refuses methods with string_idx out of bounds.
  DCHECK_LT(string_idx, declaring_class->GetDexCache()->NumStrings());
  // Fast path: read the already-resolved string straight out of the dex cache.
  mirror::String* s = declaring_class->GetDexCacheStrings()[string_idx].Read();
  if (UNLIKELY(s == nullptr)) {
    // Slow path: resolve via the class linker, which also populates the dex cache.
    StackHandleScope<1> hs(self);
    Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
    s = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(), string_idx,
                                                            dex_cache);
  }
  return s;
}
764
765 // Handles div-int, div-int/2addr, div-int/li16 and div-int/lit8 instructions.
766 // Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
DoIntDivide(ShadowFrame & shadow_frame,size_t result_reg,int32_t dividend,int32_t divisor)767 static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
768 int32_t dividend, int32_t divisor)
769 SHARED_REQUIRES(Locks::mutator_lock_) {
770 constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
771 if (UNLIKELY(divisor == 0)) {
772 ThrowArithmeticExceptionDivideByZero();
773 return false;
774 }
775 if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
776 shadow_frame.SetVReg(result_reg, kMinInt);
777 } else {
778 shadow_frame.SetVReg(result_reg, dividend / divisor);
779 }
780 return true;
781 }
782
783 // Handles rem-int, rem-int/2addr, rem-int/li16 and rem-int/lit8 instructions.
784 // Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
DoIntRemainder(ShadowFrame & shadow_frame,size_t result_reg,int32_t dividend,int32_t divisor)785 static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
786 int32_t dividend, int32_t divisor)
787 SHARED_REQUIRES(Locks::mutator_lock_) {
788 constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
789 if (UNLIKELY(divisor == 0)) {
790 ThrowArithmeticExceptionDivideByZero();
791 return false;
792 }
793 if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
794 shadow_frame.SetVReg(result_reg, 0);
795 } else {
796 shadow_frame.SetVReg(result_reg, dividend % divisor);
797 }
798 return true;
799 }
800
801 // Handles div-long and div-long-2addr instructions.
802 // Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
DoLongDivide(ShadowFrame & shadow_frame,size_t result_reg,int64_t dividend,int64_t divisor)803 static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
804 int64_t dividend, int64_t divisor)
805 SHARED_REQUIRES(Locks::mutator_lock_) {
806 const int64_t kMinLong = std::numeric_limits<int64_t>::min();
807 if (UNLIKELY(divisor == 0)) {
808 ThrowArithmeticExceptionDivideByZero();
809 return false;
810 }
811 if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
812 shadow_frame.SetVRegLong(result_reg, kMinLong);
813 } else {
814 shadow_frame.SetVRegLong(result_reg, dividend / divisor);
815 }
816 return true;
817 }
818
819 // Handles rem-long and rem-long-2addr instructions.
820 // Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
DoLongRemainder(ShadowFrame & shadow_frame,size_t result_reg,int64_t dividend,int64_t divisor)821 static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
822 int64_t dividend, int64_t divisor)
823 SHARED_REQUIRES(Locks::mutator_lock_) {
824 const int64_t kMinLong = std::numeric_limits<int64_t>::min();
825 if (UNLIKELY(divisor == 0)) {
826 ThrowArithmeticExceptionDivideByZero();
827 return false;
828 }
829 if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
830 shadow_frame.SetVRegLong(result_reg, 0);
831 } else {
832 shadow_frame.SetVRegLong(result_reg, dividend % divisor);
833 }
834 return true;
835 }
836
// Handles filled-new-array and filled-new-array-range instructions.
// Returns true on success, otherwise throws an exception and returns false.
// The created array is returned through |result|; declaration only, defined out of line
// (explicitly instantiated for each template-parameter combination).
template <bool is_range, bool do_access_check, bool transaction_active>
bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
                      Thread* self, JValue* result);
842
843 // Handles packed-switch instruction.
844 // Returns the branch offset to the next instruction to execute.
DoPackedSwitch(const Instruction * inst,const ShadowFrame & shadow_frame,uint16_t inst_data)845 static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
846 uint16_t inst_data)
847 SHARED_REQUIRES(Locks::mutator_lock_) {
848 DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH);
849 const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
850 int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
851 DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
852 uint16_t size = switch_data[1];
853 if (size == 0) {
854 // Empty packed switch, move forward by 3 (size of PACKED_SWITCH).
855 return 3;
856 }
857 const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
858 DCHECK_ALIGNED(keys, 4);
859 int32_t first_key = keys[0];
860 const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]);
861 DCHECK_ALIGNED(targets, 4);
862 int32_t index = test_val - first_key;
863 if (index >= 0 && index < size) {
864 return targets[index];
865 } else {
866 // No corresponding value: move forward by 3 (size of PACKED_SWITCH).
867 return 3;
868 }
869 }
870
871 // Handles sparse-switch instruction.
872 // Returns the branch offset to the next instruction to execute.
DoSparseSwitch(const Instruction * inst,const ShadowFrame & shadow_frame,uint16_t inst_data)873 static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
874 uint16_t inst_data)
875 SHARED_REQUIRES(Locks::mutator_lock_) {
876 DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
877 const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
878 int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
879 DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
880 uint16_t size = switch_data[1];
881 // Return length of SPARSE_SWITCH if size is 0.
882 if (size == 0) {
883 return 3;
884 }
885 const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
886 DCHECK_ALIGNED(keys, 4);
887 const int32_t* entries = keys + size;
888 DCHECK_ALIGNED(entries, 4);
889 int lo = 0;
890 int hi = size - 1;
891 while (lo <= hi) {
892 int mid = (lo + hi) / 2;
893 int32_t foundVal = keys[mid];
894 if (test_val < foundVal) {
895 hi = mid - 1;
896 } else if (test_val > foundVal) {
897 lo = mid + 1;
898 } else {
899 return entries[mid];
900 }
901 }
902 // No corresponding value: move forward by 3 (size of SPARSE_SWITCH).
903 return 3;
904 }
905
906 template <bool _do_check>
DoBoxLambda(Thread * self,ShadowFrame & shadow_frame,const Instruction * inst,uint16_t inst_data)907 static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
908 uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_) {
909 /*
910 * box-lambda vA, vB /// opcode 0xf8, format 22x
911 * - vA is the target register where the Object representation of the closure will be stored into
912 * - vB is a closure (made by create-lambda)
913 * (also reads vB + 1)
914 */
915 uint32_t vreg_target_object = inst->VRegA_22x(inst_data);
916 uint32_t vreg_source_closure = inst->VRegB_22x();
917
918 lambda::Closure* lambda_closure = ReadLambdaClosureFromVRegsOrThrow(shadow_frame,
919 vreg_source_closure);
920
921 // Failed lambda target runtime check, an exception was raised.
922 if (UNLIKELY(lambda_closure == nullptr)) {
923 CHECK(self->IsExceptionPending());
924 return false;
925 }
926
927 mirror::Object* closure_as_object =
928 Runtime::Current()->GetLambdaBoxTable()->BoxLambda(lambda_closure);
929
930 // Failed to box the lambda, an exception was raised.
931 if (UNLIKELY(closure_as_object == nullptr)) {
932 CHECK(self->IsExceptionPending());
933 return false;
934 }
935
936 shadow_frame.SetVRegReference(vreg_target_object, closure_as_object);
937 return true;
938 }
939
template <bool _do_check> SHARED_REQUIRES(Locks::mutator_lock_)
static inline bool DoUnboxLambda(Thread* self,
                                 ShadowFrame& shadow_frame,
                                 const Instruction* inst,
                                 uint16_t inst_data) {
  /*
   * unbox-lambda vA, vB, [type id] /// opcode 0xf9, format 22c
   * - vA is the target register where the closure will be written into
   *   (also writes vA + 1)
   * - vB is the Object representation of the closure (made by box-lambda)
   */
  const uint32_t target_closure_vreg = inst->VRegA_22c(inst_data);
  const uint32_t source_object_vreg = inst->VRegB_22c();

  mirror::Object* const boxed_closure = shadow_frame.GetVRegReference(source_object_vreg);
  if (UNLIKELY(boxed_closure == nullptr)) {
    // Unboxing null raises a NullPointerException.
    ThrowNullPointerExceptionFromInterpreter();
    return false;
  }

  lambda::Closure* unboxed_closure = nullptr;
  const bool unbox_ok = Runtime::Current()->GetLambdaBoxTable()->UnboxLambda(
      boxed_closure, /*out*/&unboxed_closure);
  if (!unbox_ok) {
    // The box table raised an exception describing the failure.
    CHECK(self->IsExceptionPending());
    return false;
  }

  DCHECK(unboxed_closure != nullptr);
  WriteLambdaClosureIntoVRegs(/*inout*/shadow_frame, *unboxed_closure, target_closure_vreg);
  return true;
}
973
// Computes the dex pc at which execution should continue after the pending exception at
// |dex_pc| (presumably a catch handler, or an unwind indicator -- see the definition).
uint32_t FindNextInstructionFollowingException(Thread* self, ShadowFrame& shadow_frame,
    uint32_t dex_pc, const instrumentation::Instrumentation* instrumentation)
    SHARED_REQUIRES(Locks::mutator_lock_);

// Aborts on an opcode the interpreter cannot handle. NO_RETURN: never returns to the caller.
// Marked cold so the error path stays out of the hot instruction dispatch.
NO_RETURN void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame)
    __attribute__((cold))
    SHARED_REQUIRES(Locks::mutator_lock_);
981
// Compile-time switch for per-bytecode TraceExecution logging.
// Flip the constant to true to get a TraceExecution dump before each bytecode executes.
static inline bool TraceExecutionEnabled() {
  constexpr bool kTraceExecution = false;
  return kTraceExecution;
}
986
TraceExecution(const ShadowFrame & shadow_frame,const Instruction * inst,const uint32_t dex_pc)987 static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
988 const uint32_t dex_pc)
989 SHARED_REQUIRES(Locks::mutator_lock_) {
990 if (TraceExecutionEnabled()) {
991 #define TRACE_LOG std::cerr
992 std::ostringstream oss;
993 oss << PrettyMethod(shadow_frame.GetMethod())
994 << StringPrintf("\n0x%x: ", dex_pc)
995 << inst->DumpString(shadow_frame.GetMethod()->GetDexFile()) << "\n";
996 for (uint32_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
997 uint32_t raw_value = shadow_frame.GetVReg(i);
998 Object* ref_value = shadow_frame.GetVRegReference(i);
999 oss << StringPrintf(" vreg%u=0x%08X", i, raw_value);
1000 if (ref_value != nullptr) {
1001 if (ref_value->GetClass()->IsStringClass() &&
1002 ref_value->AsString()->GetValue() != nullptr) {
1003 oss << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\"";
1004 } else {
1005 oss << "/" << PrettyTypeOf(ref_value);
1006 }
1007 }
1008 }
1009 TRACE_LOG << oss.str() << "\n";
1010 #undef TRACE_LOG
1011 }
1012 }
1013
// Returns whether |branch_offset| jumps backward (or to itself): offset <= 0.
static inline bool IsBackwardBranch(int32_t branch_offset) {
  const bool is_forward = branch_offset > 0;
  return !is_forward;
}
1017
// Bridges a call from the interpreter into compiled code for |shadow_frame|'s invocation,
// storing the outcome in |result| (declaration only; see the definition for details).
void ArtInterpreterToCompiledCodeBridge(Thread* self,
                                        ArtMethod* caller,
                                        const DexFile::CodeItem* code_item,
                                        ShadowFrame* shadow_frame,
                                        JValue* result);

// Set string value created from StringFactory.newStringFromXXX() into all aliases of
// StringFactory.newEmptyString().
void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
                                    uint16_t this_obj_vreg,
                                    JValue result);
1029
// Explicitly instantiate all DoInvoke functions.
// One instantiation per (invoke type) x (is_range) x (do_check) combination.
#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check)                      \
  template SHARED_REQUIRES(Locks::mutator_lock_)                                           \
  bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame,      \
                                             const Instruction* inst, uint16_t inst_data,  \
                                             JValue* result)

#define EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(_type)       \
  EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, false, false);  \
  EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, false, true);   \
  EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, true, false);   \
  EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, true, true);

EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kStatic)     // invoke-static/range.
EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kDirect)     // invoke-direct/range.
EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kVirtual)    // invoke-virtual/range.
EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kSuper)      // invoke-super/range.
EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kInterface)  // invoke-interface/range.
#undef EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL
#undef EXPLICIT_DO_INVOKE_TEMPLATE_DECL
1050
1051 // Explicitly instantiate all DoInvokeVirtualQuick functions.
1052 #define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \
1053 template SHARED_REQUIRES(Locks::mutator_lock_) \
1054 bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \
1055 const Instruction* inst, uint16_t inst_data, \
1056 JValue* result)
1057
1058 EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(false); // invoke-virtual-quick.
1059 EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(true); // invoke-virtual-quick-range.
1060 #undef EXPLICIT_INSTANTIATION_DO_INVOKE_VIRTUAL_QUICK
1061
// Explicitly instantiate all DoCreateLambda functions.
// One instantiation per _do_check mode (false/true).
#define EXPLICIT_DO_CREATE_LAMBDA_DECL(_do_check)                                   \
  template SHARED_REQUIRES(Locks::mutator_lock_)                                    \
  bool DoCreateLambda<_do_check>(Thread* self,                                      \
                                 const Instruction* inst,                           \
                                 /*inout*/ShadowFrame& shadow_frame,                \
                                 /*inout*/lambda::ClosureBuilder* closure_builder,  \
                                 /*inout*/lambda::Closure* uninitialized_closure);

EXPLICIT_DO_CREATE_LAMBDA_DECL(false);  // create-lambda
EXPLICIT_DO_CREATE_LAMBDA_DECL(true);   // create-lambda
#undef EXPLICIT_DO_CREATE_LAMBDA_DECL
1074
// Explicitly instantiate all DoInvokeLambda functions.
// One instantiation per _do_check mode (false/true).
#define EXPLICIT_DO_INVOKE_LAMBDA_DECL(_do_check)                                                \
  template SHARED_REQUIRES(Locks::mutator_lock_)                                                 \
  bool DoInvokeLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
                                 uint16_t inst_data, JValue* result);

EXPLICIT_DO_INVOKE_LAMBDA_DECL(false);  // invoke-lambda
EXPLICIT_DO_INVOKE_LAMBDA_DECL(true);   // invoke-lambda
#undef EXPLICIT_DO_INVOKE_LAMBDA_DECL
1084
// Explicitly instantiate all DoBoxLambda functions.
// One instantiation per _do_check mode (false/true).
#define EXPLICIT_DO_BOX_LAMBDA_DECL(_do_check)                                                  \
  template SHARED_REQUIRES(Locks::mutator_lock_)                                                \
  bool DoBoxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
                              uint16_t inst_data);

EXPLICIT_DO_BOX_LAMBDA_DECL(false);  // box-lambda
EXPLICIT_DO_BOX_LAMBDA_DECL(true);   // box-lambda
#undef EXPLICIT_DO_BOX_LAMBDA_DECL
1094
1095 // Explicitly instantiate all DoUnBoxLambda functions.
1096 #define EXPLICIT_DO_UNBOX_LAMBDA_DECL(_do_check) \
1097 template SHARED_REQUIRES(Locks::mutator_lock_) \
1098 bool DoUnboxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
1099 uint16_t inst_data);
1100
1101 EXPLICIT_DO_UNBOX_LAMBDA_DECL(false); // unbox-lambda
1102 EXPLICIT_DO_UNBOX_LAMBDA_DECL(true); // unbox-lambda
1103 #undef EXPLICIT_DO_BOX_LAMBDA_DECL
1104
1105 // Explicitly instantiate all DoCaptureVariable functions.
1106 #define EXPLICIT_DO_CAPTURE_VARIABLE_DECL(_do_check) \
1107 template SHARED_REQUIRES(Locks::mutator_lock_) \
1108 bool DoCaptureVariable<_do_check>(Thread* self, \
1109 const Instruction* inst, \
1110 ShadowFrame& shadow_frame, \
1111 lambda::ClosureBuilder* closure_builder);
1112
1113 EXPLICIT_DO_CAPTURE_VARIABLE_DECL(false); // capture-variable
1114 EXPLICIT_DO_CAPTURE_VARIABLE_DECL(true); // capture-variable
1115 #undef EXPLICIT_DO_CREATE_LAMBDA_DECL
1116
1117 // Explicitly instantiate all DoLiberateVariable functions.
1118 #define EXPLICIT_DO_LIBERATE_VARIABLE_DECL(_do_check) \
1119 template SHARED_REQUIRES(Locks::mutator_lock_) \
1120 bool DoLiberateVariable<_do_check>(Thread* self, \
1121 const Instruction* inst, \
1122 size_t captured_variable_index, \
1123 ShadowFrame& shadow_frame); \
1124
1125 EXPLICIT_DO_LIBERATE_VARIABLE_DECL(false); // liberate-variable
1126 EXPLICIT_DO_LIBERATE_VARIABLE_DECL(true); // liberate-variable
1127 #undef EXPLICIT_DO_LIBERATE_LAMBDA_DECL
1128 } // namespace interpreter
1129 } // namespace art
1130
1131 #endif // ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
1132