1 /*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "art_method-inl.h"
18 #include "base/callee_save_type.h"
19 #include "base/enums.h"
20 #include "callee_save_frame.h"
21 #include "common_throws.h"
22 #include "class_root-inl.h"
23 #include "debug_print.h"
24 #include "debugger.h"
25 #include "dex/dex_file-inl.h"
26 #include "dex/dex_file_types.h"
27 #include "dex/dex_instruction-inl.h"
28 #include "dex/method_reference.h"
29 #include "entrypoints/entrypoint_utils-inl.h"
30 #include "entrypoints/quick/callee_save_frame.h"
31 #include "entrypoints/runtime_asm_entrypoints.h"
32 #include "gc/accounting/card_table-inl.h"
33 #include "imt_conflict_table.h"
34 #include "imtable-inl.h"
35 #include "instrumentation.h"
36 #include "interpreter/interpreter.h"
37 #include "interpreter/interpreter_common.h"
38 #include "interpreter/shadow_frame-inl.h"
39 #include "jit/jit.h"
40 #include "jit/jit_code_cache.h"
41 #include "linear_alloc.h"
42 #include "method_handles.h"
43 #include "mirror/class-inl.h"
44 #include "mirror/dex_cache-inl.h"
45 #include "mirror/method.h"
46 #include "mirror/method_handle_impl.h"
47 #include "mirror/object-inl.h"
48 #include "mirror/object_array-inl.h"
49 #include "mirror/var_handle.h"
50 #include "oat.h"
51 #include "oat_file.h"
52 #include "oat_quick_method_header.h"
53 #include "quick_exception_handler.h"
54 #include "runtime.h"
55 #include "scoped_thread_state_change-inl.h"
56 #include "stack.h"
57 #include "thread-inl.h"
58 #include "var_handles.h"
59 #include "well_known_classes.h"
60
61 namespace art {
62
63 // Visits the arguments as saved to the stack by a CalleeSaveType::kRefAndArgs callee save frame.
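// Subclasses override Visit() and then call VisitArguments(); each Visit() call exposes one
// argument via GetParamPrimitiveType()/GetParamAddress(). A minimal sketch (the class name is
// hypothetical; the concrete visitors later in this file follow the same pattern):
//
//   class CountRefArgsVisitor final : public QuickArgumentVisitor {
//    public:
//     using QuickArgumentVisitor::QuickArgumentVisitor;
//     void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
//       if (IsParamAReference()) {
//         ++ref_count_;
//       }
//     }
//     size_t ref_count_ = 0;
//   };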
64 class QuickArgumentVisitor {
65 // Number of bytes for each out register in the caller method's frame.
66 static constexpr size_t kBytesStackArgLocation = 4;
67 // Frame size in bytes of a callee-save frame for RefsAndArgs.
68 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
69 RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
70 // Offset of first GPR arg.
71 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
72 RuntimeCalleeSaveFrame::GetGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
73 // Offset of first FPR arg.
74 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
75 RuntimeCalleeSaveFrame::GetFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
76 // Offset of return address.
77 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset =
78 RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs);
79 #if defined(__arm__)
80 // The callee save frame is pointed to by SP.
81 // | argN | |
82 // | ... | |
83 // | arg4 | |
84 // | arg3 spill | | Caller's frame
85 // | arg2 spill | |
86 // | arg1 spill | |
87 // | Method* | ---
88 // | LR |
89 // | ... | 4x6 bytes callee saves
90 // | R3 |
91 // | R2 |
92 // | R1 |
93 // | S15 |
94 // | : |
95 // | S0 |
96 // | | 4x2 bytes padding
97 // | Method* | <- sp
98 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
99 static constexpr bool kAlignPairRegister = true;
100 static constexpr bool kQuickSoftFloatAbi = false;
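// Back-filling example (a sketch, following the logic in VisitArguments() below): for arguments
// (float, double, float), the first float lands in S0, the double is aligned to D1 (S2/S3), and
// the second float back-fills the S1 hole left by the double alignment.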
101 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = true;
102 static constexpr bool kQuickSkipOddFpRegisters = false;
103 static constexpr size_t kNumQuickGprArgs = 3;
104 static constexpr size_t kNumQuickFprArgs = 16;
105 static constexpr bool kGprFprLockstep = false;
106 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
107 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
108 }
109 #elif defined(__aarch64__)
110 // The callee save frame is pointed to by SP.
111 // | argN | |
112 // | ... | |
113 // | arg4 | |
114 // | arg3 spill | | Caller's frame
115 // | arg2 spill | |
116 // | arg1 spill | |
117 // | Method* | ---
118 // | LR |
119 // | X29 |
120 // | : |
121 // | X20 |
122 // | X7 |
123 // | : |
124 // | X1 |
125 // | D7 |
126 // | : |
127 // | D0 |
128 // | | padding
129 // | Method* | <- sp
130 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
131 static constexpr bool kAlignPairRegister = false;
132 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
133 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
134 static constexpr bool kQuickSkipOddFpRegisters = false;
135 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
136 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
137 static constexpr bool kGprFprLockstep = false;
138 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
139 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
140 }
141 #elif defined(__i386__)
142 // The callee save frame is pointed to by SP.
143 // | argN | |
144 // | ... | |
145 // | arg4 | |
146 // | arg3 spill | | Caller's frame
147 // | arg2 spill | |
148 // | arg1 spill | |
149 // | Method* | ---
150 // | Return |
151 // | EBP,ESI,EDI | callee saves
152 // | EBX | arg3
153 // | EDX | arg2
154 // | ECX | arg1
155 // | XMM3 | float arg 4
156 // | XMM2 | float arg 3
157 // | XMM1 | float arg 2
158 // | XMM0 | float arg 1
159 // | EAX/Method* | <- sp
160 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
161 static constexpr bool kAlignPairRegister = false;
162 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
163 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
164 static constexpr bool kQuickSkipOddFpRegisters = false;
165 static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
166 static constexpr size_t kNumQuickFprArgs = 4; // 4 arguments passed in FPRs.
167 static constexpr bool kGprFprLockstep = false;
168 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
169 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
170 }
171 #elif defined(__x86_64__)
172 // The callee save frame is pointed to by SP.
173 // | argN | |
174 // | ... | |
175 // | reg. arg spills | | Caller's frame
176 // | Method* | ---
177 // | Return |
178 // | R15 | callee save
179 // | R14 | callee save
180 // | R13 | callee save
181 // | R12 | callee save
182 // | R9 | arg5
183 // | R8 | arg4
184 // | RSI/R6 | arg1
185 // | RBP/R5 | callee save
186 // | RBX/R3 | callee save
187 // | RDX/R2 | arg2
188 // | RCX/R1 | arg3
189 // | XMM7 | float arg 8
190 // | XMM6 | float arg 7
191 // | XMM5 | float arg 6
192 // | XMM4 | float arg 5
193 // | XMM3 | float arg 4
194 // | XMM2 | float arg 3
195 // | XMM1 | float arg 2
196 // | XMM0 | float arg 1
197 // | Padding |
198 // | RDI/Method* | <- sp
199 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
200 static constexpr bool kAlignPairRegister = false;
201 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
202 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
203 static constexpr bool kQuickSkipOddFpRegisters = false;
204 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs.
205 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
206 static constexpr bool kGprFprLockstep = false;
207 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
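// The spill slots above the Method* hold RCX, RDX, RBX, RBP, RSI, R8, R9, ... (see the frame
// diagram above), so argument registers are not spilled in argument order. This maps the
// visitor's GPR index (0 = RSI/arg1, 1 = RDX/arg2, 2 = RCX/arg3, 3 = R8/arg4, 4 = R9/arg5)
// to its spill slot.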
208 switch (gpr_index) {
209 case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
210 case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
211 case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
212 case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
213 case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
214 default:
215 LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
216 UNREACHABLE();
217 }
218 }
219 #else
220 #error "Unsupported architecture"
221 #endif
222
223 public:
224 // Special handling for proxy methods. Proxy methods are instance methods so the
225 // 'this' object is the 1st argument. They also have the same frame layout as the
226 // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
227 // 1st GPR.
228 static StackReference<mirror::Object>* GetProxyThisObjectReference(ArtMethod** sp)
229 REQUIRES_SHARED(Locks::mutator_lock_) {
230 CHECK((*sp)->IsProxyMethod());
231 CHECK_GT(kNumQuickGprArgs, 0u);
232 constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
233 size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
234 GprIndexToGprOffset(kThisGprIndex);
235 uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
236 return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address);
237 }
238
239 static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
240 DCHECK((*sp)->IsCalleeSaveMethod());
241 return GetCalleeSaveMethodCaller(sp, CalleeSaveType::kSaveRefsAndArgs);
242 }
243
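// Returns the method owning the physical caller frame, i.e. the outermost method at the call
// site; inlined callers are not resolved here.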
244 static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
245 DCHECK((*sp)->IsCalleeSaveMethod());
246 uint8_t* previous_sp =
247 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
248 return *reinterpret_cast<ArtMethod**>(previous_sp);
249 }
250
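// Returns the dex pc of the call site in the caller. For optimized code this is read from the
// stack map; if the call site was inlined, the dex pc of the innermost inlined frame is returned.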
251 static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
252 DCHECK((*sp)->IsCalleeSaveMethod());
253 constexpr size_t callee_frame_size =
254 RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
255 ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
256 reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
257 uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
258 const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
259 uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
260
261 if (current_code->IsOptimized()) {
262 CodeInfo code_info = CodeInfo::DecodeInlineInfoOnly(current_code);
263 StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset);
264 DCHECK(stack_map.IsValid());
265 BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
266 if (!inline_infos.empty()) {
267 return inline_infos.back().GetDexPc();
268 } else {
269 return stack_map.GetDexPc();
270 }
271 } else {
272 return current_code->ToDexPc(caller_sp, outer_pc);
273 }
274 }
275
276 static uint8_t* GetCallingPcAddr(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
277 DCHECK((*sp)->IsCalleeSaveMethod());
278 uint8_t* return_address_spill =
279 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset;
280 return return_address_spill;
281 }
282
283 // For the given quick RefsAndArgs callee-save frame, return the caller's PC.
284 static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
285 return *reinterpret_cast<uintptr_t*>(GetCallingPcAddr(sp));
286 }
287
288 QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
289 uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) :
290 is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
291 gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
292 fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
293 stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
294 + sizeof(ArtMethod*)), // Skip ArtMethod*.
295 gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
296 cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
297 static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
298 "Number of Quick FPR arguments unexpected");
299 static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
300 "Double alignment unexpected");
301 // For register alignment, we want to assume that the counter (fpr_double_index_) is even if the
302 // next register is even.
303 static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
304 "Number of Quick FPR arguments not even");
305 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
306 }
307
308 virtual ~QuickArgumentVisitor() {}
309
310 virtual void Visit() = 0;
311
312 Primitive::Type GetParamPrimitiveType() const {
313 return cur_type_;
314 }
315
316 uint8_t* GetParamAddress() const {
317 if (!kQuickSoftFloatAbi) {
318 Primitive::Type type = GetParamPrimitiveType();
319 if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
320 if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
321 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
322 return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
323 }
324 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
325 return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
326 }
327 return stack_args_ + (stack_index_ * kBytesStackArgLocation);
328 }
329 }
330 if (gpr_index_ < kNumQuickGprArgs) {
331 return gpr_args_ + GprIndexToGprOffset(gpr_index_);
332 }
333 return stack_args_ + (stack_index_ * kBytesStackArgLocation);
334 }
335
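// Returns whether the current 64-bit parameter straddles the argument registers and the stack
// (see is_split_long_or_double_ below); this can only happen when GPR or FPR spill slots are
// 32-bit wide.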
336 bool IsSplitLongOrDouble() const {
337 if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
338 (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
339 return is_split_long_or_double_;
340 } else {
341 return false; // An optimization for when GPR and FPRs are 64bit.
342 }
343 }
344
345 bool IsParamAReference() const {
346 return GetParamPrimitiveType() == Primitive::kPrimNot;
347 }
348
349 bool IsParamALongOrDouble() const {
350 Primitive::Type type = GetParamPrimitiveType();
351 return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
352 }
353
354 uint64_t ReadSplitLongParam() const {
355 // The split long is always available through the stack.
356 return *reinterpret_cast<uint64_t*>(stack_args_
357 + stack_index_ * kBytesStackArgLocation);
358 }
359
360 void IncGprIndex() {
361 gpr_index_++;
362 if (kGprFprLockstep) {
363 fpr_index_++;
364 }
365 }
366
367 void IncFprIndex() {
368 fpr_index_++;
369 if (kGprFprLockstep) {
370 gpr_index_++;
371 }
372 }
373
374 void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) {
375 // (a) 'stack_args_' should point to the method's first argument
376 // (b) whatever the argument type is, 'stack_index_' should be
377 // advanced for every argument visited.
378 gpr_index_ = 0;
379 fpr_index_ = 0;
380 if (kQuickDoubleRegAlignedFloatBackFilled) {
381 fpr_double_index_ = 0;
382 }
383 stack_index_ = 0;
384 if (!is_static_) { // Handle this.
385 cur_type_ = Primitive::kPrimNot;
386 is_split_long_or_double_ = false;
387 Visit();
388 stack_index_++;
389 if (kNumQuickGprArgs > 0) {
390 IncGprIndex();
391 }
392 }
393 for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
394 cur_type_ = Primitive::GetType(shorty_[shorty_index]);
395 switch (cur_type_) {
396 case Primitive::kPrimNot:
397 case Primitive::kPrimBoolean:
398 case Primitive::kPrimByte:
399 case Primitive::kPrimChar:
400 case Primitive::kPrimShort:
401 case Primitive::kPrimInt:
402 is_split_long_or_double_ = false;
403 Visit();
404 stack_index_++;
405 if (gpr_index_ < kNumQuickGprArgs) {
406 IncGprIndex();
407 }
408 break;
409 case Primitive::kPrimFloat:
410 is_split_long_or_double_ = false;
411 Visit();
412 stack_index_++;
413 if (kQuickSoftFloatAbi) {
414 if (gpr_index_ < kNumQuickGprArgs) {
415 IncGprIndex();
416 }
417 } else {
418 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
419 IncFprIndex();
420 if (kQuickDoubleRegAlignedFloatBackFilled) {
421 // Double should not overlap with float.
422 // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
423 fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
424 // Float should not overlap with double.
425 if (fpr_index_ % 2 == 0) {
426 fpr_index_ = std::max(fpr_double_index_, fpr_index_);
427 }
428 } else if (kQuickSkipOddFpRegisters) {
429 IncFprIndex();
430 }
431 }
432 }
433 break;
434 case Primitive::kPrimDouble:
435 case Primitive::kPrimLong:
436 if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
437 if (cur_type_ == Primitive::kPrimLong &&
438 gpr_index_ == 0 &&
439 kAlignPairRegister) {
440 // Currently, this is only for ARM, where we align long parameters with
441 // even-numbered registers by skipping R1 and using R2 instead.
442 IncGprIndex();
443 }
444 is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
445 ((gpr_index_ + 1) == kNumQuickGprArgs);
446 if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
447 // We don't want to split this. Pass over this register.
448 gpr_index_++;
449 is_split_long_or_double_ = false;
450 }
451 Visit();
452 if (kBytesStackArgLocation == 4) {
453 stack_index_+= 2;
454 } else {
455 CHECK_EQ(kBytesStackArgLocation, 8U);
456 stack_index_++;
457 }
458 if (gpr_index_ < kNumQuickGprArgs) {
459 IncGprIndex();
460 if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
461 if (gpr_index_ < kNumQuickGprArgs) {
462 IncGprIndex();
463 }
464 }
465 }
466 } else {
467 is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
468 ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
469 Visit();
470 if (kBytesStackArgLocation == 4) {
471 stack_index_+= 2;
472 } else {
473 CHECK_EQ(kBytesStackArgLocation, 8U);
474 stack_index_++;
475 }
476 if (kQuickDoubleRegAlignedFloatBackFilled) {
477 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
478 fpr_double_index_ += 2;
479 // Float should not overlap with double.
480 if (fpr_index_ % 2 == 0) {
481 fpr_index_ = std::max(fpr_double_index_, fpr_index_);
482 }
483 }
484 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
485 IncFprIndex();
486 if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
487 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
488 IncFprIndex();
489 }
490 }
491 }
492 }
493 break;
494 default:
495 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
496 }
497 }
498 }
499
500 protected:
501 const bool is_static_;
502 const char* const shorty_;
503 const uint32_t shorty_len_;
504
505 private:
506 uint8_t* const gpr_args_; // Address of GPR arguments in callee save frame.
507 uint8_t* const fpr_args_; // Address of FPR arguments in callee save frame.
508 uint8_t* const stack_args_; // Address of stack arguments in caller's frame.
509 uint32_t gpr_index_; // Index into spilled GPRs.
510 // Index into spilled FPRs.
511 // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
512 // holds a higher register number.
513 uint32_t fpr_index_;
514 // Index into spilled FPRs for aligned double.
515 // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in
516 // terms of singles, may be behind fpr_index.
517 uint32_t fpr_double_index_;
518 uint32_t stack_index_; // Index into arguments on the stack.
519 // The current type of argument during VisitArguments.
520 Primitive::Type cur_type_;
521 // Does a 64bit parameter straddle the register and stack arguments?
522 bool is_split_long_or_double_;
523 };
524
525 // Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
526 // allows the use of the QuickArgumentVisitor constants without moving all the code into its own module.
527 extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
528 REQUIRES_SHARED(Locks::mutator_lock_) {
529 return QuickArgumentVisitor::GetProxyThisObjectReference(sp)->AsMirrorPtr();
530 }
531
532 // Visits arguments on the stack placing them into the shadow frame.
533 class BuildQuickShadowFrameVisitor final : public QuickArgumentVisitor {
534 public:
535 BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
536 uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
537 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
538
539 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
540
541 private:
542 ShadowFrame* const sf_;
543 uint32_t cur_reg_;
544
545 DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
546 };
547
548 void BuildQuickShadowFrameVisitor::Visit() {
549 Primitive::Type type = GetParamPrimitiveType();
550 switch (type) {
551 case Primitive::kPrimLong: // Fall-through.
552 case Primitive::kPrimDouble:
553 if (IsSplitLongOrDouble()) {
554 sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
555 } else {
556 sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
557 }
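// A long or double occupies two vregs: one increment here, another at the end of Visit().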
558 ++cur_reg_;
559 break;
560 case Primitive::kPrimNot: {
561 StackReference<mirror::Object>* stack_ref =
562 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
563 sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
564 }
565 break;
566 case Primitive::kPrimBoolean: // Fall-through.
567 case Primitive::kPrimByte: // Fall-through.
568 case Primitive::kPrimChar: // Fall-through.
569 case Primitive::kPrimShort: // Fall-through.
570 case Primitive::kPrimInt: // Fall-through.
571 case Primitive::kPrimFloat:
572 sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
573 break;
574 case Primitive::kPrimVoid:
575 LOG(FATAL) << "UNREACHABLE";
576 UNREACHABLE();
577 }
578 ++cur_reg_;
579 }
580
581 // Don't inline. See b/65159206.
582 NO_INLINE
583 static void HandleDeoptimization(JValue* result,
584 ArtMethod* method,
585 ShadowFrame* deopt_frame,
586 ManagedStack* fragment)
587 REQUIRES_SHARED(Locks::mutator_lock_) {
588 // Coming from partial-fragment deopt.
589 Thread* self = Thread::Current();
590 if (kIsDebugBuild) {
591 // Consistency-check: are the methods as expected? We check that the last shadow frame
592 // (the bottom of the call-stack) corresponds to the called method.
593 ShadowFrame* linked = deopt_frame;
594 while (linked->GetLink() != nullptr) {
595 linked = linked->GetLink();
596 }
597 CHECK_EQ(method, linked->GetMethod()) << method->PrettyMethod() << " "
598 << ArtMethod::PrettyMethod(linked->GetMethod());
599 }
600
601 if (VLOG_IS_ON(deopt)) {
602 // Print out the stack to verify that it was a partial-fragment deopt.
603 LOG(INFO) << "Continue-ing from deopt. Stack is:";
604 QuickExceptionHandler::DumpFramesWithType(self, true);
605 }
606
607 ObjPtr<mirror::Throwable> pending_exception;
608 bool from_code = false;
609 DeoptimizationMethodType method_type;
610 self->PopDeoptimizationContext(/* out */ result,
611 /* out */ &pending_exception,
612 /* out */ &from_code,
613 /* out */ &method_type);
614
615 // Push a transition back into managed code onto the linked list in thread.
616 self->PushManagedStackFragment(fragment);
617
618 // Ensure that the stack is still in order.
619 if (kIsDebugBuild) {
620 class EntireStackVisitor : public StackVisitor {
621 public:
622 explicit EntireStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
623 : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
624
625 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
626 // Nothing to do here. In a debug build, ValidateFrame will do the work in the walking
627 // logic. Just always say we want to continue.
628 return true;
629 }
630 };
631 EntireStackVisitor esv(self);
632 esv.WalkStack();
633 }
634
635 // Restore the exception that was pending before deoptimization then interpret the
636 // deoptimized frames.
637 if (pending_exception != nullptr) {
638 self->SetException(pending_exception);
639 }
640 interpreter::EnterInterpreterFromDeoptimize(self,
641 deopt_frame,
642 result,
643 from_code,
644 DeoptimizationMethodType::kDefault);
645 }
646
647 extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
648 REQUIRES_SHARED(Locks::mutator_lock_) {
649 // Ensure we don't get thread suspension until the object arguments are safely in the shadow
650 // frame.
651 ScopedQuickEntrypointChecks sqec(self);
652
653 if (UNLIKELY(!method->IsInvokable())) {
654 method->ThrowInvocationTimeError();
655 return 0;
656 }
657
658 JValue tmp_value;
659 ShadowFrame* deopt_frame = self->PopStackedShadowFrame(
660 StackedShadowFrameType::kDeoptimizationShadowFrame, false);
661 ManagedStack fragment;
662
663 DCHECK(!method->IsNative()) << method->PrettyMethod();
664 uint32_t shorty_len = 0;
665 ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
666 DCHECK(non_proxy_method->GetCodeItem() != nullptr) << method->PrettyMethod();
667 CodeItemDataAccessor accessor(non_proxy_method->DexInstructionData());
668 const char* shorty = non_proxy_method->GetShorty(&shorty_len);
669
670 JValue result;
671 bool force_frame_pop = false;
672
673 if (UNLIKELY(deopt_frame != nullptr)) {
674 HandleDeoptimization(&result, method, deopt_frame, &fragment);
675 } else {
676 const char* old_cause = self->StartAssertNoThreadSuspension(
677 "Building interpreter shadow frame");
678 uint16_t num_regs = accessor.RegistersSize();
679 // No last shadow coming from quick.
680 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
681 CREATE_SHADOW_FRAME(num_regs, /* link= */ nullptr, method, /* dex_pc= */ 0);
682 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
683 size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize();
684 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
685 shadow_frame, first_arg_reg);
686 shadow_frame_builder.VisitArguments();
687 // Push a transition back into managed code onto the linked list in thread.
688 self->PushManagedStackFragment(&fragment);
689 self->PushShadowFrame(shadow_frame);
690 self->EndAssertNoThreadSuspension(old_cause);
691
692 if (NeedsClinitCheckBeforeCall(method)) {
693 ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
694 if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
695 // Ensure static method's class is initialized.
696 StackHandleScope<1> hs(self);
697 Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
698 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
699 DCHECK(Thread::Current()->IsExceptionPending()) << method->PrettyMethod();
700 self->PopManagedStackFragment(fragment);
701 return 0;
702 }
703 }
704 }
705
706 result = interpreter::EnterInterpreterFromEntryPoint(self, accessor, shadow_frame);
707 force_frame_pop = shadow_frame->GetForcePopFrame();
708 }
709
710 // Pop transition.
711 self->PopManagedStackFragment(fragment);
712
713 // Request a stack deoptimization if needed
714 ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
715 uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
716 // If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
717 // should be done and it knows the real return pc. NB If the upcall is null we don't need to do
718 // anything. This can happen during shutdown or early startup.
719 if (UNLIKELY(
720 caller != nullptr &&
721 caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
722 (self->IsForceInterpreter() || Dbg::IsForcedInterpreterNeededForUpcall(self, caller)))) {
723 if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
724 LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
725 << caller->PrettyMethod();
726 } else {
727 VLOG(deopt) << "Forcing deoptimization on return from method " << method->PrettyMethod()
728 << " to " << caller->PrettyMethod()
729 << (force_frame_pop ? " for frame-pop" : "");
730 DCHECK(!force_frame_pop || result.GetJ() == 0) << "Force frame pop should have no result.";
731 if (force_frame_pop && self->GetException() != nullptr) {
732 LOG(WARNING) << "Suppressing exception for instruction-retry: "
733 << self->GetException()->Dump();
734 }
735 // Push the context of the deoptimization stack so we can restore the return value and the
736 // exception before executing the deoptimized frames.
737 self->PushDeoptimizationContext(
738 result,
739 shorty[0] == 'L' || shorty[0] == '[', /* class or array */
740 force_frame_pop ? nullptr : self->GetException(),
741 /* from_code= */ false,
742 DeoptimizationMethodType::kDefault);
743
744 // Set special exception to cause deoptimization.
745 self->SetException(Thread::GetDeoptimizationException());
746 }
747 }
748
749 // No need to restore the args since the method has already been run by the interpreter.
750 return result.GetJ();
751 }
752
753 // Visits arguments on the stack, placing them into the args vector; Object* arguments are
754 // converted to jobjects.
755 class BuildQuickArgumentVisitor final : public QuickArgumentVisitor {
756 public:
757 BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
758 ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
759 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
760
761 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
762
763 private:
764 ScopedObjectAccessUnchecked* const soa_;
765 std::vector<jvalue>* const args_;
766
767 DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
768 };
769
770 void BuildQuickArgumentVisitor::Visit() {
771 jvalue val;
772 Primitive::Type type = GetParamPrimitiveType();
773 switch (type) {
774 case Primitive::kPrimNot: {
775 StackReference<mirror::Object>* stack_ref =
776 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
777 val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
778 break;
779 }
780 case Primitive::kPrimLong: // Fall-through.
781 case Primitive::kPrimDouble:
782 if (IsSplitLongOrDouble()) {
783 val.j = ReadSplitLongParam();
784 } else {
785 val.j = *reinterpret_cast<jlong*>(GetParamAddress());
786 }
787 break;
788 case Primitive::kPrimBoolean: // Fall-through.
789 case Primitive::kPrimByte: // Fall-through.
790 case Primitive::kPrimChar: // Fall-through.
791 case Primitive::kPrimShort: // Fall-through.
792 case Primitive::kPrimInt: // Fall-through.
793 case Primitive::kPrimFloat:
794 val.i = *reinterpret_cast<jint*>(GetParamAddress());
795 break;
796 case Primitive::kPrimVoid:
797 LOG(FATAL) << "UNREACHABLE";
798 UNREACHABLE();
799 }
800 args_->push_back(val);
801 }
802
803 // Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
804 // which is responsible for recording callee save registers. We explicitly place into jobjects the
805 // incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
806 // field within the proxy object, which will box the primitive arguments and deal with error cases.
807 extern "C" uint64_t artQuickProxyInvokeHandler(
808 ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
809 REQUIRES_SHARED(Locks::mutator_lock_) {
810 DCHECK(proxy_method->IsProxyMethod()) << proxy_method->PrettyMethod();
811 DCHECK(receiver->GetClass()->IsProxyClass()) << proxy_method->PrettyMethod();
812 // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
813 const char* old_cause =
814 self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
815 // Register the top of the managed stack, making stack crawlable.
816 DCHECK_EQ((*sp), proxy_method) << proxy_method->PrettyMethod();
817 self->VerifyStack();
818 // Start new JNI local reference state.
819 JNIEnvExt* env = self->GetJniEnv();
820 ScopedObjectAccessUnchecked soa(env);
821 ScopedJniEnvLocalRefState env_state(env);
822 // Create local ref. copies of proxy method and the receiver.
823 jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
824
825 // Place the arguments into the args vector and remove the receiver.
826 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
827 CHECK(!non_proxy_method->IsStatic()) << proxy_method->PrettyMethod() << " "
828 << non_proxy_method->PrettyMethod();
829 std::vector<jvalue> args;
830 uint32_t shorty_len = 0;
831 const char* shorty = non_proxy_method->GetShorty(&shorty_len);
832 BuildQuickArgumentVisitor local_ref_visitor(
833 sp, /* is_static= */ false, shorty, shorty_len, &soa, &args);
834
835 local_ref_visitor.VisitArguments();
836 DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod();
837 args.erase(args.begin());
838
839 // Convert proxy method into expected interface method.
840 ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize);
841 DCHECK(interface_method != nullptr) << proxy_method->PrettyMethod();
842 DCHECK(!interface_method->IsProxyMethod()) << interface_method->PrettyMethod();
843 self->EndAssertNoThreadSuspension(old_cause);
844 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
845 DCHECK(!Runtime::Current()->IsActiveTransaction());
846 ObjPtr<mirror::Method> interface_reflect_method =
847 mirror::Method::CreateFromArtMethod<kRuntimePointerSize>(soa.Self(), interface_method);
848 if (interface_reflect_method == nullptr) {
849 soa.Self()->AssertPendingOOMException();
850 return 0;
851 }
852 jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_reflect_method);
853
854 // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
855 // that performs allocations or instrumentation events.
856 instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
857 if (instr->HasMethodEntryListeners()) {
858 instr->MethodEnterEvent(soa.Self(),
859 soa.Decode<mirror::Object>(rcvr_jobj),
860 proxy_method,
861 0);
862 if (soa.Self()->IsExceptionPending()) {
863 instr->MethodUnwindEvent(self,
864 soa.Decode<mirror::Object>(rcvr_jobj),
865 proxy_method,
866 0);
867 return 0;
868 }
869 }
870 JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
871 if (soa.Self()->IsExceptionPending()) {
872 if (instr->HasMethodUnwindListeners()) {
873 instr->MethodUnwindEvent(self,
874 soa.Decode<mirror::Object>(rcvr_jobj),
875 proxy_method,
876 0);
877 }
878 } else if (instr->HasMethodExitListeners()) {
879 instr->MethodExitEvent(self,
880 soa.Decode<mirror::Object>(rcvr_jobj),
881 proxy_method,
882 0,
883 {},
884 result);
885 }
886 return result.GetJ();
887 }
888
889 // Visitor returning a reference argument at a given position in a Quick stack frame.
890 // NOTE: Only used for testing purposes.
891 class GetQuickReferenceArgumentAtVisitor final : public QuickArgumentVisitor {
892 public:
893 GetQuickReferenceArgumentAtVisitor(ArtMethod** sp,
894 const char* shorty,
895 uint32_t shorty_len,
896 size_t arg_pos)
897 : QuickArgumentVisitor(sp, /* is_static= */ false, shorty, shorty_len),
898 cur_pos_(0u),
899 arg_pos_(arg_pos),
900 ref_arg_(nullptr) {
901 CHECK_LT(arg_pos, shorty_len) << "Argument position greater than the number of arguments";
902 }
903
904 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
905 if (cur_pos_ == arg_pos_) {
906 Primitive::Type type = GetParamPrimitiveType();
907 CHECK_EQ(type, Primitive::kPrimNot) << "Argument at searched position is not a reference";
908 ref_arg_ = reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
909 }
910 ++cur_pos_;
911 }
912
913 StackReference<mirror::Object>* GetReferenceArgument() {
914 return ref_arg_;
915 }
916
917 private:
918 // The position of the currently visited argument.
919 size_t cur_pos_;
920 // The position of the searched argument.
921 const size_t arg_pos_;
922 // The reference argument, if found.
923 StackReference<mirror::Object>* ref_arg_;
924
925 DISALLOW_COPY_AND_ASSIGN(GetQuickReferenceArgumentAtVisitor);
926 };
927
928 // Returns the reference argument at position `arg_pos` in the Quick stack frame at address `sp`.
929 // NOTE: Only used for testing purposes.
930 extern "C" StackReference<mirror::Object>* artQuickGetProxyReferenceArgumentAt(size_t arg_pos,
931 ArtMethod** sp)
932 REQUIRES_SHARED(Locks::mutator_lock_) {
933 ArtMethod* proxy_method = *sp;
934 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
935 CHECK(!non_proxy_method->IsStatic())
936 << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
937 uint32_t shorty_len = 0;
938 const char* shorty = non_proxy_method->GetShorty(&shorty_len);
939 GetQuickReferenceArgumentAtVisitor ref_arg_visitor(sp, shorty, shorty_len, arg_pos);
940 ref_arg_visitor.VisitArguments();
941 StackReference<mirror::Object>* ref_arg = ref_arg_visitor.GetReferenceArgument();
942 return ref_arg;
943 }
944
945 // Visitor returning all the reference arguments in a Quick stack frame.
946 class GetQuickReferenceArgumentsVisitor final : public QuickArgumentVisitor {
947 public:
948 GetQuickReferenceArgumentsVisitor(ArtMethod** sp,
949 bool is_static,
950 const char* shorty,
951 uint32_t shorty_len)
952 : QuickArgumentVisitor(sp, is_static, shorty, shorty_len) {}
953
954 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
955 Primitive::Type type = GetParamPrimitiveType();
956 if (type == Primitive::kPrimNot) {
957 StackReference<mirror::Object>* ref_arg =
958 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
959 ref_args_.push_back(ref_arg);
960 }
961 }
962
963 std::vector<StackReference<mirror::Object>*> GetReferenceArguments() {
964 return ref_args_;
965 }
966
967 private:
968 // The reference arguments.
969 std::vector<StackReference<mirror::Object>*> ref_args_;
970
971 DISALLOW_COPY_AND_ASSIGN(GetQuickReferenceArgumentsVisitor);
972 };
973
974 // Returns all reference arguments in the Quick stack frame at address `sp`.
975 std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMethod** sp)
976 REQUIRES_SHARED(Locks::mutator_lock_) {
977 ArtMethod* proxy_method = *sp;
978 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
979 CHECK(!non_proxy_method->IsStatic())
980 << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
981 uint32_t shorty_len = 0;
982 const char* shorty = non_proxy_method->GetShorty(&shorty_len);
983 GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /*is_static=*/ false, shorty, shorty_len);
984 ref_args_visitor.VisitArguments();
985 std::vector<StackReference<mirror::Object>*> ref_args = ref_args_visitor.GetReferenceArguments();
986 return ref_args;
987 }
988
989 // Reads object references held in arguments from quick frames and places them in JNI local
990 // references so they don't get garbage collected.
991 class RememberForGcArgumentVisitor final : public QuickArgumentVisitor {
992 public:
993 RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
994 uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
995 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
996
997 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
998
999 void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
1000
1001 private:
1002 ScopedObjectAccessUnchecked* const soa_;
1003 // References which we must update when exiting in case the GC moved the objects.
1004 std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
1005
1006 DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
1007 };
1008
1009 void RememberForGcArgumentVisitor::Visit() {
1010 if (IsParamAReference()) {
1011 StackReference<mirror::Object>* stack_ref =
1012 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1013 jobject reference =
1014 soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
1015 references_.push_back(std::make_pair(reference, stack_ref));
1016 }
1017 }
1018
1019 void RememberForGcArgumentVisitor::FixupReferences() {
1020 // Fixup any references which may have changed.
1021 for (const auto& pair : references_) {
1022 pair.second->Assign(soa_->Decode<mirror::Object>(pair.first));
1023 soa_->Env()->DeleteLocalRef(pair.first);
1024 }
1025 }
1026
1027 extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method,
1028 mirror::Object* this_object,
1029 Thread* self,
1030 ArtMethod** sp)
1031 REQUIRES_SHARED(Locks::mutator_lock_) {
1032 const void* result;
1033 // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip
1034 // that part.
1035 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
1036 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
1037 DCHECK(!method->IsProxyMethod())
1038 << "Proxy method " << method->PrettyMethod()
1039 << " (declaring class: " << method->GetDeclaringClass()->PrettyClass() << ")"
1040 << " should not hit instrumentation entrypoint.";
1041 if (instrumentation->IsDeoptimized(method)) {
1042 result = GetQuickToInterpreterBridge();
1043 } else {
1044 // This will get the entry point either from the oat file, the JIT or the appropriate bridge
1045 // method if none of those can be found.
1046 result = instrumentation->GetCodeForInvoke(method);
1047 jit::Jit* jit = Runtime::Current()->GetJit();
1048 DCHECK_NE(result, GetQuickInstrumentationEntryPoint()) << method->PrettyMethod();
1049 DCHECK(jit == nullptr ||
1050 // Native methods come through here in Interpreter entrypoints. We might not have
1051 // disabled jit-gc but that is fine since we won't return jit-code for native methods.
1052 method->IsNative() ||
1053 !jit->GetCodeCache()->GetGarbageCollectCode());
1054 DCHECK(!method->IsNative() ||
1055 jit == nullptr ||
1056 !jit->GetCodeCache()->ContainsPc(result))
1057 << method->PrettyMethod() << " code will jump to possibly cleaned up jit code!";
1058 }
1059
1060 bool interpreter_entry = (result == GetQuickToInterpreterBridge());
1061 bool is_static = method->IsStatic();
1062 uint32_t shorty_len;
1063 const char* shorty =
1064 method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);
1065
1066 ScopedObjectAccessUnchecked soa(self);
1067 RememberForGcArgumentVisitor visitor(sp, is_static, shorty, shorty_len, &soa);
1068 visitor.VisitArguments();
1069
1070 instrumentation->PushInstrumentationStackFrame(self,
1071 is_static ? nullptr : this_object,
1072 method,
1073 reinterpret_cast<uintptr_t>(
1074 QuickArgumentVisitor::GetCallingPcAddr(sp)),
1075 QuickArgumentVisitor::GetCallingPc(sp),
1076 interpreter_entry);
1077
1078 visitor.FixupReferences();
1079 if (UNLIKELY(self->IsExceptionPending())) {
1080 return nullptr;
1081 }
1082 CHECK(result != nullptr) << method->PrettyMethod();
1083 return result;
1084 }
1085
1086 extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self,
1087 ArtMethod** sp,
1088 uint64_t* gpr_result,
1089 uint64_t* fpr_result)
1090 REQUIRES_SHARED(Locks::mutator_lock_) {
1091 DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current()));
1092 CHECK(gpr_result != nullptr);
1093 CHECK(fpr_result != nullptr);
1094 // Instrumentation exit stub must not be entered with a pending exception.
1095 CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
1096 << self->GetException()->Dump();
1097 // Compute address of return PC and check that it currently holds 0.
1098 constexpr size_t return_pc_offset =
1099 RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverything);
1100 uintptr_t* return_pc_addr = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
1101 return_pc_offset);
1102 CHECK_EQ(*return_pc_addr, 0U);
1103
1104 // Pop the frame filling in the return pc. The low half of the return value is 0 when
1105 // deoptimization shouldn't be performed with the high-half having the return address. When
1106 // deoptimization should be performed the low half is zero and the high-half the address of the
1107 // deoptimization entry point.
1108 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
1109 TwoWordReturn return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame(
1110 self, return_pc_addr, gpr_result, fpr_result);
1111 if (self->IsExceptionPending() || self->ObserveAsyncException()) {
1112 return GetTwoWordFailureValue();
1113 }
1114 return return_or_deoptimize_pc;
1115 }
1116
1117 static std::string DumpInstruction(ArtMethod* method, uint32_t dex_pc)
1118 REQUIRES_SHARED(Locks::mutator_lock_) {
1119 if (dex_pc == static_cast<uint32_t>(-1)) {
1120 CHECK(method == jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt));
1121 return "<native>";
1122 } else {
1123 CodeItemInstructionAccessor accessor = method->DexInstructions();
1124 CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
1125 return accessor.InstructionAt(dex_pc).DumpString(method->GetDexFile());
1126 }
1127 }
1128
1129 static void DumpB74410240ClassData(ObjPtr<mirror::Class> klass)
1130 REQUIRES_SHARED(Locks::mutator_lock_) {
1131 std::string storage;
1132 const char* descriptor = klass->GetDescriptor(&storage);
1133 LOG(FATAL_WITHOUT_ABORT) << " " << DescribeLoaders(klass->GetClassLoader(), descriptor);
1134 const OatDexFile* oat_dex_file = klass->GetDexFile().GetOatDexFile();
1135 if (oat_dex_file != nullptr) {
1136 const OatFile* oat_file = oat_dex_file->GetOatFile();
1137 const char* dex2oat_cmdline =
1138 oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
1139 LOG(FATAL_WITHOUT_ABORT) << " OatFile: " << oat_file->GetLocation()
1140 << "; " << (dex2oat_cmdline != nullptr ? dex2oat_cmdline : "<not recorded>");
1141 }
1142 }
1143
1144 static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
1145 // Mimic the search for the caller and dump some data while doing so.
1146 LOG(FATAL_WITHOUT_ABORT) << "Dumping debugging data, please attach a bugreport to b/74410240.";
1147
1148 constexpr CalleeSaveType type = CalleeSaveType::kSaveRefsAndArgs;
1149 CHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
1150
1151 constexpr size_t callee_frame_size = RuntimeCalleeSaveFrame::GetFrameSize(type);
1152 auto** caller_sp = reinterpret_cast<ArtMethod**>(
1153 reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
1154 constexpr size_t callee_return_pc_offset = RuntimeCalleeSaveFrame::GetReturnPcOffset(type);
1155 uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
1156 (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
1157 ArtMethod* outer_method = *caller_sp;
1158
1159 if (UNLIKELY(caller_pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
1160 LOG(FATAL_WITHOUT_ABORT) << "Method: " << outer_method->PrettyMethod()
1161 << " native pc: " << caller_pc << " Instrumented!";
1162 return;
1163 }
1164
1165 const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
1166 CHECK(current_code != nullptr);
1167 CHECK(current_code->IsOptimized());
1168 uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
1169 CodeInfo code_info(current_code);
1170 StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
1171 CHECK(stack_map.IsValid());
1172 uint32_t dex_pc = stack_map.GetDexPc();
1173
1174 // Log the outer method and its associated dex file and class table pointer which can be used
1175 // to find out if the inlined methods were defined by other dex file(s) or class loader(s).
1176 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1177 LOG(FATAL_WITHOUT_ABORT) << "Outer: " << outer_method->PrettyMethod()
1178 << " native pc: " << caller_pc
1179 << " dex pc: " << dex_pc
1180 << " dex file: " << outer_method->GetDexFile()->GetLocation()
1181 << " class table: " << class_linker->ClassTableForClassLoader(outer_method->GetClassLoader());
1182 DumpB74410240ClassData(outer_method->GetDeclaringClass());
1183 LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(outer_method, dex_pc);
1184
1185 ArtMethod* caller = outer_method;
1186 BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
1187 for (InlineInfo inline_info : inline_infos) {
1188 const char* tag = "";
1189 dex_pc = inline_info.GetDexPc();
1190 if (inline_info.EncodesArtMethod()) {
1191 tag = "encoded ";
1192 caller = inline_info.GetArtMethod();
1193 } else {
1194 uint32_t method_index = code_info.GetMethodIndexOf(inline_info);
1195 if (dex_pc == static_cast<uint32_t>(-1)) {
1196 tag = "special ";
1197 CHECK(inline_info.Equals(inline_infos.back()));
1198 caller = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
1199 CHECK_EQ(caller->GetDexMethodIndex(), method_index);
1200 } else {
1201 ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache();
1202 ObjPtr<mirror::ClassLoader> class_loader = caller->GetClassLoader();
1203 caller = class_linker->LookupResolvedMethod(method_index, dex_cache, class_loader);
1204 CHECK(caller != nullptr);
1205 }
1206 }
1207 LOG(FATAL_WITHOUT_ABORT) << "InlineInfo #" << inline_info.Row()
1208 << ": " << tag << caller->PrettyMethod()
1209 << " dex pc: " << dex_pc
1210 << " dex file: " << caller->GetDexFile()->GetLocation()
1211 << " class table: "
1212 << class_linker->ClassTableForClassLoader(caller->GetClassLoader());
1213 DumpB74410240ClassData(caller->GetDeclaringClass());
1214 LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(caller, dex_pc);
1215 }
1216 }
1217
1218 // Lazily resolve a method for quick. Called by stub code.
1219 extern "C" const void* artQuickResolutionTrampoline(
1220 ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
1221 REQUIRES_SHARED(Locks::mutator_lock_) {
1222 // The resolution trampoline stashes the resolved method into the callee-save frame to transport
1223 // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
1224 // does not have the same stack layout as the callee-save method).
1225 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
1226 // Start new JNI local reference state
1227 JNIEnvExt* env = self->GetJniEnv();
1228 ScopedObjectAccessUnchecked soa(env);
1229 ScopedJniEnvLocalRefState env_state(env);
1230 const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");
1231
1232 // Compute details about the called method (avoid GCs)
1233 ClassLinker* linker = Runtime::Current()->GetClassLinker();
1234 InvokeType invoke_type;
1235 MethodReference called_method(nullptr, 0);
1236 const bool called_method_known_on_entry = !called->IsRuntimeMethod();
1237 ArtMethod* caller = nullptr;
1238 if (!called_method_known_on_entry) {
1239 caller = QuickArgumentVisitor::GetCallingMethod(sp);
1240 called_method.dex_file = caller->GetDexFile();
1241
1242 {
1243 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
1244 CodeItemInstructionAccessor accessor(caller->DexInstructions());
1245 CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
1246 const Instruction& instr = accessor.InstructionAt(dex_pc);
1247 Instruction::Code instr_code = instr.Opcode();
1248 bool is_range;
1249 switch (instr_code) {
1250 case Instruction::INVOKE_DIRECT:
1251 invoke_type = kDirect;
1252 is_range = false;
1253 break;
1254 case Instruction::INVOKE_DIRECT_RANGE:
1255 invoke_type = kDirect;
1256 is_range = true;
1257 break;
1258 case Instruction::INVOKE_STATIC:
1259 invoke_type = kStatic;
1260 is_range = false;
1261 break;
1262 case Instruction::INVOKE_STATIC_RANGE:
1263 invoke_type = kStatic;
1264 is_range = true;
1265 break;
1266 case Instruction::INVOKE_SUPER:
1267 invoke_type = kSuper;
1268 is_range = false;
1269 break;
1270 case Instruction::INVOKE_SUPER_RANGE:
1271 invoke_type = kSuper;
1272 is_range = true;
1273 break;
1274 case Instruction::INVOKE_VIRTUAL:
1275 invoke_type = kVirtual;
1276 is_range = false;
1277 break;
1278 case Instruction::INVOKE_VIRTUAL_RANGE:
1279 invoke_type = kVirtual;
1280 is_range = true;
1281 break;
1282 case Instruction::INVOKE_INTERFACE:
1283 invoke_type = kInterface;
1284 is_range = false;
1285 break;
1286 case Instruction::INVOKE_INTERFACE_RANGE:
1287 invoke_type = kInterface;
1288 is_range = true;
1289 break;
1290 default:
1291 DumpB74410240DebugData(sp);
1292 LOG(FATAL) << "Unexpected call into trampoline: " << instr.DumpString(nullptr);
1293 UNREACHABLE();
1294 }
1295 called_method.index = (is_range) ? instr.VRegB_3rc() : instr.VRegB_35c();
1296 VLOG(dex) << "Accessed dex file for invoke " << invoke_type << " "
1297 << called_method.index;
1298 }
1299 } else {
1300 invoke_type = kStatic;
1301 called_method.dex_file = called->GetDexFile();
1302 called_method.index = called->GetDexMethodIndex();
1303 }
1304 uint32_t shorty_len;
1305 const char* shorty =
1306 called_method.dex_file->GetMethodShorty(called_method.GetMethodId(), &shorty_len);
1307 RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
1308 visitor.VisitArguments();
1309 self->EndAssertNoThreadSuspension(old_cause);
1310 const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
1311 // Resolve method filling in dex cache.
1312 if (!called_method_known_on_entry) {
1313 StackHandleScope<1> hs(self);
1314 mirror::Object* fake_receiver = nullptr;
1315 HandleWrapper<mirror::Object> h_receiver(
1316 hs.NewHandleWrapper(virtual_or_interface ? &receiver : &fake_receiver));
1317 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
1318 called = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
1319 self, called_method.index, caller, invoke_type);
1320 }
1321 const void* code = nullptr;
1322 if (LIKELY(!self->IsExceptionPending())) {
1323 // Incompatible class change should have been handled in resolve method.
1324 CHECK(!called->CheckIncompatibleClassChange(invoke_type))
1325 << called->PrettyMethod() << " " << invoke_type;
1326 if (virtual_or_interface || invoke_type == kSuper) {
1327 // Refine called method based on receiver for kVirtual/kInterface, and
1328 // caller for kSuper.
1329 ArtMethod* orig_called = called;
1330 if (invoke_type == kVirtual) {
1331 CHECK(receiver != nullptr) << invoke_type;
1332 called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize);
1333 } else if (invoke_type == kInterface) {
1334 CHECK(receiver != nullptr) << invoke_type;
1335 called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize);
1336 } else {
1337 DCHECK_EQ(invoke_type, kSuper);
1338 CHECK(caller != nullptr) << invoke_type;
1339 ObjPtr<mirror::Class> ref_class = linker->LookupResolvedType(
1340 caller->GetDexFile()->GetMethodId(called_method.index).class_idx_, caller);
1341 if (ref_class->IsInterface()) {
1342 called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize);
1343 } else {
1344 called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry(
1345 called->GetMethodIndex(), kRuntimePointerSize);
1346 }
1347 }
1348
1349 CHECK(called != nullptr) << orig_called->PrettyMethod() << " "
1350 << mirror::Object::PrettyTypeOf(receiver) << " "
1351 << invoke_type << " " << orig_called->GetVtableIndex();
1352 }
1353 // Now that we know the actual target, update .bss entry in oat file, if
1354 // any.
1355 if (!called_method_known_on_entry) {
1356 // We only put non-copied methods in the BSS. Putting a copy can lead to an
1357 // odd situation where the ArtMethod being executed is unrelated to the
1358 // receiver of the method.
1359 called = called->GetCanonicalMethod();
1360 if (invoke_type == kSuper || invoke_type == kInterface || invoke_type == kVirtual) {
1361 if (called->GetDexFile() == called_method.dex_file) {
1362 called_method.index = called->GetDexMethodIndex();
1363 } else {
1364 called_method.index = called->FindDexMethodIndexInOtherDexFile(
1365 *called_method.dex_file, called_method.index);
1366 DCHECK_NE(called_method.index, dex::kDexNoIndex);
1367 }
1368 }
1369 MaybeUpdateBssMethodEntry(called, called_method);
1370 }
1371
1372 // Static invokes need a class initialization check, but instance invokes can proceed even if
1373 // the class is erroneous, i.e. in the edge case of escaping instances of erroneous classes.
1374 bool success = true;
1375 ObjPtr<mirror::Class> called_class = called->GetDeclaringClass();
1376 if (NeedsClinitCheckBeforeCall(called) && !called_class->IsVisiblyInitialized()) {
1377 // Ensure that the called method's class is initialized.
1378 StackHandleScope<1> hs(soa.Self());
1379 HandleWrapperObjPtr<mirror::Class> h_called_class(hs.NewHandleWrapper(&called_class));
1380 success = linker->EnsureInitialized(soa.Self(), h_called_class, true, true);
1381 }
1382 if (success) {
1383 code = called->GetEntryPointFromQuickCompiledCode();
1384 if (linker->IsQuickResolutionStub(code)) {
1385 DCHECK_EQ(invoke_type, kStatic);
1386 // Go to JIT or oat and grab code.
1387 code = linker->GetQuickOatCodeFor(called);
1388 }
1389 if (linker->ShouldUseInterpreterEntrypoint(called, code)) {
1390 code = GetQuickToInterpreterBridge();
1391 }
1392 } else {
1393 DCHECK(called_class->IsErroneous());
1394 DCHECK(self->IsExceptionPending());
1395 }
1396 }
1397 CHECK_EQ(code == nullptr, self->IsExceptionPending());
1398 // Fix up any locally saved objects that may have moved during a GC.
1399 visitor.FixupReferences();
1400 // Place called method in callee-save frame to be placed as first argument to quick method.
1401 *sp = called;
1402
1403 return code;
1404 }
1405
1406 /*
1407 * This class uses a couple of observations to unite the different calling conventions through
1408 * a few constants.
1409 *
1410 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
1411 * possible alignment.
1412 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
1413 * types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
1414 * when we have to split things.
1415 * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats
1416 * and we can use Int handling directly.
1417 * 4) Only 64b architectures widen, and their stack is aligned 8B anyway, so no padding code
1418 * necessary when widening. Also, widening of Ints will take place implicitly, and the
1419 * extension should be compatible with Aarch64, which mandates copying the available bits
1420 * into LSB and leaving the rest unspecified.
1421 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
1422 * the stack.
1423 * 6) All supported architectures are little-endian.
1424 *
1425 *
1426 * Actual work is supposed to be done in a delegate of the template type. The interface is as
1427 * follows:
1428 *
1429 * void PushGpr(uintptr_t): Add a value for the next GPR
1430 *
1431 * void PushFpr4(float): Add a value for the next FPR of size 32b. Only called if we need
1432 * padding, i.e. when the architecture is 32b and aligns 64b values.
1433 *
1434 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b; it is the delegate's
1435 * job to split the value if necessary. The state machine will already
1436 * have performed any required alignment.
1437 *
1438 * void PushStack(uintptr_t): Push a value to the stack.
1439 */
1440 template<class T> class BuildNativeCallFrameStateMachine {
1441 public:
1442 #if defined(__arm__)
1443 static constexpr bool kNativeSoftFloatAbi = true;
1444 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs, r0-r3
1445 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1446
1447 static constexpr size_t kRegistersNeededForLong = 2;
1448 static constexpr size_t kRegistersNeededForDouble = 2;
1449 static constexpr bool kMultiRegistersAligned = true;
1450 static constexpr bool kMultiFPRegistersWidened = false;
1451 static constexpr bool kMultiGPRegistersWidened = false;
1452 static constexpr bool kAlignLongOnStack = true;
1453 static constexpr bool kAlignDoubleOnStack = true;
1454 #elif defined(__aarch64__)
1455 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
1456 static constexpr size_t kNumNativeGprArgs = 8; // 8 arguments passed in GPRs.
1457 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1458
1459 static constexpr size_t kRegistersNeededForLong = 1;
1460 static constexpr size_t kRegistersNeededForDouble = 1;
1461 static constexpr bool kMultiRegistersAligned = false;
1462 static constexpr bool kMultiFPRegistersWidened = false;
1463 static constexpr bool kMultiGPRegistersWidened = false;
1464 static constexpr bool kAlignLongOnStack = false;
1465 static constexpr bool kAlignDoubleOnStack = false;
1466 #elif defined(__i386__)
1467 static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp
1468 static constexpr size_t kNumNativeGprArgs = 0; // 0 arguments passed in GPRs.
1469 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1470
1471 static constexpr size_t kRegistersNeededForLong = 2;
1472 static constexpr size_t kRegistersNeededForDouble = 2;
1473 static constexpr bool kMultiRegistersAligned = false; // x86 not using regs, anyways
1474 static constexpr bool kMultiFPRegistersWidened = false;
1475 static constexpr bool kMultiGPRegistersWidened = false;
1476 static constexpr bool kAlignLongOnStack = false;
1477 static constexpr bool kAlignDoubleOnStack = false;
1478 #elif defined(__x86_64__)
1479 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
1480 static constexpr size_t kNumNativeGprArgs = 6; // 6 arguments passed in GPRs.
1481 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1482
1483 static constexpr size_t kRegistersNeededForLong = 1;
1484 static constexpr size_t kRegistersNeededForDouble = 1;
1485 static constexpr bool kMultiRegistersAligned = false;
1486 static constexpr bool kMultiFPRegistersWidened = false;
1487 static constexpr bool kMultiGPRegistersWidened = false;
1488 static constexpr bool kAlignLongOnStack = false;
1489 static constexpr bool kAlignDoubleOnStack = false;
1490 #else
1491 #error "Unsupported architecture"
1492 #endif
1493
1494 public:
1495 explicit BuildNativeCallFrameStateMachine(T* delegate)
1496 : gpr_index_(kNumNativeGprArgs),
1497 fpr_index_(kNumNativeFprArgs),
1498 stack_entries_(0),
1499 delegate_(delegate) {
1500 // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
1501 // the next register is even; counting down is just to make the compiler happy...
1502 static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
1503 static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
1504 }
1505
1506 virtual ~BuildNativeCallFrameStateMachine() {}
1507
1508 bool HavePointerGpr() const {
1509 return gpr_index_ > 0;
1510 }
1511
1512 void AdvancePointer(const void* val) {
1513 if (HavePointerGpr()) {
1514 gpr_index_--;
1515 PushGpr(reinterpret_cast<uintptr_t>(val));
1516 } else {
1517 stack_entries_++; // TODO: have a field for pointer length as multiple of 32b
1518 PushStack(reinterpret_cast<uintptr_t>(val));
1519 gpr_index_ = 0;
1520 }
1521 }
1522
1523 bool HaveIntGpr() const {
1524 return gpr_index_ > 0;
1525 }
1526
1527 void AdvanceInt(uint32_t val) {
1528 if (HaveIntGpr()) {
1529 gpr_index_--;
1530 if (kMultiGPRegistersWidened) {
1531 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
1532 PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
1533 } else {
1534 PushGpr(val);
1535 }
1536 } else {
1537 stack_entries_++;
1538 if (kMultiGPRegistersWidened) {
1539 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
1540 PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
1541 } else {
1542 PushStack(val);
1543 }
1544 gpr_index_ = 0;
1545 }
1546 }
1547
1548 bool HaveLongGpr() const {
1549 return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
1550 }
1551
1552 bool LongGprNeedsPadding() const {
1553 return kRegistersNeededForLong > 1 && // only pad when using multiple registers
1554 kAlignLongOnStack && // and when it needs alignment
1555 (gpr_index_ & 1) == 1; // counter is odd, see constructor
1556 }
1557
1558 bool LongStackNeedsPadding() const {
1559 return kRegistersNeededForLong > 1 && // only pad when using multiple registers
1560 kAlignLongOnStack && // and when it needs 8B alignment
1561 (stack_entries_ & 1) == 1; // counter is odd
1562 }
1563
1564 void AdvanceLong(uint64_t val) {
1565 if (HaveLongGpr()) {
1566 if (LongGprNeedsPadding()) {
1567 PushGpr(0);
1568 gpr_index_--;
1569 }
1570 if (kRegistersNeededForLong == 1) {
1571 PushGpr(static_cast<uintptr_t>(val));
1572 } else {
1573 PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1574 PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1575 }
1576 gpr_index_ -= kRegistersNeededForLong;
1577 } else {
1578 if (LongStackNeedsPadding()) {
1579 PushStack(0);
1580 stack_entries_++;
1581 }
1582 if (kRegistersNeededForLong == 1) {
1583 PushStack(static_cast<uintptr_t>(val));
1584 stack_entries_++;
1585 } else {
1586 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1587 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1588 stack_entries_ += 2;
1589 }
1590 gpr_index_ = 0;
1591 }
1592 }
1593
1594 bool HaveFloatFpr() const {
1595 return fpr_index_ > 0;
1596 }
1597
1598 void AdvanceFloat(float val) {
1599 if (kNativeSoftFloatAbi) {
1600 AdvanceInt(bit_cast<uint32_t, float>(val));
1601 } else {
1602 if (HaveFloatFpr()) {
1603 fpr_index_--;
1604 if (kRegistersNeededForDouble == 1) {
1605 if (kMultiFPRegistersWidened) {
1606 PushFpr8(bit_cast<uint64_t, double>(val));
1607 } else {
1608 // No widening, just use the bits.
1609 PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val)));
1610 }
1611 } else {
1612 PushFpr4(val);
1613 }
1614 } else {
1615 stack_entries_++;
1616 if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) {
1617 // Need to widen before storing: Note the "double" in the template instantiation.
1618 // Note: We need to jump through those hoops to make the compiler happy.
1619 DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
1620 PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val)));
1621 } else {
1622 PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
1623 }
1624 fpr_index_ = 0;
1625 }
1626 }
1627 }
1628
1629 bool HaveDoubleFpr() const {
1630 return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
1631 }
1632
1633 bool DoubleFprNeedsPadding() const {
1634 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers
1635 kAlignDoubleOnStack && // and when it needs alignment
1636 (fpr_index_ & 1) == 1; // counter is odd, see constructor
1637 }
1638
1639 bool DoubleStackNeedsPadding() const {
1640 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers
1641 kAlignDoubleOnStack && // and when it needs 8B alignment
1642 (stack_entries_ & 1) == 1; // counter is odd
1643 }
1644
1645 void AdvanceDouble(uint64_t val) {
1646 if (kNativeSoftFloatAbi) {
1647 AdvanceLong(val);
1648 } else {
1649 if (HaveDoubleFpr()) {
1650 if (DoubleFprNeedsPadding()) {
1651 PushFpr4(0);
1652 fpr_index_--;
1653 }
1654 PushFpr8(val);
1655 fpr_index_ -= kRegistersNeededForDouble;
1656 } else {
1657 if (DoubleStackNeedsPadding()) {
1658 PushStack(0);
1659 stack_entries_++;
1660 }
1661 if (kRegistersNeededForDouble == 1) {
1662 PushStack(static_cast<uintptr_t>(val));
1663 stack_entries_++;
1664 } else {
1665 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1666 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1667 stack_entries_ += 2;
1668 }
1669 fpr_index_ = 0;
1670 }
1671 }
1672 }
1673
1674 uint32_t GetStackEntries() const {
1675 return stack_entries_;
1676 }
1677
1678 uint32_t GetNumberOfUsedGprs() const {
1679 return kNumNativeGprArgs - gpr_index_;
1680 }
1681
1682 uint32_t GetNumberOfUsedFprs() const {
1683 return kNumNativeFprArgs - fpr_index_;
1684 }
1685
1686 private:
1687 void PushGpr(uintptr_t val) {
1688 delegate_->PushGpr(val);
1689 }
1690 void PushFpr4(float val) {
1691 delegate_->PushFpr4(val);
1692 }
1693 void PushFpr8(uint64_t val) {
1694 delegate_->PushFpr8(val);
1695 }
1696 void PushStack(uintptr_t val) {
1697 delegate_->PushStack(val);
1698 }
1699
1700 uint32_t gpr_index_; // Number of free GPRs
1701 uint32_t fpr_index_; // Number of free FPRs
1702 uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not
1703 // extended
1704 T* const delegate_; // What Push implementation gets called
1705 };
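// A minimal sketch of a delegate satisfying the Push* interface described above; the name
// `CountingDelegate` and its vectors are hypothetical, used only to illustrate how the state
// machine drives a delegate:
//
//   class CountingDelegate {
//    public:
//     void PushGpr(uintptr_t val) { gprs_.push_back(val); }
//     void PushFpr4(float val) { fprs32_.push_back(bit_cast<uint32_t, float>(val)); }
//     void PushFpr8(uint64_t val) { fprs64_.push_back(val); }
//     void PushStack(uintptr_t val) { stack_.push_back(val); }
//    private:
//     std::vector<uintptr_t> gprs_, stack_;
//     std::vector<uint32_t> fprs32_;
//     std::vector<uint64_t> fprs64_;
//   };
//
//   // Driving it for the arguments of a shorty such as "VIJF" (void f(int, long, float)):
//   CountingDelegate delegate;
//   BuildNativeCallFrameStateMachine<CountingDelegate> sm(&delegate);
//   sm.AdvanceInt(0);       // 'I'
//   sm.AdvanceLong(0);      // 'J', split into two GPRs or stack slots on 32b targets
//   sm.AdvanceFloat(0.0f);  // 'F', routed through AdvanceInt on the soft-float Arm ABI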
1706
1707 // Computes the sizes of register stacks and call stack area. Handling of references can be extended
1708 // in subclasses.
1709 //
1710 // To handle native pointers, use "L" in the shorty for an object reference, which simulates
1711 // them with handles.
1712 class ComputeNativeCallFrameSize {
1713 public:
1714 ComputeNativeCallFrameSize() : num_stack_entries_(0) {}
1715
1716 virtual ~ComputeNativeCallFrameSize() {}
1717
1718 uint32_t GetStackSize() const {
1719 return num_stack_entries_ * sizeof(uintptr_t);
1720 }
1721
1722 uint8_t* LayoutStackArgs(uint8_t* sp8) const {
1723 sp8 -= GetStackSize();
1724 // Align by kStackAlignment; it is at least as strict as native stack alignment.
1725 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1726 return sp8;
1727 }
1728
1729 virtual void WalkHeader(
1730 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
1731 REQUIRES_SHARED(Locks::mutator_lock_) {
1732 }
1733
1734 void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) {
1735 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
1736
1737 WalkHeader(&sm);
1738
1739 for (uint32_t i = 1; i < shorty_len; ++i) {
1740 Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
1741 switch (cur_type_) {
1742 case Primitive::kPrimNot:
1743 sm.AdvancePointer(nullptr);
1744 break;
1745 case Primitive::kPrimBoolean:
1746 case Primitive::kPrimByte:
1747 case Primitive::kPrimChar:
1748 case Primitive::kPrimShort:
1749 case Primitive::kPrimInt:
1750 sm.AdvanceInt(0);
1751 break;
1752 case Primitive::kPrimFloat:
1753 sm.AdvanceFloat(0);
1754 break;
1755 case Primitive::kPrimDouble:
1756 sm.AdvanceDouble(0);
1757 break;
1758 case Primitive::kPrimLong:
1759 sm.AdvanceLong(0);
1760 break;
1761 default:
1762 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
1763 UNREACHABLE();
1764 }
1765 }
1766
1767 num_stack_entries_ = sm.GetStackEntries();
1768 }
1769
1770 void PushGpr(uintptr_t /* val */) {
1771 // not optimizing registers, yet
1772 }
1773
1774 void PushFpr4(float /* val */) {
1775 // not optimizing registers, yet
1776 }
1777
1778 void PushFpr8(uint64_t /* val */) {
1779 // not optimizing registers, yet
1780 }
1781
1782 void PushStack(uintptr_t /* val */) {
1783 // counting is already done in the superclass
1784 }
1785
1786 protected:
1787 uint32_t num_stack_entries_;
1788 };
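// A worked example (hypothetical shorty, for illustration): on the 32-bit Arm configuration
// above (4 GPR args, soft-float, longs split over two registers and aligned), walking the
// shorty "VJIJ" proceeds as follows:
//
//   ComputeNativeCallFrameSize fsc;
//   fsc.Walk("VJIJ", 4);     // base-class WalkHeader() adds nothing
//   // 'J': even GPR counter, no padding -> uses r0/r1
//   // 'I': -> uses r2
//   // 'J': only r3 left but two aligned registers are needed -> spills to 2 stack slots
//   fsc.GetStackSize();      // 2 * sizeof(uintptr_t) == 8 bytes of out args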
1789
1790 class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize {
1791 public:
1792 explicit ComputeGenericJniFrameSize(bool critical_native)
1793 : critical_native_(critical_native) {}
1794
1795 uintptr_t* ComputeLayout(ArtMethod** managed_sp, const char* shorty, uint32_t shorty_len)
1796 REQUIRES_SHARED(Locks::mutator_lock_) {
1797 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
1798
1799 Walk(shorty, shorty_len);
1800
1801 // Add space for cookie.
1802 DCHECK_ALIGNED(managed_sp, sizeof(uintptr_t));
1803 static_assert(sizeof(uintptr_t) >= sizeof(IRTSegmentState));
1804 uint8_t* sp8 = reinterpret_cast<uint8_t*>(managed_sp) - sizeof(uintptr_t);
1805
1806 // Layout stack arguments.
1807 sp8 = LayoutStackArgs(sp8);
1808
1809 // Return the new bottom.
1810 DCHECK_ALIGNED(sp8, sizeof(uintptr_t));
1811 return reinterpret_cast<uintptr_t*>(sp8);
1812 }
1813
1814 static uintptr_t* GetStartGprRegs(uintptr_t* reserved_area) {
1815 return reserved_area;
1816 }
1817
1818 static uint32_t* GetStartFprRegs(uintptr_t* reserved_area) {
1819 constexpr size_t num_gprs =
1820 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
1821 return reinterpret_cast<uint32_t*>(GetStartGprRegs(reserved_area) + num_gprs);
1822 }
1823
1824 static uintptr_t* GetHiddenArgSlot(uintptr_t* reserved_area) {
1825 // Note: `num_fprs` is 0 on architectures where sizeof(uintptr_t) does not match the
1826 // FP register size (it is actually 0 on all supported 32-bit architectures).
1827 constexpr size_t num_fprs =
1828 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
1829 return reinterpret_cast<uintptr_t*>(GetStartFprRegs(reserved_area)) + num_fprs;
1830 }
1831
1832 static uintptr_t* GetOutArgsSpSlot(uintptr_t* reserved_area) {
1833 return GetHiddenArgSlot(reserved_area) + 1;
1834 }
1835
1836 // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
1837 void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) override
1838 REQUIRES_SHARED(Locks::mutator_lock_);
1839
1840 private:
1841 const bool critical_native_;
1842 };
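// For example, with the aarch64 constants above (8 GPR args, 8 FPR args, 8-byte uintptr_t),
// the getters carve the reserved area up as follows, counting in uintptr_t-sized slots
// (`ra` is shorthand for the `reserved_area` argument, used only for illustration):
//   GetStartGprRegs(ra)  == ra + 0    // 8 slots of GPR argument values
//   GetStartFprRegs(ra)  == ra + 8    // 8 slots of FPR argument values
//   GetHiddenArgSlot(ra) == ra + 16   // tagged ArtMethod* for @CriticalNative, dummy otherwise
//   GetOutArgsSpSlot(ra) == ra + 17   // SP the stub loads to perform the native call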
1843
1844 void ComputeGenericJniFrameSize::WalkHeader(
1845 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
1846 // First 2 parameters are always excluded for @CriticalNative.
1847 if (UNLIKELY(critical_native_)) {
1848 return;
1849 }
1850
1851 // JNIEnv
1852 sm->AdvancePointer(nullptr);
1853
1854 // Class object or this as first argument
1855 sm->AdvancePointer(nullptr);
1856 }
1857
1858 // Class to push values to three separate regions. Used to fill the native call part. Adheres to
1859 // the template requirements of BuildGenericJniFrameStateMachine.
1860 class FillNativeCall {
1861 public:
1862 FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
1863 cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
1864
1865 virtual ~FillNativeCall() {}
1866
1867 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
1868 cur_gpr_reg_ = gpr_regs;
1869 cur_fpr_reg_ = fpr_regs;
1870 cur_stack_arg_ = stack_args;
1871 }
1872
1873 void PushGpr(uintptr_t val) {
1874 *cur_gpr_reg_ = val;
1875 cur_gpr_reg_++;
1876 }
1877
1878 void PushFpr4(float val) {
1879 *cur_fpr_reg_ = val;
1880 cur_fpr_reg_++;
1881 }
1882
1883 void PushFpr8(uint64_t val) {
1884 uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1885 *tmp = val;
1886 cur_fpr_reg_ += 2;
1887 }
1888
1889 void PushStack(uintptr_t val) {
1890 *cur_stack_arg_ = val;
1891 cur_stack_arg_++;
1892 }
1893
1894 private:
1895 uintptr_t* cur_gpr_reg_;
1896 uint32_t* cur_fpr_reg_;
1897 uintptr_t* cur_stack_arg_;
1898 };
1899
1900 // Visits arguments on the stack placing them into a region lower down the stack for the benefit
1901 // of transitioning into native code.
1902 class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
1903 public:
1904 BuildGenericJniFrameVisitor(Thread* self,
1905 bool is_static,
1906 bool critical_native,
1907 const char* shorty,
1908 uint32_t shorty_len,
1909 ArtMethod** managed_sp,
1910 uintptr_t* reserved_area)
1911 : QuickArgumentVisitor(managed_sp, is_static, shorty, shorty_len),
1912 jni_call_(nullptr, nullptr, nullptr, critical_native),
1913 sm_(&jni_call_),
1914 current_vreg_(nullptr) {
1915 DCHECK_ALIGNED(managed_sp, kStackAlignment);
1916 DCHECK_ALIGNED(reserved_area, sizeof(uintptr_t));
1917
1918 ComputeGenericJniFrameSize fsc(critical_native);
1919 uintptr_t* out_args_sp = fsc.ComputeLayout(managed_sp, shorty, shorty_len);
1920
1921 // Store hidden argument for @CriticalNative.
1922 uintptr_t* hidden_arg_slot = fsc.GetHiddenArgSlot(reserved_area);
1923 constexpr uintptr_t kGenericJniTag = 1u;
1924 ArtMethod* method = *managed_sp;
1925 *hidden_arg_slot = critical_native ? (reinterpret_cast<uintptr_t>(method) | kGenericJniTag)
1926 : 0xebad6a89u; // Bad value.
1927
1928 // Set out args SP.
1929 uintptr_t* out_args_sp_slot = fsc.GetOutArgsSpSlot(reserved_area);
1930 *out_args_sp_slot = reinterpret_cast<uintptr_t>(out_args_sp);
1931
1932 // Prepare vreg pointer for spilling references.
1933 static constexpr size_t frame_size =
1934 RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
1935 current_vreg_ = reinterpret_cast<uint32_t*>(
1936 reinterpret_cast<uint8_t*>(managed_sp) + frame_size + sizeof(ArtMethod*));
1937
1938 jni_call_.Reset(fsc.GetStartGprRegs(reserved_area),
1939 fsc.GetStartFprRegs(reserved_area),
1940 out_args_sp);
1941
1942 // First 2 parameters are always excluded for CriticalNative methods.
1943 if (LIKELY(!critical_native)) {
1944 // The JNI environment is always the first argument.
1945 sm_.AdvancePointer(self->GetJniEnv());
1946
1947 if (is_static) {
1948 // The `jclass` is a pointer to the method's declaring class.
1949 // The declaring class must be marked.
1950 auto* declaring_class = reinterpret_cast<mirror::CompressedReference<mirror::Class>*>(
1951 method->GetDeclaringClassAddressWithoutBarrier());
1952 if (kUseReadBarrier) {
1953 ReadBarrierJni(declaring_class, self);
1954 }
1955 sm_.AdvancePointer(declaring_class);
1956 } // else "this" reference is already handled by QuickArgumentVisitor.
1957 }
1958 }
1959
1960 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
1961
1962 private:
1963 // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
1964 class FillJniCall final : public FillNativeCall {
1965 public:
1966 FillJniCall(uintptr_t* gpr_regs,
1967 uint32_t* fpr_regs,
1968 uintptr_t* stack_args,
1969 bool critical_native)
1970 : FillNativeCall(gpr_regs, fpr_regs, stack_args),
1971 cur_entry_(0),
1972 critical_native_(critical_native) {}
1973
1974 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
1975 FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
1976 cur_entry_ = 0U;
1977 }
1978
1979 bool CriticalNative() const {
1980 return critical_native_;
1981 }
1982
1983 private:
1984 size_t cur_entry_;
1985 const bool critical_native_;
1986 };
1987
1988 FillJniCall jni_call_;
1989 BuildNativeCallFrameStateMachine<FillJniCall> sm_;
1990
1991 // Pointer to the current vreg in caller's reserved out vreg area.
1992 // Used for spilling reference arguments.
1993 uint32_t* current_vreg_;
1994
1995 DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
1996 };
1997
1998 void BuildGenericJniFrameVisitor::Visit() {
1999 Primitive::Type type = GetParamPrimitiveType();
2000 switch (type) {
2001 case Primitive::kPrimLong: {
2002 jlong long_arg;
2003 if (IsSplitLongOrDouble()) {
2004 long_arg = ReadSplitLongParam();
2005 } else {
2006 long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
2007 }
2008 sm_.AdvanceLong(long_arg);
2009 current_vreg_ += 2u;
2010 break;
2011 }
2012 case Primitive::kPrimDouble: {
2013 uint64_t double_arg;
2014 if (IsSplitLongOrDouble()) {
2015 // Read as raw bits so that we don't cast to a double.
2016 double_arg = ReadSplitLongParam();
2017 } else {
2018 double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
2019 }
2020 sm_.AdvanceDouble(double_arg);
2021 current_vreg_ += 2u;
2022 break;
2023 }
2024 case Primitive::kPrimNot: {
2025 mirror::Object* obj =
2026 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress())->AsMirrorPtr();
2027 StackReference<mirror::Object>* spill_ref =
2028 reinterpret_cast<StackReference<mirror::Object>*>(current_vreg_);
2029 spill_ref->Assign(obj);
2030 sm_.AdvancePointer(obj != nullptr ? spill_ref : nullptr);
2031 current_vreg_ += 1u;
2032 break;
2033 }
2034 case Primitive::kPrimFloat:
2035 sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
2036 current_vreg_ += 1u;
2037 break;
2038 case Primitive::kPrimBoolean: // Fall-through.
2039 case Primitive::kPrimByte: // Fall-through.
2040 case Primitive::kPrimChar: // Fall-through.
2041 case Primitive::kPrimShort: // Fall-through.
2042 case Primitive::kPrimInt: // Fall-through.
2043 sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
2044 current_vreg_ += 1u;
2045 break;
2046 case Primitive::kPrimVoid:
2047 LOG(FATAL) << "UNREACHABLE";
2048 UNREACHABLE();
2049 }
2050 }
2051
2052 /*
2053 * Initializes the reserved area assumed to be directly below `managed_sp` for a native call:
2054 *
2055 * On entry, the stack has a standard callee-save frame above `managed_sp`,
2056 * and the reserved area below it. Starting below `managed_sp`, we reserve space
2057 * for local reference cookie (not present for @CriticalNative), HandleScope
2058 * (not present for @CriticalNative) and stack args (if args do not fit into
2059 * registers). At the bottom of the reserved area, there is space for register
2060 * arguments, hidden arg (for @CriticalNative) and the SP for the native call
2061 * (i.e. pointer to the stack args area), which the calling stub shall load
2062 * to perform the native call. We fill all these fields, perform class init
2063 * check (for static methods) and/or locking (for synchronized methods) if
2064 * needed and return to the stub.
2065 *
2066 * The return value is the pointer to the native code, null on failure.
2067 */
2068 extern "C" const void* artQuickGenericJniTrampoline(Thread* self,
2069 ArtMethod** managed_sp,
2070 uintptr_t* reserved_area)
2071 REQUIRES_SHARED(Locks::mutator_lock_) {
2072 // Note: We cannot walk the stack properly until fixed up below.
2073 ArtMethod* called = *managed_sp;
2074 DCHECK(called->IsNative()) << called->PrettyMethod(true);
2075 Runtime* runtime = Runtime::Current();
2076 uint32_t shorty_len = 0;
2077 const char* shorty = called->GetShorty(&shorty_len);
2078 bool critical_native = called->IsCriticalNative();
2079 bool fast_native = called->IsFastNative();
2080 bool normal_native = !critical_native && !fast_native;
2081
2082 // Run the visitor and update sp.
2083 BuildGenericJniFrameVisitor visitor(self,
2084 called->IsStatic(),
2085 critical_native,
2086 shorty,
2087 shorty_len,
2088 managed_sp,
2089 reserved_area);
2090 {
2091 ScopedAssertNoThreadSuspension sants(__FUNCTION__);
2092 visitor.VisitArguments();
2093 }
2094
2095 // Fix up managed-stack things in Thread. After this we can walk the stack.
2096 self->SetTopOfStackTagged(managed_sp);
2097
2098 self->VerifyStack();
2099
2100 // We can now walk the stack if needed by JIT GC from MethodEntered() for JIT-on-first-use.
2101 jit::Jit* jit = runtime->GetJit();
2102 if (jit != nullptr) {
2103 jit->MethodEntered(self, called);
2104 }
2105
2106 // We can set the entrypoint of a native method to generic JNI even when the
2107 // class hasn't been initialized, so we need to do the initialization check
2108 // before invoking the native code.
2109 if (NeedsClinitCheckBeforeCall(called)) {
2110 ObjPtr<mirror::Class> declaring_class = called->GetDeclaringClass();
2111 if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
2112 // Ensure static method's class is initialized.
2113 StackHandleScope<1> hs(self);
2114 Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
2115 if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
2116 DCHECK(Thread::Current()->IsExceptionPending()) << called->PrettyMethod();
2117 return nullptr; // Report error.
2118 }
2119 }
2120 }
2121
2122 uint32_t cookie;
2123 uint32_t* sp32;
2124 // Skip calling JniMethodStart for @CriticalNative.
2125 if (LIKELY(!critical_native)) {
2126 // Start JNI, save the cookie.
2127 if (called->IsSynchronized()) {
2128 DCHECK(normal_native) << " @FastNative and synchronized are not supported";
2129 jobject lock = GetGenericJniSynchronizationObject(self, called);
2130 cookie = JniMethodStartSynchronized(lock, self);
2131 if (self->IsExceptionPending()) {
2132 return nullptr; // Report error.
2133 }
2134 } else {
2135 if (fast_native) {
2136 cookie = JniMethodFastStart(self);
2137 } else {
2138 DCHECK(normal_native);
2139 cookie = JniMethodStart(self);
2140 }
2141 }
2142 sp32 = reinterpret_cast<uint32_t*>(managed_sp);
2143 *(sp32 - 1) = cookie;
2144 }
2145
2146 // Retrieve the stored native code.
2147 // Note that it may point to the lookup stub or trampoline.
2148 // FIXME: This is broken for @CriticalNative as the art_jni_dlsym_lookup_stub
2149 // does not handle that case. Calls from compiled stubs are also broken.
2150 void const* nativeCode = called->GetEntryPointFromJni();
2151
2152 VLOG(third_party_jni) << "GenericJNI: "
2153 << called->PrettyMethod()
2154 << " -> "
2155 << std::hex << reinterpret_cast<uintptr_t>(nativeCode);
2156
2157 // Return native code.
2158 return nativeCode;
2159 }
2160
2161 // Defined in quick_jni_entrypoints.cc.
2162 extern uint64_t GenericJniMethodEnd(Thread* self,
2163 uint32_t saved_local_ref_cookie,
2164 jvalue result,
2165 uint64_t result_f,
2166 ArtMethod* called);
2167
2168 /*
2169 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
2170 * unlocking.
2171 */
2172 extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
2173 jvalue result,
2174 uint64_t result_f) {
2175 // We're here just back from a native call. We don't have the shared mutator lock at this point
2176 // yet until we call GoToRunnable() later in GenericJniMethodEnd(). Accessing objects or doing
2177 // anything that requires a mutator lock before that would cause problems as GC may have the
2178 // exclusive mutator lock and may be moving objects, etc.
2179 ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
2180 DCHECK(self->GetManagedStack()->GetTopQuickFrameTag());
2181 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
2182 ArtMethod* called = *sp;
2183 uint32_t cookie = *(sp32 - 1);
2184 return GenericJniMethodEnd(self, cookie, result, result_f, called);
2185 }
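// Putting the two trampolines together, the generic JNI assembly stub's sequence is roughly
// the following (a simplified sketch; the real control flow lives in the per-architecture
// art_quick_generic_jni_trampoline stubs):
//
//   native_code = artQuickGenericJniTrampoline(self, managed_sp, reserved_area);
//   if (native_code == nullptr) goto exception_delivery;
//   // load GPR/FPR arguments and the out-args SP from the reserved area
//   result = native_code(args...);
//   packed = artQuickGenericJniEndTrampoline(self, result, result_f);
//   // unpack the return value and return to the managed caller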
2186
2187 // We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
2188 // for the method pointer.
2189 //
2190 // It is valid to use this, as at the usage points here (returns from C functions) we assume that
2191 // we hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations).
2192
2193 template <InvokeType type, bool access_check>
2194 static TwoWordReturn artInvokeCommon(uint32_t method_idx,
2195 ObjPtr<mirror::Object> this_object,
2196 Thread* self,
2197 ArtMethod** sp) {
2198 ScopedQuickEntrypointChecks sqec(self);
2199 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2200 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2201 ArtMethod* method = FindMethodFast<type, access_check>(method_idx, this_object, caller_method);
2202 if (UNLIKELY(method == nullptr)) {
2203 const DexFile* dex_file = caller_method->GetDexFile();
2204 uint32_t shorty_len;
2205 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
2206 {
2207 // Remember the args in case a GC happens in FindMethodFromCode.
2208 ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2209 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
2210 visitor.VisitArguments();
2211 method = FindMethodFromCode<type, access_check>(method_idx,
2212 &this_object,
2213 caller_method,
2214 self);
2215 visitor.FixupReferences();
2216 }
2217
2218 if (UNLIKELY(method == nullptr)) {
2219 CHECK(self->IsExceptionPending());
2220 return GetTwoWordFailureValue(); // Failure.
2221 }
2222 }
2223 DCHECK(!self->IsExceptionPending());
2224 const void* code = method->GetEntryPointFromQuickCompiledCode();
2225
2226 // When we return, the caller will branch to this address, so it had better not be 0!
2227 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod()
2228 << " location: "
2229 << method->GetDexFile()->GetLocation();
2230
2231 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2232 reinterpret_cast<uintptr_t>(method));
2233 }
2234
2235 // Explicit artInvokeCommon template function declarations to please analysis tool.
2236 #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \
2237 template REQUIRES_SHARED(Locks::mutator_lock_) \
2238 TwoWordReturn artInvokeCommon<type, access_check>( \
2239 uint32_t method_idx, ObjPtr<mirror::Object> this_object, Thread* self, ArtMethod** sp)
2240
2241 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
2242 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
2243 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
2244 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
2245 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
2246 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
2247 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
2248 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
2249 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
2250 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
2251 #undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
2252
2253 // See comments in runtime_support_asm.S
2254 extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
2255 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2256 REQUIRES_SHARED(Locks::mutator_lock_) {
2257 return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
2258 }
2259
2260 extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
2261 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2262 REQUIRES_SHARED(Locks::mutator_lock_) {
2263 return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
2264 }
2265
2266 extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
2267 uint32_t method_idx,
2268 mirror::Object* this_object ATTRIBUTE_UNUSED,
2269 Thread* self,
2270 ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
2271 // For static, this_object is not required and may be random garbage. Don't pass it down so that
2272 // it doesn't cause ObjPtr alignment failure check.
2273 return artInvokeCommon<kStatic, true>(method_idx, nullptr, self, sp);
2274 }
2275
2276 extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
2277 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2278 REQUIRES_SHARED(Locks::mutator_lock_) {
2279 return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
2280 }
2281
2282 extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
2283 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2284 REQUIRES_SHARED(Locks::mutator_lock_) {
2285 return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
2286 }
2287
2288 // Determine target of interface dispatch. The interface method and this object are known non-null.
2289 // The interface method is the method returned by the dex cache in the conflict trampoline.
2290 extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method,
2291 mirror::Object* raw_this_object,
2292 Thread* self,
2293 ArtMethod** sp)
2294 REQUIRES_SHARED(Locks::mutator_lock_) {
2295 ScopedQuickEntrypointChecks sqec(self);
2296
2297 Runtime* runtime = Runtime::Current();
2298 bool resolve_method = ((interface_method == nullptr) || interface_method->IsRuntimeMethod());
2299 if (UNLIKELY(resolve_method)) {
2300 // The interface method is unresolved, so resolve it in the dex file of the caller.
2301 // Fetch the dex_method_idx of the target interface method from the caller.
2302 StackHandleScope<1> hs(self);
2303 Handle<mirror::Object> this_object = hs.NewHandle(raw_this_object);
2304 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2305 uint32_t dex_method_idx;
2306 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
2307 const Instruction& instr = caller_method->DexInstructions().InstructionAt(dex_pc);
2308 Instruction::Code instr_code = instr.Opcode();
2309 DCHECK(instr_code == Instruction::INVOKE_INTERFACE ||
2310 instr_code == Instruction::INVOKE_INTERFACE_RANGE)
2311 << "Unexpected call into interface trampoline: " << instr.DumpString(nullptr);
2312 if (instr_code == Instruction::INVOKE_INTERFACE) {
2313 dex_method_idx = instr.VRegB_35c();
2314 } else {
2315 DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
2316 dex_method_idx = instr.VRegB_3rc();
2317 }
2318
2319 const DexFile& dex_file = *caller_method->GetDexFile();
2320 uint32_t shorty_len;
2321 const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(dex_method_idx),
2322 &shorty_len);
2323 {
2324 // Remember the args in case a GC happens in ClassLinker::ResolveMethod().
2325 ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2326 RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
2327 visitor.VisitArguments();
2328 ClassLinker* class_linker = runtime->GetClassLinker();
2329 interface_method = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
2330 self, dex_method_idx, caller_method, kInterface);
2331 visitor.FixupReferences();
2332 }
2333
2334 if (UNLIKELY(interface_method == nullptr)) {
2335 CHECK(self->IsExceptionPending());
2336 return GetTwoWordFailureValue(); // Failure.
2337 }
2338 MaybeUpdateBssMethodEntry(interface_method, MethodReference(&dex_file, dex_method_idx));
2339
2340 // Refresh `raw_this_object` which may have changed after resolution.
2341 raw_this_object = this_object.Get();
2342 }
2343
2344 // The compiler and interpreter make sure the conflict trampoline is never
2345 // called on a method that resolves to j.l.Object.
2346 DCHECK(!interface_method->GetDeclaringClass()->IsObjectClass());
2347 DCHECK(interface_method->GetDeclaringClass()->IsInterface());
2348 DCHECK(!interface_method->IsRuntimeMethod());
2349 DCHECK(!interface_method->IsCopied());
2350
2351 ObjPtr<mirror::Object> obj_this = raw_this_object;
2352 ObjPtr<mirror::Class> cls = obj_this->GetClass();
2353 uint32_t imt_index = interface_method->GetImtIndex();
2354 ImTable* imt = cls->GetImt(kRuntimePointerSize);
2355 ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
2356 DCHECK(conflict_method->IsRuntimeMethod());
2357
2358 if (UNLIKELY(resolve_method)) {
2359 // Now that we know the interface method, look it up in the conflict table.
2360 ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
2361 DCHECK(current_table != nullptr);
2362 ArtMethod* method = current_table->Lookup(interface_method, kRuntimePointerSize);
2363 if (method != nullptr) {
2364 return GetTwoWordSuccessValue(
2365 reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()),
2366 reinterpret_cast<uintptr_t>(method));
2367 }
2368 // Interface method is not in the conflict table. Continue looking up in the
2369 // iftable.
2370 }
2371
2372 ArtMethod* method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
2373 if (UNLIKELY(method == nullptr)) {
2374 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2375 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
2376 interface_method, obj_this.Ptr(), caller_method);
2377 return GetTwoWordFailureValue();
2378 }
2379
2380 // We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
2381 // We create a new table with the new pair { interface_method, method }.
2382
2383 // Classes in the boot image should never need to update conflict methods in
2384 // their IMT.
2385 CHECK(!runtime->GetHeap()->ObjectIsInBootImageSpace(cls.Ptr())) << cls->PrettyClass();
2386 ArtMethod* new_conflict_method = runtime->GetClassLinker()->AddMethodToConflictTable(
2387 cls.Ptr(),
2388 conflict_method,
2389 interface_method,
2390 method);
2391 if (new_conflict_method != conflict_method) {
2392 // Update the IMT if we create a new conflict method. No fence needed here, as the
2393 // data is consistent.
2394 imt->Set(imt_index,
2395 new_conflict_method,
2396 kRuntimePointerSize);
2397 }
2398
2399 const void* code = method->GetEntryPointFromQuickCompiledCode();
2400
2401 // When we return, the caller will branch to this address, so it had better not be 0!
2402 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod()
2403 << " location: " << method->GetDexFile()->GetLocation();
2404
2405 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2406 reinterpret_cast<uintptr_t>(method));
2407 }
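// Illustration of the table update above (hypothetical classes, not additional behavior):
//   before: imt[imt_index] -> conflict_method with table { (IA.foo, A.foo) }
//   miss:   interface_method == IB.bar, FindVirtualMethodForInterface() == B.bar
//   after:  imt[imt_index] -> new_conflict_method with table { (IA.foo, A.foo), (IB.bar, B.bar) }
// A later IB.bar call through the same IMT slot can then be resolved from the conflict table
// without repeating the interface search.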
2408
2409 // Returns uint64_t representing raw bits from JValue.
2410 extern "C" uint64_t artInvokePolymorphic(mirror::Object* raw_receiver, Thread* self, ArtMethod** sp)
2411 REQUIRES_SHARED(Locks::mutator_lock_) {
2412 ScopedQuickEntrypointChecks sqec(self);
2413 DCHECK(raw_receiver != nullptr);
2414 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2415
2416 // Start new JNI local reference state
2417 JNIEnvExt* env = self->GetJniEnv();
2418 ScopedObjectAccessUnchecked soa(env);
2419 ScopedJniEnvLocalRefState env_state(env);
2420 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
2421
2422 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
2423 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2424 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
2425 const Instruction& inst = caller_method->DexInstructions().InstructionAt(dex_pc);
2426 DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC ||
2427 inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
2428 const dex::ProtoIndex proto_idx(inst.VRegH());
2429 const char* shorty = caller_method->GetDexFile()->GetShorty(proto_idx);
2430 const size_t shorty_length = strlen(shorty);
2431 static const bool kMethodIsStatic = false; // invoke() and invokeExact() are not static.
2432 RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa);
2433 gc_visitor.VisitArguments();
2434
2435 // Wrap raw_receiver in a Handle for safety.
2436 StackHandleScope<3> hs(self);
2437 Handle<mirror::Object> receiver_handle(hs.NewHandle(raw_receiver));
2438 raw_receiver = nullptr;
2439 self->EndAssertNoThreadSuspension(old_cause);
2440
2441 // Resolve method.
2442 ClassLinker* linker = Runtime::Current()->GetClassLinker();
2443 ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
2444 self, inst.VRegB(), caller_method, kVirtual);
2445
2446 Handle<mirror::MethodType> method_type(
2447 hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));
2448 if (UNLIKELY(method_type.IsNull())) {
2449 // This implies we couldn't resolve one or more types in this method handle.
2450 CHECK(self->IsExceptionPending());
2451 return 0UL;
2452 }
2453
2454 DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst.VRegA());
2455 DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic);
2456
2457 // Fix references before constructing the shadow frame.
2458 gc_visitor.FixupReferences();
2459
2460 // Construct shadow frame placing arguments consecutively from |first_arg|.
2461 const bool is_range = (inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
2462 const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc();
2463 const size_t first_arg = 0;
2464 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
2465 CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, resolved_method, dex_pc);
2466 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
2467 ScopedStackedShadowFramePusher
2468 frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
2469 BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
2470 kMethodIsStatic,
2471 shorty,
2472 strlen(shorty),
2473 shadow_frame,
2474 first_arg);
2475 shadow_frame_builder.VisitArguments();
2476
2477 // Push a transition back into managed code onto the linked list in thread.
2478 ManagedStack fragment;
2479 self->PushManagedStackFragment(&fragment);
2480
2481 // Call DoInvokePolymorphic with |is_range| = true, as shadow frame has argument registers in
2482 // consecutive order.
2483 RangeInstructionOperands operands(first_arg + 1, num_vregs - 1);
2484 Intrinsics intrinsic = static_cast<Intrinsics>(resolved_method->GetIntrinsic());
2485 JValue result;
2486 bool success = false;
2487 if (resolved_method->GetDeclaringClass() == GetClassRoot<mirror::MethodHandle>(linker)) {
2488 Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
2489 ObjPtr<mirror::MethodHandle>::DownCast(receiver_handle.Get())));
2490 if (intrinsic == Intrinsics::kMethodHandleInvokeExact) {
2491 success = MethodHandleInvokeExact(self,
2492 *shadow_frame,
2493 method_handle,
2494 method_type,
2495 &operands,
2496 &result);
2497 } else {
2498 DCHECK_EQ(static_cast<uint32_t>(intrinsic),
2499 static_cast<uint32_t>(Intrinsics::kMethodHandleInvoke));
2500 success = MethodHandleInvoke(self,
2501 *shadow_frame,
2502 method_handle,
2503 method_type,
2504 &operands,
2505 &result);
2506 }
2507 } else {
2508 DCHECK_EQ(GetClassRoot<mirror::VarHandle>(linker), resolved_method->GetDeclaringClass());
2509 Handle<mirror::VarHandle> var_handle(hs.NewHandle(
2510 ObjPtr<mirror::VarHandle>::DownCast(receiver_handle.Get())));
2511 mirror::VarHandle::AccessMode access_mode =
2512 mirror::VarHandle::GetAccessModeByIntrinsic(intrinsic);
2513 success = VarHandleInvokeAccessor(self,
2514 *shadow_frame,
2515 var_handle,
2516 method_type,
2517 access_mode,
2518 &operands,
2519 &result);
2520 }
2521
2522 DCHECK(success || self->IsExceptionPending());
2523
2524 // Pop transition record.
2525 self->PopManagedStackFragment(fragment);
2526
2527 return result.GetJ();
2528 }
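// For illustration (hypothetical call site): a caller compiled from
//   int v = (int) mh.invokeExact(obj, 42);   // invoke-polymorphic on a MethodHandle receiver
// reaches this trampoline with |raw_receiver| == mh. The arguments are copied into the shadow
// frame above, |operands| covers the vregs after the receiver, and MethodHandleInvokeExact
// checks them against the call site's MethodType resolved from |proto_idx|.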
2529
2530 // Returns uint64_t representing raw bits from JValue.
2531 extern "C" uint64_t artInvokeCustom(uint32_t call_site_idx, Thread* self, ArtMethod** sp)
2532 REQUIRES_SHARED(Locks::mutator_lock_) {
2533 ScopedQuickEntrypointChecks sqec(self);
2534 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2535
2536 // invoke-custom is effectively a static call (no receiver).
2537 static constexpr bool kMethodIsStatic = true;
2538
2539 // Start new JNI local reference state
2540 JNIEnvExt* env = self->GetJniEnv();
2541 ScopedObjectAccessUnchecked soa(env);
2542 ScopedJniEnvLocalRefState env_state(env);
2543
2544 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
2545
2546 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
2547 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2548 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
2549 const DexFile* dex_file = caller_method->GetDexFile();
2550 const dex::ProtoIndex proto_idx(dex_file->GetProtoIndexForCallSite(call_site_idx));
2551 const char* shorty = caller_method->GetDexFile()->GetShorty(proto_idx);
2552 const uint32_t shorty_len = strlen(shorty);
2553
2554 // Construct the shadow frame placing arguments consecutively from |first_arg|.
2555 const size_t first_arg = 0;
2556 const size_t num_vregs = ArtMethod::NumArgRegisters(shorty);
2557 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
2558 CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, caller_method, dex_pc);
2559 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
2560 ScopedStackedShadowFramePusher
2561 frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
2562 BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
2563 kMethodIsStatic,
2564 shorty,
2565 shorty_len,
2566 shadow_frame,
2567 first_arg);
2568 shadow_frame_builder.VisitArguments();
2569
2570 // Push a transition back into managed code onto the linked list in thread.
2571 ManagedStack fragment;
2572 self->PushManagedStackFragment(&fragment);
2573 self->EndAssertNoThreadSuspension(old_cause);
2574
2575 // Perform the invoke-custom operation.
2576 RangeInstructionOperands operands(first_arg, num_vregs);
2577 JValue result;
2578 bool success =
2579 interpreter::DoInvokeCustom(self, *shadow_frame, call_site_idx, &operands, &result);
2580 DCHECK(success || self->IsExceptionPending());
2581
2582 // Pop transition record.
2583 self->PopManagedStackFragment(fragment);
2584
2585 return result.GetJ();
2586 }
2587
2588 } // namespace art
2589