1 /*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "art_method-inl.h"
18 #include "callee_save_frame.h"
19 #include "common_throws.h"
20 #include "dex_file-inl.h"
21 #include "dex_instruction-inl.h"
22 #include "entrypoints/entrypoint_utils-inl.h"
23 #include "entrypoints/runtime_asm_entrypoints.h"
24 #include "gc/accounting/card_table-inl.h"
25 #include "interpreter/interpreter.h"
26 #include "linear_alloc.h"
27 #include "method_reference.h"
28 #include "mirror/class-inl.h"
29 #include "mirror/dex_cache-inl.h"
30 #include "mirror/method.h"
31 #include "mirror/object-inl.h"
32 #include "mirror/object_array-inl.h"
33 #include "oat_quick_method_header.h"
34 #include "quick_exception_handler.h"
35 #include "runtime.h"
36 #include "scoped_thread_state_change.h"
37 #include "stack.h"
38 #include "debugger.h"
39
40 namespace art {
41
42 // Visits the arguments as saved to the stack by a Runtime::kRefsAndArgs callee save frame.
43 class QuickArgumentVisitor {
44 // Number of bytes for each out register in the caller method's frame.
45 static constexpr size_t kBytesStackArgLocation = 4;
46 // Frame size in bytes of a callee-save frame for RefsAndArgs.
47 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
48 GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
49 #if defined(__arm__)
50 // The callee save frame is pointed to by SP.
51 // | argN | |
52 // | ... | |
53 // | arg4 | |
54 // | arg3 spill | | Caller's frame
55 // | arg2 spill | |
56 // | arg1 spill | |
57 // | Method* | ---
58 // | LR |
59 // | ... | 4x6 bytes callee saves
60 // | R3 |
61 // | R2 |
62 // | R1 |
63 // | S15 |
64 // | : |
65 // | S0 |
66 // | | 4x2 bytes padding
67 // | Method* | <- sp
68 static constexpr bool kSplitPairAcrossRegisterAndStack = kArm32QuickCodeUseSoftFloat;
69 static constexpr bool kAlignPairRegister = !kArm32QuickCodeUseSoftFloat;
70 static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat;
71 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat;
72 static constexpr bool kQuickSkipOddFpRegisters = false;
73 static constexpr size_t kNumQuickGprArgs = 3;
74 static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
75 static constexpr bool kGprFprLockstep = false;
76 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
77 arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg.
78 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
79 arm::ArmCalleeSaveGpr1Offset(Runtime::kRefsAndArgs); // Offset of first GPR arg.
80 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
81 arm::ArmCalleeSaveLrOffset(Runtime::kRefsAndArgs); // Offset of return address.
82 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
83 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
84 }
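// For example, with kNumQuickGprArgs == 3 the GPR arguments live in R1-R3 (see the diagram above),
// so gpr_index 0, 1 and 2 resolve to the R1, R2 and R3 spill slots at offsets 0, 4 and 8 from
// Gpr1Offset.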
85 #elif defined(__aarch64__)
86 // The callee save frame is pointed to by SP.
87 // | argN | |
88 // | ... | |
89 // | arg4 | |
90 // | arg3 spill | | Caller's frame
91 // | arg2 spill | |
92 // | arg1 spill | |
93 // | Method* | ---
94 // | LR |
95 // | X29 |
96 // | : |
97 // | X20 |
98 // | X7 |
99 // | : |
100 // | X1 |
101 // | D7 |
102 // | : |
103 // | D0 |
104 // | | padding
105 // | Method* | <- sp
106 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
107 static constexpr bool kAlignPairRegister = false;
108 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
109 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
110 static constexpr bool kQuickSkipOddFpRegisters = false;
111 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
112 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
113 static constexpr bool kGprFprLockstep = false;
114 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
115 arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg.
116 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
117 arm64::Arm64CalleeSaveGpr1Offset(Runtime::kRefsAndArgs); // Offset of first GPR arg.
118 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
119 arm64::Arm64CalleeSaveLrOffset(Runtime::kRefsAndArgs); // Offset of return address.
120 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
121 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
122 }
123 #elif defined(__mips__) && !defined(__LP64__)
124 // The callee save frame is pointed to by SP.
125 // | argN | |
126 // | ... | |
127 // | arg4 | |
128 // | arg3 spill | | Caller's frame
129 // | arg2 spill | |
130 // | arg1 spill | |
131 // | Method* | ---
132 // | RA |
133 // | ... | callee saves
134 // | A3 | arg3
135 // | A2 | arg2
136 // | A1 | arg1
137 // | F15 |
138 // | F14 | f_arg1
139 // | F13 |
140 // | F12 | f_arg0
141 // | | padding
142 // | A0/Method* | <- sp
143 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
144 static constexpr bool kAlignPairRegister = true;
145 static constexpr bool kQuickSoftFloatAbi = false;
146 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
147 static constexpr bool kQuickSkipOddFpRegisters = true;
148 static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
149 static constexpr size_t kNumQuickFprArgs = 4; // 4 FPR slots holding at most 2 FP arguments:
150 // floats can be passed only in even numbered
151 // registers and each double occupies two registers.
152 static constexpr bool kGprFprLockstep = false;
153 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg.
154 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 32; // Offset of first GPR arg.
155 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 76; // Offset of return address.
156 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
157 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
158 }
159 #elif defined(__mips__) && defined(__LP64__)
160 // The callee save frame is pointed to by SP.
161 // | argN | |
162 // | ... | |
163 // | arg4 | |
164 // | arg3 spill | | Caller's frame
165 // | arg2 spill | |
166 // | arg1 spill | |
167 // | Method* | ---
168 // | RA |
169 // | ... | callee saves
170 // | A7 | arg7
171 // | A6 | arg6
172 // | A5 | arg5
173 // | A4 | arg4
174 // | A3 | arg3
175 // | A2 | arg2
176 // | A1 | arg1
177 // | F19 | f_arg7
178 // | F18 | f_arg6
179 // | F17 | f_arg5
180 // | F16 | f_arg4
181 // | F15 | f_arg3
182 // | F14 | f_arg2
183 // | F13 | f_arg1
184 // | F12 | f_arg0
185 // | | padding
186 // | A0/Method* | <- sp
187 // NOTE: for MIPS64, when A0 is skipped, F0 is also skipped.
188 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
189 static constexpr bool kAlignPairRegister = false;
190 static constexpr bool kQuickSoftFloatAbi = false;
191 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
192 static constexpr bool kQuickSkipOddFpRegisters = false;
193 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
194 static constexpr size_t kNumQuickFprArgs = 7; // 7 arguments passed in FPRs.
195 static constexpr bool kGprFprLockstep = true;
196
197 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24; // Offset of first FPR arg (F1).
198 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80; // Offset of first GPR arg (A1).
199 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200; // Offset of return address.
200 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
201 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
202 }
203 #elif defined(__i386__)
204 // The callee save frame is pointed to by SP.
205 // | argN | |
206 // | ... | |
207 // | arg4 | |
208 // | arg3 spill | | Caller's frame
209 // | arg2 spill | |
210 // | arg1 spill | |
211 // | Method* | ---
212 // | Return |
213 // | EBP,ESI,EDI | callee saves
214 // | EBX | arg3
215 // | EDX | arg2
216 // | ECX | arg1
217 // | XMM3 | float arg 4
218 // | XMM2 | float arg 3
219 // | XMM1 | float arg 2
220 // | XMM0 | float arg 1
221 // | EAX/Method* | <- sp
222 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
223 static constexpr bool kAlignPairRegister = false;
224 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
225 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
226 static constexpr bool kQuickSkipOddFpRegisters = false;
227 static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
228 static constexpr size_t kNumQuickFprArgs = 4; // 4 arguments passed in FPRs.
229 static constexpr bool kGprFprLockstep = false;
230 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4; // Offset of first FPR arg.
231 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8; // Offset of first GPR arg.
232 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8; // Offset of return address.
233 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
234 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
235 }
236 #elif defined(__x86_64__)
237 // The callee save frame is pointed to by SP.
238 // | argN | |
239 // | ... | |
240 // | reg. arg spills | | Caller's frame
241 // | Method* | ---
242 // | Return |
243 // | R15 | callee save
244 // | R14 | callee save
245 // | R13 | callee save
246 // | R12 | callee save
247 // | R9 | arg5
248 // | R8 | arg4
249 // | RSI/R6 | arg1
250 // | RBP/R5 | callee save
251 // | RBX/R3 | callee save
252 // | RDX/R2 | arg2
253 // | RCX/R1 | arg3
254 // | XMM7 | float arg 8
255 // | XMM6 | float arg 7
256 // | XMM5 | float arg 6
257 // | XMM4 | float arg 5
258 // | XMM3 | float arg 4
259 // | XMM2 | float arg 3
260 // | XMM1 | float arg 2
261 // | XMM0 | float arg 1
262 // | Padding |
263 // | RDI/Method* | <- sp
264 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
265 static constexpr bool kAlignPairRegister = false;
266 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
267 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
268 static constexpr bool kQuickSkipOddFpRegisters = false;
269 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs.
270 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
271 static constexpr bool kGprFprLockstep = false;
272 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg.
273 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8; // Offset of first GPR arg.
274 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8; // Offset of return address.
275 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
276 switch (gpr_index) {
277 case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
278 case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
279 case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
280 case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
281 case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
282 default:
283 LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
284 return 0;
285 }
286 }
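// The mapping above is not the identity because gpr_index counts arguments in calling-convention
// order (RSI, RDX, RCX, R8, R9) while the frame spills the registers in the order shown in the
// diagram (RCX, RDX, RBX, RBP, RSI, R8, R9 going up from Gpr1Offset).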
287 #else
288 #error "Unsupported architecture"
289 #endif
290
291 public:
292 // Special handling for proxy methods. Proxy methods are instance methods so the
293 // 'this' object is the 1st argument. They also have the same frame layout as the
294 // kRefsAndArgs runtime method. Since 'this' is a reference, it is located in the
295 // 1st GPR.
296 static mirror::Object* GetProxyThisObject(ArtMethod** sp)
297 SHARED_REQUIRES(Locks::mutator_lock_) {
298 CHECK((*sp)->IsProxyMethod());
299 CHECK_GT(kNumQuickGprArgs, 0u);
300 constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
301 size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
302 GprIndexToGprOffset(kThisGprIndex);
303 uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
304 return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
305 }
306
307 static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
308 DCHECK((*sp)->IsCalleeSaveMethod());
309 return GetCalleeSaveMethodCaller(sp, Runtime::kRefsAndArgs);
310 }
311
312 static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
313 DCHECK((*sp)->IsCalleeSaveMethod());
314 uint8_t* previous_sp =
315 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
316 return *reinterpret_cast<ArtMethod**>(previous_sp);
317 }
318
319 static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
320 DCHECK((*sp)->IsCalleeSaveMethod());
321 const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
322 ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
323 reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
324 uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
325 const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
326 uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
327
328 if (current_code->IsOptimized()) {
329 CodeInfo code_info = current_code->GetOptimizedCodeInfo();
330 CodeInfoEncoding encoding = code_info.ExtractEncoding();
331 StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
332 DCHECK(stack_map.IsValid());
333 if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
334 InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
335 return inline_info.GetDexPcAtDepth(encoding.inline_info_encoding,
336 inline_info.GetDepth(encoding.inline_info_encoding)-1);
337 } else {
338 return stack_map.GetDexPc(encoding.stack_map_encoding);
339 }
340 } else {
341 return current_code->ToDexPc(*caller_sp, outer_pc);
342 }
343 }
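// Note: when the caller's PC falls inside code inlined by the optimizing compiler, the dex pc
// returned above is that of the innermost inlined frame (depth - 1), i.e. the frame that actually
// contains the invoke instruction.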
344
345 // For the given quick ref and args quick frame, return the caller's PC.
346 static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
347 DCHECK((*sp)->IsCalleeSaveMethod());
348 uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
349 return *reinterpret_cast<uintptr_t*>(lr);
350 }
351
352 QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
353 uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) :
354 is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
355 gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
356 fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
357 stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
358 + sizeof(ArtMethod*)), // Skip ArtMethod*.
359 gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
360 cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
361 static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
362 "Number of Quick FPR arguments unexpected");
363 static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
364 "Double alignment unexpected");
365 // For register alignment, we want the counter (fpr_double_index_) to be even iff the next
366 // available double register is even.
367 static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
368 "Number of Quick FPR arguments not even");
369 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
370 }
371
372 virtual ~QuickArgumentVisitor() {}
373
374 virtual void Visit() = 0;
375
376 Primitive::Type GetParamPrimitiveType() const {
377 return cur_type_;
378 }
379
380 uint8_t* GetParamAddress() const {
381 if (!kQuickSoftFloatAbi) {
382 Primitive::Type type = GetParamPrimitiveType();
383 if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
384 if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
385 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
386 return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
387 }
388 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
389 return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
390 }
391 return stack_args_ + (stack_index_ * kBytesStackArgLocation);
392 }
393 }
394 if (gpr_index_ < kNumQuickGprArgs) {
395 return gpr_args_ + GprIndexToGprOffset(gpr_index_);
396 }
397 return stack_args_ + (stack_index_ * kBytesStackArgLocation);
398 }
399
400 bool IsSplitLongOrDouble() const {
401 if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
402 (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
403 return is_split_long_or_double_;
404 } else {
405 return false; // An optimization for when GPR and FPRs are 64bit.
406 }
407 }
408
409 bool IsParamAReference() const {
410 return GetParamPrimitiveType() == Primitive::kPrimNot;
411 }
412
413 bool IsParamALongOrDouble() const {
414 Primitive::Type type = GetParamPrimitiveType();
415 return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
416 }
417
418 uint64_t ReadSplitLongParam() const {
419 // The split long is always available through the stack.
420 return *reinterpret_cast<uint64_t*>(stack_args_
421 + stack_index_ * kBytesStackArgLocation);
422 }
423
424 void IncGprIndex() {
425 gpr_index_++;
426 if (kGprFprLockstep) {
427 fpr_index_++;
428 }
429 }
430
431 void IncFprIndex() {
432 fpr_index_++;
433 if (kGprFprLockstep) {
434 gpr_index_++;
435 }
436 }
437
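// Reminder on the dex shorty format used below: shorty_[0] is the return type and the parameters
// follow, with 'L' standing for any reference type. For example (hypothetical method), a method
// 'long f(Object o, double d, int i)' has shorty "JLDI", so the loop starts at index 1 and visits
// o, d and i in order (after the receiver for non-static methods).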
438 void VisitArguments() SHARED_REQUIRES(Locks::mutator_lock_) {
439 // (a) 'stack_args_' should point to the method's first argument.
440 // (b) whatever the argument type, 'stack_index_' should be moved
441 // forward for every argument visited.
442 gpr_index_ = 0;
443 fpr_index_ = 0;
444 if (kQuickDoubleRegAlignedFloatBackFilled) {
445 fpr_double_index_ = 0;
446 }
447 stack_index_ = 0;
448 if (!is_static_) { // Handle this.
449 cur_type_ = Primitive::kPrimNot;
450 is_split_long_or_double_ = false;
451 Visit();
452 stack_index_++;
453 if (kNumQuickGprArgs > 0) {
454 IncGprIndex();
455 }
456 }
457 for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
458 cur_type_ = Primitive::GetType(shorty_[shorty_index]);
459 switch (cur_type_) {
460 case Primitive::kPrimNot:
461 case Primitive::kPrimBoolean:
462 case Primitive::kPrimByte:
463 case Primitive::kPrimChar:
464 case Primitive::kPrimShort:
465 case Primitive::kPrimInt:
466 is_split_long_or_double_ = false;
467 Visit();
468 stack_index_++;
469 if (gpr_index_ < kNumQuickGprArgs) {
470 IncGprIndex();
471 }
472 break;
473 case Primitive::kPrimFloat:
474 is_split_long_or_double_ = false;
475 Visit();
476 stack_index_++;
477 if (kQuickSoftFloatAbi) {
478 if (gpr_index_ < kNumQuickGprArgs) {
479 IncGprIndex();
480 }
481 } else {
482 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
483 IncFprIndex();
484 if (kQuickDoubleRegAlignedFloatBackFilled) {
485 // Double should not overlap with float.
486 // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
487 fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
488 // Float should not overlap with double.
489 if (fpr_index_ % 2 == 0) {
490 fpr_index_ = std::max(fpr_double_index_, fpr_index_);
491 }
492 } else if (kQuickSkipOddFpRegisters) {
493 IncFprIndex();
494 }
495 }
496 }
497 break;
498 case Primitive::kPrimDouble:
499 case Primitive::kPrimLong:
500 if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
501 if (cur_type_ == Primitive::kPrimLong && kAlignPairRegister && gpr_index_ == 0) {
502 // Currently, this is only for ARM and MIPS, where the first available parameter
503 // register is R1 (on ARM) or A1 (on MIPS). So we skip it, and use R2 (on ARM) or
504 // A2 (on MIPS) instead.
505 IncGprIndex();
506 }
507 is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
508 ((gpr_index_ + 1) == kNumQuickGprArgs);
509 if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
510 // We don't want to split this. Pass over this register.
511 gpr_index_++;
512 is_split_long_or_double_ = false;
513 }
514 Visit();
515 if (kBytesStackArgLocation == 4) {
516 stack_index_+= 2;
517 } else {
518 CHECK_EQ(kBytesStackArgLocation, 8U);
519 stack_index_++;
520 }
521 if (gpr_index_ < kNumQuickGprArgs) {
522 IncGprIndex();
523 if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
524 if (gpr_index_ < kNumQuickGprArgs) {
525 IncGprIndex();
526 }
527 }
528 }
529 } else {
530 is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
531 ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
532 Visit();
533 if (kBytesStackArgLocation == 4) {
534 stack_index_+= 2;
535 } else {
536 CHECK_EQ(kBytesStackArgLocation, 8U);
537 stack_index_++;
538 }
539 if (kQuickDoubleRegAlignedFloatBackFilled) {
540 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
541 fpr_double_index_ += 2;
542 // Float should not overlap with double.
543 if (fpr_index_ % 2 == 0) {
544 fpr_index_ = std::max(fpr_double_index_, fpr_index_);
545 }
546 }
547 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
548 IncFprIndex();
549 if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
550 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
551 IncFprIndex();
552 }
553 }
554 }
555 }
556 break;
557 default:
558 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
559 }
560 }
561 }
562
563 protected:
564 const bool is_static_;
565 const char* const shorty_;
566 const uint32_t shorty_len_;
567
568 private:
569 uint8_t* const gpr_args_; // Address of GPR arguments in callee save frame.
570 uint8_t* const fpr_args_; // Address of FPR arguments in callee save frame.
571 uint8_t* const stack_args_; // Address of stack arguments in caller's frame.
572 uint32_t gpr_index_; // Index into spilled GPRs.
573 // Index into spilled FPRs.
574 // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
575 // holds a higher register number.
576 uint32_t fpr_index_;
577 // Index into spilled FPRs for aligned double.
578 // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in
579 // terms of singles, may be behind fpr_index.
580 uint32_t fpr_double_index_;
581 uint32_t stack_index_; // Index into arguments on the stack.
582 // The current type of argument during VisitArguments.
583 Primitive::Type cur_type_;
584 // Does a 64bit parameter straddle the register and stack arguments?
585 bool is_split_long_or_double_;
586 };
587
588 // Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
589 // allows using the QuickArgumentVisitor constants without moving all the code into its own module.
590 extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
591 SHARED_REQUIRES(Locks::mutator_lock_) {
592 return QuickArgumentVisitor::GetProxyThisObject(sp);
593 }
594
595 // Visits arguments on the stack placing them into the shadow frame.
596 class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
597 public:
598 BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
599 uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
600 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
601
602 void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
603
604 private:
605 ShadowFrame* const sf_;
606 uint32_t cur_reg_;
607
608 DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
609 };
610
611 void BuildQuickShadowFrameVisitor::Visit() {
612 Primitive::Type type = GetParamPrimitiveType();
613 switch (type) {
614 case Primitive::kPrimLong: // Fall-through.
615 case Primitive::kPrimDouble:
616 if (IsSplitLongOrDouble()) {
617 sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
618 } else {
619 sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
620 }
621 ++cur_reg_;
622 break;
623 case Primitive::kPrimNot: {
624 StackReference<mirror::Object>* stack_ref =
625 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
626 sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
627 }
628 break;
629 case Primitive::kPrimBoolean: // Fall-through.
630 case Primitive::kPrimByte: // Fall-through.
631 case Primitive::kPrimChar: // Fall-through.
632 case Primitive::kPrimShort: // Fall-through.
633 case Primitive::kPrimInt: // Fall-through.
634 case Primitive::kPrimFloat:
635 sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
636 break;
637 case Primitive::kPrimVoid:
638 LOG(FATAL) << "UNREACHABLE";
639 UNREACHABLE();
640 }
641 ++cur_reg_;
642 }
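// Note: in Visit() above, wide (long/double) arguments advance cur_reg_ twice (once inside their
// case and once after the switch) because they occupy two vregs in the shadow frame.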
643
644 extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
645 SHARED_REQUIRES(Locks::mutator_lock_) {
646 // Ensure we don't get thread suspension until the object arguments are safely in the shadow
647 // frame.
648 ScopedQuickEntrypointChecks sqec(self);
649
650 if (UNLIKELY(!method->IsInvokable())) {
651 method->ThrowInvocationTimeError();
652 return 0;
653 }
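// ThrowInvocationTimeError() raises the error appropriate for a non-invokable method (e.g.
// AbstractMethodError for an abstract method); returning 0 lets the stub deliver the now-pending
// exception.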
654
655 JValue tmp_value;
656 ShadowFrame* deopt_frame = self->PopStackedShadowFrame(
657 StackedShadowFrameType::kSingleFrameDeoptimizationShadowFrame, false);
658 ManagedStack fragment;
659
660 DCHECK(!method->IsNative()) << PrettyMethod(method);
661 uint32_t shorty_len = 0;
662 ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
663 const DexFile::CodeItem* code_item = non_proxy_method->GetCodeItem();
664 DCHECK(code_item != nullptr) << PrettyMethod(method);
665 const char* shorty = non_proxy_method->GetShorty(&shorty_len);
666
667 JValue result;
668
669 if (deopt_frame != nullptr) {
670 // Coming from single-frame deopt.
671
672 if (kIsDebugBuild) {
673 // Sanity-check: are the methods as expected? We check that the last shadow frame (the bottom
674 // of the call-stack) corresponds to the called method.
675 ShadowFrame* linked = deopt_frame;
676 while (linked->GetLink() != nullptr) {
677 linked = linked->GetLink();
678 }
679 CHECK_EQ(method, linked->GetMethod()) << PrettyMethod(method) << " "
680 << PrettyMethod(linked->GetMethod());
681 }
682
683 if (VLOG_IS_ON(deopt)) {
684 // Print out the stack to verify that it was a single-frame deopt.
685 LOG(INFO) << "Continuing from deopt. Stack is:";
686 QuickExceptionHandler::DumpFramesWithType(self, true);
687 }
688
689 mirror::Throwable* pending_exception = nullptr;
690 bool from_code = false;
691 self->PopDeoptimizationContext(&result, &pending_exception, /* out */ &from_code);
692 CHECK(from_code);
693
694 // Push a transition back into managed code onto the linked list in thread.
695 self->PushManagedStackFragment(&fragment);
696
697 // Ensure that the stack is still in order.
698 if (kIsDebugBuild) {
699 class DummyStackVisitor : public StackVisitor {
700 public:
701 explicit DummyStackVisitor(Thread* self_in) SHARED_REQUIRES(Locks::mutator_lock_)
702 : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
703
704 bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
705 // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking
706 // logic. Just always say we want to continue.
707 return true;
708 }
709 };
710 DummyStackVisitor dsv(self);
711 dsv.WalkStack();
712 }
713
714 // Restore the exception that was pending before deoptimization then interpret the
715 // deoptimized frames.
716 if (pending_exception != nullptr) {
717 self->SetException(pending_exception);
718 }
719 interpreter::EnterInterpreterFromDeoptimize(self, deopt_frame, from_code, &result);
720 } else {
721 const char* old_cause = self->StartAssertNoThreadSuspension(
722 "Building interpreter shadow frame");
723 uint16_t num_regs = code_item->registers_size_;
724 // No last shadow coming from quick.
725 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
726 CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
727 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
728 size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
729 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
730 shadow_frame, first_arg_reg);
731 shadow_frame_builder.VisitArguments();
732 const bool needs_initialization =
733 method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
734 // Push a transition back into managed code onto the linked list in thread.
735 self->PushManagedStackFragment(&fragment);
736 self->PushShadowFrame(shadow_frame);
737 self->EndAssertNoThreadSuspension(old_cause);
738
739 if (needs_initialization) {
740 // Ensure static method's class is initialized.
741 StackHandleScope<1> hs(self);
742 Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass()));
743 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
744 DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(shadow_frame->GetMethod());
745 self->PopManagedStackFragment(fragment);
746 return 0;
747 }
748 }
749
750 result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
751 }
752
753 // Pop transition.
754 self->PopManagedStackFragment(fragment);
755
756 // Request a stack deoptimization if needed
757 ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
758 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
759 // Push the context of the deoptimization stack so we can restore the return value and the
760 // exception before executing the deoptimized frames.
761 self->PushDeoptimizationContext(
762 result, shorty[0] == 'L', /* from_code */ false, self->GetException());
763
764 // Set special exception to cause deoptimization.
765 self->SetException(Thread::GetDeoptimizationException());
766 }
767
768 // No need to restore the args since the method has already been run by the interpreter.
769 return result.GetJ();
770 }
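// Note: the interpreter's JValue is handed back as its raw 64 bits (GetJ()); the bridge's assembly
// stub is expected to move them into the return register(s), and the caller reinterprets them
// according to the method's return type.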
771
772 // Visits arguments on the stack, placing them into the args vector; Object* arguments are
773 // converted to jobjects.
774 class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
775 public:
776 BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
777 ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
778 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
779
780 void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
781
782 void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);
783
784 private:
785 ScopedObjectAccessUnchecked* const soa_;
786 std::vector<jvalue>* const args_;
787 // References which we must update when exiting in case the GC moved the objects.
788 std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;
789
790 DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
791 };
792
793 void BuildQuickArgumentVisitor::Visit() {
794 jvalue val;
795 Primitive::Type type = GetParamPrimitiveType();
796 switch (type) {
797 case Primitive::kPrimNot: {
798 StackReference<mirror::Object>* stack_ref =
799 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
800 val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
801 references_.push_back(std::make_pair(val.l, stack_ref));
802 break;
803 }
804 case Primitive::kPrimLong: // Fall-through.
805 case Primitive::kPrimDouble:
806 if (IsSplitLongOrDouble()) {
807 val.j = ReadSplitLongParam();
808 } else {
809 val.j = *reinterpret_cast<jlong*>(GetParamAddress());
810 }
811 break;
812 case Primitive::kPrimBoolean: // Fall-through.
813 case Primitive::kPrimByte: // Fall-through.
814 case Primitive::kPrimChar: // Fall-through.
815 case Primitive::kPrimShort: // Fall-through.
816 case Primitive::kPrimInt: // Fall-through.
817 case Primitive::kPrimFloat:
818 val.i = *reinterpret_cast<jint*>(GetParamAddress());
819 break;
820 case Primitive::kPrimVoid:
821 LOG(FATAL) << "UNREACHABLE";
822 UNREACHABLE();
823 }
824 args_->push_back(val);
825 }
826
827 void BuildQuickArgumentVisitor::FixupReferences() {
828 // Fixup any references which may have changed.
829 for (const auto& pair : references_) {
830 pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
831 soa_->Env()->DeleteLocalRef(pair.first);
832 }
833 }
834
835 // Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
836 // which is responsible for recording callee save registers. We explicitly place into jobjects the
837 // incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
838 // field within the proxy object, which will box the primitive arguments and deal with error cases.
839 extern "C" uint64_t artQuickProxyInvokeHandler(
840 ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
841 SHARED_REQUIRES(Locks::mutator_lock_) {
842 DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
843 DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
844 // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
845 const char* old_cause =
846 self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
847 // Register the top of the managed stack, making the stack crawlable.
848 DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
849 self->VerifyStack();
850 // Start new JNI local reference state.
851 JNIEnvExt* env = self->GetJniEnv();
852 ScopedObjectAccessUnchecked soa(env);
853 ScopedJniEnvLocalRefState env_state(env);
854 // Create local ref. copies of proxy method and the receiver.
855 jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
856
857 // Place the arguments into the args vector and remove the receiver.
858 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(sizeof(void*));
859 CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
860 << PrettyMethod(non_proxy_method);
861 std::vector<jvalue> args;
862 uint32_t shorty_len = 0;
863 const char* shorty = non_proxy_method->GetShorty(&shorty_len);
864 BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);
865
866 local_ref_visitor.VisitArguments();
867 DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
868 args.erase(args.begin());
869
870 // Convert proxy method into expected interface method.
871 ArtMethod* interface_method = proxy_method->FindOverriddenMethod(sizeof(void*));
872 DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method);
873 DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
874 self->EndAssertNoThreadSuspension(old_cause);
875 jobject interface_method_jobj = soa.AddLocalReference<jobject>(
876 mirror::Method::CreateFromArtMethod(soa.Self(), interface_method));
877
878 // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
879 // that performs allocations.
880 JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
881 // Restore references which might have moved.
882 local_ref_visitor.FixupReferences();
883 return result.GetJ();
884 }
885
886 // Read object references held in arguments from quick frames and place them into JNI local
887 // references so they don't get garbage collected.
888 class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
889 public:
890 RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
891 uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
892 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
893
894 void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
895
896 void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);
897
898 private:
899 ScopedObjectAccessUnchecked* const soa_;
900 // References which we must update when exiting in case the GC moved the objects.
901 std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
902
903 DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
904 };
905
906 void RememberForGcArgumentVisitor::Visit() {
907 if (IsParamAReference()) {
908 StackReference<mirror::Object>* stack_ref =
909 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
910 jobject reference =
911 soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
912 references_.push_back(std::make_pair(reference, stack_ref));
913 }
914 }
915
916 void RememberForGcArgumentVisitor::FixupReferences() {
917 // Fixup any references which may have changed.
918 for (const auto& pair : references_) {
919 pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
920 soa_->Env()->DeleteLocalRef(pair.first);
921 }
922 }
923
924 // Lazily resolve a method for quick. Called by stub code.
925 extern "C" const void* artQuickResolutionTrampoline(
926 ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
927 SHARED_REQUIRES(Locks::mutator_lock_) {
928 // The resolution trampoline stashes the resolved method into the callee-save frame to transport
929 // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
930 // does not have the same stack layout as the callee-save method).
931 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
932 // Start new JNI local reference state
933 JNIEnvExt* env = self->GetJniEnv();
934 ScopedObjectAccessUnchecked soa(env);
935 ScopedJniEnvLocalRefState env_state(env);
936 const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");
937
938 // Compute details about the called method (avoid GCs)
939 ClassLinker* linker = Runtime::Current()->GetClassLinker();
940 InvokeType invoke_type;
941 MethodReference called_method(nullptr, 0);
942 const bool called_method_known_on_entry = !called->IsRuntimeMethod();
943 ArtMethod* caller = nullptr;
944 if (!called_method_known_on_entry) {
945 caller = QuickArgumentVisitor::GetCallingMethod(sp);
946 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
947 const DexFile::CodeItem* code;
948 called_method.dex_file = caller->GetDexFile();
949 code = caller->GetCodeItem();
950 CHECK_LT(dex_pc, code->insns_size_in_code_units_);
951 const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
952 Instruction::Code instr_code = instr->Opcode();
953 bool is_range;
954 switch (instr_code) {
955 case Instruction::INVOKE_DIRECT:
956 invoke_type = kDirect;
957 is_range = false;
958 break;
959 case Instruction::INVOKE_DIRECT_RANGE:
960 invoke_type = kDirect;
961 is_range = true;
962 break;
963 case Instruction::INVOKE_STATIC:
964 invoke_type = kStatic;
965 is_range = false;
966 break;
967 case Instruction::INVOKE_STATIC_RANGE:
968 invoke_type = kStatic;
969 is_range = true;
970 break;
971 case Instruction::INVOKE_SUPER:
972 invoke_type = kSuper;
973 is_range = false;
974 break;
975 case Instruction::INVOKE_SUPER_RANGE:
976 invoke_type = kSuper;
977 is_range = true;
978 break;
979 case Instruction::INVOKE_VIRTUAL:
980 invoke_type = kVirtual;
981 is_range = false;
982 break;
983 case Instruction::INVOKE_VIRTUAL_RANGE:
984 invoke_type = kVirtual;
985 is_range = true;
986 break;
987 case Instruction::INVOKE_INTERFACE:
988 invoke_type = kInterface;
989 is_range = false;
990 break;
991 case Instruction::INVOKE_INTERFACE_RANGE:
992 invoke_type = kInterface;
993 is_range = true;
994 break;
995 default:
996 LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(nullptr);
997 UNREACHABLE();
998 }
999 called_method.dex_method_index = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
1000 } else {
1001 invoke_type = kStatic;
1002 called_method.dex_file = called->GetDexFile();
1003 called_method.dex_method_index = called->GetDexMethodIndex();
1004 }
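// Illustrative example (hypothetical bytecode): for an 'invoke-virtual {v1, v2}, LFoo;->bar(I)V'
// call site, the switch above selects kVirtual/non-range and VRegB_35c() yields the method_id
// index of Foo.bar in the caller's dex file.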
1005 uint32_t shorty_len;
1006 const char* shorty =
1007 called_method.dex_file->GetMethodShorty(
1008 called_method.dex_file->GetMethodId(called_method.dex_method_index), &shorty_len);
1009 RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
1010 visitor.VisitArguments();
1011 self->EndAssertNoThreadSuspension(old_cause);
1012 const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
1013 // Resolve method filling in dex cache.
1014 if (!called_method_known_on_entry) {
1015 StackHandleScope<1> hs(self);
1016 mirror::Object* dummy = nullptr;
1017 HandleWrapper<mirror::Object> h_receiver(
1018 hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
1019 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
1020 called = linker->ResolveMethod<ClassLinker::kForceICCECheck>(
1021 self, called_method.dex_method_index, caller, invoke_type);
1022 }
1023 const void* code = nullptr;
1024 if (LIKELY(!self->IsExceptionPending())) {
1025 // Incompatible class change should have been handled in resolve method.
1026 CHECK(!called->CheckIncompatibleClassChange(invoke_type))
1027 << PrettyMethod(called) << " " << invoke_type;
1028 if (virtual_or_interface || invoke_type == kSuper) {
1029 // Refine called method based on receiver for kVirtual/kInterface, and
1030 // caller for kSuper.
1031 ArtMethod* orig_called = called;
1032 if (invoke_type == kVirtual) {
1033 CHECK(receiver != nullptr) << invoke_type;
1034 called = receiver->GetClass()->FindVirtualMethodForVirtual(called, sizeof(void*));
1035 } else if (invoke_type == kInterface) {
1036 CHECK(receiver != nullptr) << invoke_type;
1037 called = receiver->GetClass()->FindVirtualMethodForInterface(called, sizeof(void*));
1038 } else {
1039 DCHECK_EQ(invoke_type, kSuper);
1040 CHECK(caller != nullptr) << invoke_type;
1041 StackHandleScope<2> hs(self);
1042 Handle<mirror::DexCache> dex_cache(
1043 hs.NewHandle(caller->GetDeclaringClass()->GetDexCache()));
1044 Handle<mirror::ClassLoader> class_loader(
1045 hs.NewHandle(caller->GetDeclaringClass()->GetClassLoader()));
1046 // TODO Maybe put this into a mirror::Class function.
1047 mirror::Class* ref_class = linker->ResolveReferencedClassOfMethod(
1048 called_method.dex_method_index, dex_cache, class_loader);
1049 if (ref_class->IsInterface()) {
1050 called = ref_class->FindVirtualMethodForInterfaceSuper(called, sizeof(void*));
1051 } else {
1052 called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry(
1053 called->GetMethodIndex(), sizeof(void*));
1054 }
1055 }
1056
1057 CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
1058 << PrettyTypeOf(receiver) << " "
1059 << invoke_type << " " << orig_called->GetVtableIndex();
1060
1061 // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
1062 // of the sharpened method avoiding dirtying the dex cache if possible.
1063 // Note, called_method.dex_method_index references the dex method before the
1064 // FindVirtualMethodFor... This is ok for FindDexMethodIndexInOtherDexFile that only cares
1065 // about the name and signature.
1066 uint32_t update_dex_cache_method_index = called->GetDexMethodIndex();
1067 if (!called->HasSameDexCacheResolvedMethods(caller, sizeof(void*))) {
1068 // Calling from one dex file to another, need to compute the method index appropriate to
1069 // the caller's dex file. Since we get here only if the original called was a runtime
1070 // method, we've got the correct dex_file and a dex_method_idx from above.
1071 DCHECK(!called_method_known_on_entry);
1072 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
1073 const DexFile* caller_dex_file = called_method.dex_file;
1074 uint32_t caller_method_name_and_sig_index = called_method.dex_method_index;
1075 update_dex_cache_method_index =
1076 called->FindDexMethodIndexInOtherDexFile(*caller_dex_file,
1077 caller_method_name_and_sig_index);
1078 }
1079 if ((update_dex_cache_method_index != DexFile::kDexNoIndex) &&
1080 (caller->GetDexCacheResolvedMethod(
1081 update_dex_cache_method_index, sizeof(void*)) != called)) {
1082 caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, called, sizeof(void*));
1083 }
1084 } else if (invoke_type == kStatic) {
1085 const auto called_dex_method_idx = called->GetDexMethodIndex();
1086 // For static invokes, we may dispatch to the static method in the superclass but resolve
1087 // using the subclass. To prevent getting slow paths on each invoke, we force set the
1088 // resolved method for the super class dex method index if we are in the same dex file.
1089 // b/19175856
1090 if (called->GetDexFile() == called_method.dex_file &&
1091 called_method.dex_method_index != called_dex_method_idx) {
1092 called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called, sizeof(void*));
1093 }
1094 }
1095
1096 // Ensure that the called method's class is initialized.
1097 StackHandleScope<1> hs(soa.Self());
1098 Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
1099 linker->EnsureInitialized(soa.Self(), called_class, true, true);
1100 if (LIKELY(called_class->IsInitialized())) {
1101 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
1102 // If we are single-stepping or the called method is deoptimized (by a
1103 // breakpoint, for example), then we have to execute the called method
1104 // with the interpreter.
1105 code = GetQuickToInterpreterBridge();
1106 } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) {
1107 // If the caller is deoptimized (by a breakpoint, for example), we have to
1108 // continue its execution with interpreter when returning from the called
1109 // method. Because we do not want to execute the called method with the
1110 // interpreter, we wrap its execution into the instrumentation stubs.
1111 // When the called method returns, it will execute the instrumentation
1112 // exit hook that will determine the need of the interpreter with a call
1113 // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if
1114 // it is needed.
1115 code = GetQuickInstrumentationEntryPoint();
1116 } else {
1117 code = called->GetEntryPointFromQuickCompiledCode();
1118 }
1119 } else if (called_class->IsInitializing()) {
1120 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
1121 // If we are single-stepping or the called method is deoptimized (by a
1122 // breakpoint, for example), then we have to execute the called method
1123 // with the interpreter.
1124 code = GetQuickToInterpreterBridge();
1125 } else if (invoke_type == kStatic) {
1126 // Class is still initializing, go to oat and grab code (trampoline must be left in place
1127 // until class is initialized to stop races between threads).
1128 code = linker->GetQuickOatCodeFor(called);
1129 } else {
1130 // No trampoline for non-static methods.
1131 code = called->GetEntryPointFromQuickCompiledCode();
1132 }
1133 } else {
1134 DCHECK(called_class->IsErroneous());
1135 }
1136 }
1137 CHECK_EQ(code == nullptr, self->IsExceptionPending());
1138 // Fixup any locally saved objects that may have moved during a GC.
1139 visitor.FixupReferences();
1140 // Place called method in callee-save frame to be placed as first argument to quick method.
1141 *sp = called;
1142
1143 return code;
1144 }
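// On return, the resolution stub is expected to reload the ArtMethod* stashed at *sp into the
// method register and tail-call 'code'; a null 'code' means an exception is pending and must be
// delivered instead.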
1145
1146 /*
1147 * This class uses a couple of observations to unite the different calling conventions through
1148 * a few constants.
1149 *
1150 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
1151 * possible alignment.
1152 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
1153 * types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
1154 * when we have to split things
1155 * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats
1156 * and we can use Int handling directly.
1157 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
1158 * necessary when widening. Also, widening of Ints will take place implicitly, and the
1159 * extension should be compatible with Aarch64, which mandates copying the available bits
1160 * into LSB and leaving the rest unspecified.
1161 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
1162 * the stack.
1163 * 6) There is only little endian.
1164 *
1165 *
1166 * Actual work is supposed to be done in a delegate of the template type. The interface is as
1167 * follows:
1168 *
1169 * void PushGpr(uintptr_t): Add a value for the next GPR
1170 *
1171 * void PushFpr4(float): Add a value for the next FPR of size 32b. Is only called if we need
1172 * padding, that is, think the architecture is 32b and aligns 64b.
1173 *
1174 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to
1175 * split this if necessary. The current state will have aligned, if
1176 * necessary.
1177 *
1178 * void PushStack(uintptr_t): Push a value to the stack.
1179 *
1180 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have nullptr,
1181 * as this might be important for null initialization.
1182 * Must return the jobject, that is, the reference to the
1183 * entry in the HandleScope (nullptr if necessary).
1184 *
1185 */
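// A minimal delegate sketch for the interface above (illustrative only; the type and counters are
// assumed, not part of this file):
//
//   class CountingDelegate {
//    public:
//     void PushGpr(uintptr_t) { gprs_++; }
//     void PushFpr4(float) { fprs_++; }
//     void PushFpr8(uint64_t) { fprs_++; }
//     void PushStack(uintptr_t) { stack_++; }
//     uintptr_t PushHandle(mirror::Object*) { handles_++; return 0u; }
//    private:
//     size_t gprs_ = 0, fprs_ = 0, stack_ = 0, handles_ = 0;
//   };
//
// driven as: CountingDelegate d; BuildNativeCallFrameStateMachine<CountingDelegate> sm(&d);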
1186 template<class T> class BuildNativeCallFrameStateMachine {
1187 public:
1188 #if defined(__arm__)
1189 // TODO: These are all dummy values!
1190 static constexpr bool kNativeSoftFloatAbi = true;
1191 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs, r0-r3
1192 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1193
1194 static constexpr size_t kRegistersNeededForLong = 2;
1195 static constexpr size_t kRegistersNeededForDouble = 2;
1196 static constexpr bool kMultiRegistersAligned = true;
1197 static constexpr bool kMultiFPRegistersWidened = false;
1198 static constexpr bool kMultiGPRegistersWidened = false;
1199 static constexpr bool kAlignLongOnStack = true;
1200 static constexpr bool kAlignDoubleOnStack = true;
1201 #elif defined(__aarch64__)
1202 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
1203 static constexpr size_t kNumNativeGprArgs = 8; // 8 arguments passed in GPRs (x0-x7).
1204 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1205
1206 static constexpr size_t kRegistersNeededForLong = 1;
1207 static constexpr size_t kRegistersNeededForDouble = 1;
1208 static constexpr bool kMultiRegistersAligned = false;
1209 static constexpr bool kMultiFPRegistersWidened = false;
1210 static constexpr bool kMultiGPRegistersWidened = false;
1211 static constexpr bool kAlignLongOnStack = false;
1212 static constexpr bool kAlignDoubleOnStack = false;
1213 #elif defined(__mips__) && !defined(__LP64__)
1214 static constexpr bool kNativeSoftFloatAbi = true; // FP args are passed in GPRs (soft-float convention here).
1215 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs.
1216 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1217
1218 static constexpr size_t kRegistersNeededForLong = 2;
1219 static constexpr size_t kRegistersNeededForDouble = 2;
1220 static constexpr bool kMultiRegistersAligned = true;
1221 static constexpr bool kMultiFPRegistersWidened = true;
1222 static constexpr bool kMultiGPRegistersWidened = false;
1223 static constexpr bool kAlignLongOnStack = true;
1224 static constexpr bool kAlignDoubleOnStack = true;
1225 #elif defined(__mips__) && defined(__LP64__)
1226 // Let the code prepare GPRs only and we will load the FPRs with the same data.
1227 static constexpr bool kNativeSoftFloatAbi = true;
1228 static constexpr size_t kNumNativeGprArgs = 8;
1229 static constexpr size_t kNumNativeFprArgs = 0;
1230
1231 static constexpr size_t kRegistersNeededForLong = 1;
1232 static constexpr size_t kRegistersNeededForDouble = 1;
1233 static constexpr bool kMultiRegistersAligned = false;
1234 static constexpr bool kMultiFPRegistersWidened = false;
1235 static constexpr bool kMultiGPRegistersWidened = true;
1236 static constexpr bool kAlignLongOnStack = false;
1237 static constexpr bool kAlignDoubleOnStack = false;
1238 #elif defined(__i386__)
1239 // TODO: Check these!
1240 static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp
1241 static constexpr size_t kNumNativeGprArgs = 0; // 0 arguments passed in GPRs; everything goes on the stack.
1242 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1243
1244 static constexpr size_t kRegistersNeededForLong = 2;
1245 static constexpr size_t kRegistersNeededForDouble = 2;
1246 static constexpr bool kMultiRegistersAligned = false; // x86 not using regs, anyways
1247 static constexpr bool kMultiFPRegistersWidened = false;
1248 static constexpr bool kMultiGPRegistersWidened = false;
1249 static constexpr bool kAlignLongOnStack = false;
1250 static constexpr bool kAlignDoubleOnStack = false;
1251 #elif defined(__x86_64__)
1252 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
1253 static constexpr size_t kNumNativeGprArgs = 6; // 6 arguments passed in GPRs.
1254 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1255
1256 static constexpr size_t kRegistersNeededForLong = 1;
1257 static constexpr size_t kRegistersNeededForDouble = 1;
1258 static constexpr bool kMultiRegistersAligned = false;
1259 static constexpr bool kMultiFPRegistersWidened = false;
1260 static constexpr bool kMultiGPRegistersWidened = false;
1261 static constexpr bool kAlignLongOnStack = false;
1262 static constexpr bool kAlignDoubleOnStack = false;
1263 #else
1264 #error "Unsupported architecture"
1265 #endif
1266
1267 public:
1268 explicit BuildNativeCallFrameStateMachine(T* delegate)
1269 : gpr_index_(kNumNativeGprArgs),
1270 fpr_index_(kNumNativeFprArgs),
1271 stack_entries_(0),
1272 delegate_(delegate) {
1273 // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
1274 // the next register is even; counting down is just to make the compiler happy...
1275 static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
1276 static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
1277 }
1278
1279 virtual ~BuildNativeCallFrameStateMachine() {}
1280
1281 bool HavePointerGpr() const {
1282 return gpr_index_ > 0;
1283 }
1284
1285 void AdvancePointer(const void* val) {
1286 if (HavePointerGpr()) {
1287 gpr_index_--;
1288 PushGpr(reinterpret_cast<uintptr_t>(val));
1289 } else {
1290 stack_entries_++; // TODO: have a field for pointer length as multiple of 32b
1291 PushStack(reinterpret_cast<uintptr_t>(val));
1292 gpr_index_ = 0;
1293 }
1294 }
1295
1296 bool HaveHandleScopeGpr() const {
1297 return gpr_index_ > 0;
1298 }
1299
1300 void AdvanceHandleScope(mirror::Object* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
1301 uintptr_t handle = PushHandle(ptr);
1302 if (HaveHandleScopeGpr()) {
1303 gpr_index_--;
1304 PushGpr(handle);
1305 } else {
1306 stack_entries_++;
1307 PushStack(handle);
1308 gpr_index_ = 0;
1309 }
1310 }
1311
1312 bool HaveIntGpr() const {
1313 return gpr_index_ > 0;
1314 }
1315
1316 void AdvanceInt(uint32_t val) {
1317 if (HaveIntGpr()) {
1318 gpr_index_--;
1319 if (kMultiGPRegistersWidened) {
1320 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
1321 PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
1322 } else {
1323 PushGpr(val);
1324 }
1325 } else {
1326 stack_entries_++;
1327 if (kMultiGPRegistersWidened) {
1328 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
1329 PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
1330 } else {
1331 PushStack(val);
1332 }
1333 gpr_index_ = 0;
1334 }
1335 }
1336
1337 bool HaveLongGpr() const {
1338 return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
1339 }
1340
1341 bool LongGprNeedsPadding() const {
1342 return kRegistersNeededForLong > 1 && // only pad when using multiple registers
1343 kAlignLongOnStack && // and when it needs alignment
1344 (gpr_index_ & 1) == 1; // counter is odd, see constructor
1345 }
1346
1347 bool LongStackNeedsPadding() const {
1348 return kRegistersNeededForLong > 1 && // only pad when using multiple registers
1349 kAlignLongOnStack && // and when it needs 8B alignment
1350 (stack_entries_ & 1) == 1; // counter is odd
1351 }
1352
1353 void AdvanceLong(uint64_t val) {
1354 if (HaveLongGpr()) {
1355 if (LongGprNeedsPadding()) {
1356 PushGpr(0);
1357 gpr_index_--;
1358 }
1359 if (kRegistersNeededForLong == 1) {
1360 PushGpr(static_cast<uintptr_t>(val));
1361 } else {
1362 PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1363 PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1364 }
1365 gpr_index_ -= kRegistersNeededForLong;
1366 } else {
1367 if (LongStackNeedsPadding()) {
1368 PushStack(0);
1369 stack_entries_++;
1370 }
1371 if (kRegistersNeededForLong == 1) {
1372 PushStack(static_cast<uintptr_t>(val));
1373 stack_entries_++;
1374 } else {
1375 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1376 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1377 stack_entries_ += 2;
1378 }
1379 gpr_index_ = 0;
1380 }
1381 }
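// Worked example for the 32-bit configuration above (kNumNativeGprArgs == 4,
// kRegistersNeededForLong == 2, kAlignLongOnStack == true): after one int argument,
// gpr_index_ is 3 (odd), so AdvanceLong() pushes one padding GPR and then the two 32-bit
// halves, leaving the long in an aligned even/odd register pair and gpr_index_ at 0.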
1382
1383 bool HaveFloatFpr() const {
1384 return fpr_index_ > 0;
1385 }
1386
1387 void AdvanceFloat(float val) {
1388 if (kNativeSoftFloatAbi) {
1389 AdvanceInt(bit_cast<uint32_t, float>(val));
1390 } else {
1391 if (HaveFloatFpr()) {
1392 fpr_index_--;
1393 if (kRegistersNeededForDouble == 1) {
1394 if (kMultiFPRegistersWidened) {
1395 PushFpr8(bit_cast<uint64_t, double>(val));
1396 } else {
1397 // No widening, just use the bits.
1398 PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val)));
1399 }
1400 } else {
1401 PushFpr4(val);
1402 }
1403 } else {
1404 stack_entries_++;
1405 if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) {
1406 // Need to widen before storing: Note the "double" in the template instantiation.
1407 // Note: We need to jump through those hoops to make the compiler happy.
1408 DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
1409 PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val)));
1410 } else {
1411 PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
1412 }
1413 fpr_index_ = 0;
1414 }
1415 }
1416 }
1417
1418 bool HaveDoubleFpr() const {
1419 return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
1420 }
1421
1422 bool DoubleFprNeedsPadding() const {
1423 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers
1424 kAlignDoubleOnStack && // and when it needs alignment
1425 (fpr_index_ & 1) == 1; // counter is odd, see constructor
1426 }
1427
1428 bool DoubleStackNeedsPadding() const {
1429 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers
1430 kAlignDoubleOnStack && // and when it needs 8B alignment
1431 (stack_entries_ & 1) == 1; // counter is odd
1432 }
1433
1434 void AdvanceDouble(uint64_t val) {
1435 if (kNativeSoftFloatAbi) {
1436 AdvanceLong(val);
1437 } else {
1438 if (HaveDoubleFpr()) {
1439 if (DoubleFprNeedsPadding()) {
1440 PushFpr4(0);
1441 fpr_index_--;
1442 }
1443 PushFpr8(val);
1444 fpr_index_ -= kRegistersNeededForDouble;
1445 } else {
1446 if (DoubleStackNeedsPadding()) {
1447 PushStack(0);
1448 stack_entries_++;
1449 }
1450 if (kRegistersNeededForDouble == 1) {
1451 PushStack(static_cast<uintptr_t>(val));
1452 stack_entries_++;
1453 } else {
1454 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1455 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1456 stack_entries_ += 2;
1457 }
1458 fpr_index_ = 0;
1459 }
1460 }
1461 }
1462
1463 uint32_t GetStackEntries() const {
1464 return stack_entries_;
1465 }
1466
1467 uint32_t GetNumberOfUsedGprs() const {
1468 return kNumNativeGprArgs - gpr_index_;
1469 }
1470
1471 uint32_t GetNumberOfUsedFprs() const {
1472 return kNumNativeFprArgs - fpr_index_;
1473 }
1474
1475 private:
1476 void PushGpr(uintptr_t val) {
1477 delegate_->PushGpr(val);
1478 }
1479 void PushFpr4(float val) {
1480 delegate_->PushFpr4(val);
1481 }
1482 void PushFpr8(uint64_t val) {
1483 delegate_->PushFpr8(val);
1484 }
1485 void PushStack(uintptr_t val) {
1486 delegate_->PushStack(val);
1487 }
1488 uintptr_t PushHandle(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
1489 return delegate_->PushHandle(ref);
1490 }
1491
1492 uint32_t gpr_index_; // Number of free GPRs
1493 uint32_t fpr_index_; // Number of free FPRs
1494 uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not
1495 // extended
1496 T* const delegate_; // What Push implementation gets called
1497 };
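// The delegate type T only has to provide the Push* hooks called above. A minimal sketch of
// the expected interface (ComputeNativeCallFrameSize below and FillNativeCall further down
// are the two real delegates in this file):
//
//   struct Delegate {
//     void PushGpr(uintptr_t val);
//     void PushFpr4(float val);
//     void PushFpr8(uint64_t val);
//     void PushStack(uintptr_t val);
//     uintptr_t PushHandle(mirror::Object* ref);  // value passed for an object argument
//   };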
1498
1499 // Computes the sizes of register stacks and call stack area. Handling of references can be extended
1500 // in subclasses.
1501 //
1502 // To handle native pointers, use "L" in the shorty for an object reference, which simulates
1503 // them with handles.
1504 class ComputeNativeCallFrameSize {
1505 public:
1506 ComputeNativeCallFrameSize() : num_stack_entries_(0) {}
1507
1508 virtual ~ComputeNativeCallFrameSize() {}
1509
1510 uint32_t GetStackSize() const {
1511 return num_stack_entries_ * sizeof(uintptr_t);
1512 }
1513
1514 uint8_t* LayoutCallStack(uint8_t* sp8) const {
1515 sp8 -= GetStackSize();
1516 // Align by kStackAlignment.
1517 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1518 return sp8;
1519 }
1520
1521 uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr)
1522 const {
1523 // Assumption is OK right now, as we have soft-float arm
1524 size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
1525 sp8 -= fregs * sizeof(uintptr_t);
1526 *start_fpr = reinterpret_cast<uint32_t*>(sp8);
1527 size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
1528 sp8 -= iregs * sizeof(uintptr_t);
1529 *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
1530 return sp8;
1531 }
1532
1533 uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
1534 uint32_t** start_fpr) const {
1535 // Native call stack.
1536 sp8 = LayoutCallStack(sp8);
1537 *start_stack = reinterpret_cast<uintptr_t*>(sp8);
1538
1539 // Put fprs and gprs below.
1540 sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);
1541
1542 // Return the new bottom.
1543 return sp8;
1544 }
1545
1546 virtual void WalkHeader(
1547 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
1548 SHARED_REQUIRES(Locks::mutator_lock_) {
1549 }
1550
1551 void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) {
1552 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
1553
1554 WalkHeader(&sm);
1555
1556 for (uint32_t i = 1; i < shorty_len; ++i) {
1557 Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
1558 switch (cur_type_) {
1559 case Primitive::kPrimNot:
1560 // TODO: fix abuse of mirror types.
1561 sm.AdvanceHandleScope(
1562 reinterpret_cast<mirror::Object*>(0x12345678));
1563 break;
1564
1565 case Primitive::kPrimBoolean:
1566 case Primitive::kPrimByte:
1567 case Primitive::kPrimChar:
1568 case Primitive::kPrimShort:
1569 case Primitive::kPrimInt:
1570 sm.AdvanceInt(0);
1571 break;
1572 case Primitive::kPrimFloat:
1573 sm.AdvanceFloat(0);
1574 break;
1575 case Primitive::kPrimDouble:
1576 sm.AdvanceDouble(0);
1577 break;
1578 case Primitive::kPrimLong:
1579 sm.AdvanceLong(0);
1580 break;
1581 default:
1582 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
1583 UNREACHABLE();
1584 }
1585 }
1586
1587 num_stack_entries_ = sm.GetStackEntries();
1588 }
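// Example: for a native method long f(Object, int, double) the shorty is "JLID"; Walk() skips
// the return type at shorty[0] and advances a handle ('L'), an int ('I') and a double ('D'),
// on top of whatever WalkHeader() already consumed (nothing here; JNIEnv* plus the
// receiver/class handle in ComputeGenericJniFrameSize below).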
1589
1590 void PushGpr(uintptr_t /* val */) {
1591 // not optimizing registers, yet
1592 }
1593
1594 void PushFpr4(float /* val */) {
1595 // not optimizing registers, yet
1596 }
1597
1598 void PushFpr8(uint64_t /* val */) {
1599 // not optimizing registers, yet
1600 }
1601
1602 void PushStack(uintptr_t /* val */) {
1603 // counting is already done in the superclass
1604 }
1605
1606 virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
1607 return reinterpret_cast<uintptr_t>(nullptr);
1608 }
1609
1610 protected:
1611 uint32_t num_stack_entries_;
1612 };
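// ComputeNativeCallFrameSize is the sizing pass of a two-pass scheme: it drives the state
// machine with no-op Push* hooks purely to count stack entries (and, in the subclass below,
// handle-scope references). The second pass then uses FillNativeCall/FillJniCall to write the
// actual values into the areas laid out here.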
1613
1614 class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
1615 public:
1616 ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {}
1617
1618 // Lays out the callee-save frame. Assumes that the RefsAndArgs frame (not yet in its final
1619 // layout) is at *m = sp. Will update *m to point to the bottom of the save frame.
1620 //
1621 // Note: assumes ComputeAll() has been run before.
1622 void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
1623 SHARED_REQUIRES(Locks::mutator_lock_) {
1624 ArtMethod* method = **m;
1625
1626 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
1627
1628 uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
1629
1630 // First, fix up the layout of the callee-save frame.
1631 // We have to squeeze in the HandleScope, and relocate the method pointer.
1632
1633 // "Free" the slot for the method.
1634 sp8 += sizeof(void*); // In the callee-save frame we use a full pointer.
1635
1636 // Under the callee saves put handle scope and new method stack reference.
1637 size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
1638 size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*);
1639
1640 sp8 -= scope_and_method;
1641 // Align by kStackAlignment.
1642 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1643
1644 uint8_t* sp8_table = sp8 + sizeof(ArtMethod*);
1645 *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
1646 num_handle_scope_references_);
1647
1648 // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
1649 uint8_t* method_pointer = sp8;
1650 auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer);
1651 *new_method_ref = method;
1652 *m = new_method_ref;
1653 }
1654
1655 // Adds space for the cookie. Note: may leave stack unaligned.
1656 void LayoutCookie(uint8_t** sp) const {
1657 // Reference cookie and padding
1658 *sp -= 8;
1659 }
1660
1661 // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
1662 // Returns the new bottom. Note: this may be unaligned.
1663 uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
1664 SHARED_REQUIRES(Locks::mutator_lock_) {
1665 // First, fix up the layout of the callee-save frame.
1666 // We have to squeeze in the HandleScope, and relocate the method pointer.
1667 LayoutCalleeSaveFrame(self, m, sp, handle_scope);
1668
1669 // The bottom of the callee-save frame is now where the method is, *m.
1670 uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);
1671
1672 // Add space for cookie.
1673 LayoutCookie(&sp8);
1674
1675 return sp8;
1676 }
1677
1678 // WARNING: After this, *sp won't be pointing to the method anymore!
1679 uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
1680 HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr,
1681 uint32_t** start_fpr)
1682 SHARED_REQUIRES(Locks::mutator_lock_) {
1683 Walk(shorty, shorty_len);
1684
1685 // JNI part.
1686 uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope);
1687
1688 sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);
1689
1690 // Return the new bottom.
1691 return sp8;
1692 }
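// Rough shape of the area below the callee-save frame after ComputeLayout(), higher addresses
// on top (sketch for orientation only; exact padding depends on kStackAlignment):
//
//   | RefsAndArgs callee-save registers |
//   | HandleScope                       |  (overlaps the old ArtMethod* slot)
//   | ArtMethod*                        |  <- new *m / managed sp
//   | JNI cookie + padding (8 bytes)    |
//   | native call stack arguments       |
//   | FPR argument array                |
//   | GPR argument array                |  <- returned bottom of the used area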
1693
1694 uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
1695
1696 // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
1697 void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
1698 SHARED_REQUIRES(Locks::mutator_lock_);
1699
1700 private:
1701 uint32_t num_handle_scope_references_;
1702 };
1703
1704 uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
1705 num_handle_scope_references_++;
1706 return reinterpret_cast<uintptr_t>(nullptr);
1707 }
1708
1709 void ComputeGenericJniFrameSize::WalkHeader(
1710 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
1711 // JNIEnv
1712 sm->AdvancePointer(nullptr);
1713
1714 // Class object or this as first argument
1715 sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
1716 }
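// Example: for a static native method int add(int a, int b), the native function receives
// (JNIEnv*, jclass, jint, jint); WalkHeader() accounts for the first two arguments, the
// shorty-driven Walk() for the two ints.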
1717
1718 // Class to push values to three separate regions. Used to fill the native call part. Adheres to
1719 // the template requirements of BuildGenericJniFrameStateMachine.
1720 class FillNativeCall {
1721 public:
1722 FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
1723 cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
1724
1725 virtual ~FillNativeCall() {}
1726
1727 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
1728 cur_gpr_reg_ = gpr_regs;
1729 cur_fpr_reg_ = fpr_regs;
1730 cur_stack_arg_ = stack_args;
1731 }
1732
1733 void PushGpr(uintptr_t val) {
1734 *cur_gpr_reg_ = val;
1735 cur_gpr_reg_++;
1736 }
1737
1738 void PushFpr4(float val) {
1739 *cur_fpr_reg_ = val;
1740 cur_fpr_reg_++;
1741 }
1742
1743 void PushFpr8(uint64_t val) {
1744 uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1745 *tmp = val;
1746 cur_fpr_reg_ += 2;
1747 }
1748
1749 void PushStack(uintptr_t val) {
1750 *cur_stack_arg_ = val;
1751 cur_stack_arg_++;
1752 }
1753
1754 virtual uintptr_t PushHandle(mirror::Object*) SHARED_REQUIRES(Locks::mutator_lock_) {
1755 LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
1756 UNREACHABLE();
1757 }
1758
1759 private:
1760 uintptr_t* cur_gpr_reg_;
1761 uint32_t* cur_fpr_reg_;
1762 uintptr_t* cur_stack_arg_;
1763 };
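// Note: cur_fpr_reg_ is a uint32_t*, so PushFpr4() fills one 32-bit slot while PushFpr8()
// writes 64 bits and advances by two slots, which lines up with the pointer-sized reservation
// per FPR made in LayoutCallRegisterStacks() above.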
1764
1765 // Visits arguments on the stack placing them into a region lower down the stack for the benefit
1766 // of transitioning into native code.
1767 class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
1768 public:
1769 BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty, uint32_t shorty_len,
1770 ArtMethod*** sp)
1771 : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
1772 jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
1773 ComputeGenericJniFrameSize fsc;
1774 uintptr_t* start_gpr_reg;
1775 uint32_t* start_fpr_reg;
1776 uintptr_t* start_stack_arg;
1777 bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
1778 &handle_scope_,
1779 &start_stack_arg,
1780 &start_gpr_reg, &start_fpr_reg);
1781
1782 jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
1783
1784 // The JNI environment is always the first argument.
1785 sm_.AdvancePointer(self->GetJniEnv());
1786
1787 if (is_static) {
1788 sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
1789 }
1790 }
1791
1792 void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
1793
1794 void FinalizeHandleScope(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
1795
1796 StackReference<mirror::Object>* GetFirstHandleScopeEntry() {
1797 return handle_scope_->GetHandle(0).GetReference();
1798 }
1799
1800 jobject GetFirstHandleScopeJObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
1801 return handle_scope_->GetHandle(0).ToJObject();
1802 }
1803
1804 void* GetBottomOfUsedArea() const {
1805 return bottom_of_used_area_;
1806 }
1807
1808 private:
1809 // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
1810 class FillJniCall FINAL : public FillNativeCall {
1811 public:
1812 FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
1813 HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
1814 handle_scope_(handle_scope), cur_entry_(0) {}
1815
1816 uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
1817
1818 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
1819 FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
1820 handle_scope_ = scope;
1821 cur_entry_ = 0U;
1822 }
1823
1824 void ResetRemainingScopeSlots() SHARED_REQUIRES(Locks::mutator_lock_) {
1825 // Initialize padding entries.
1826 size_t expected_slots = handle_scope_->NumberOfReferences();
1827 while (cur_entry_ < expected_slots) {
1828 handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
1829 }
1830 DCHECK_NE(cur_entry_, 0U);
1831 }
1832
1833 private:
1834 HandleScope* handle_scope_;
1835 size_t cur_entry_;
1836 };
1837
1838 HandleScope* handle_scope_;
1839 FillJniCall jni_call_;
1840 void* bottom_of_used_area_;
1841
1842 BuildNativeCallFrameStateMachine<FillJniCall> sm_;
1843
1844 DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
1845 };
1846
1847 uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
1848 uintptr_t tmp;
1849 MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
1850 h.Assign(ref);
1851 tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
1852 cur_entry_++;
1853 return tmp;
1854 }
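// Note: Handle::ToJObject() is expected to yield nullptr for a null reference, so a null
// object argument reaches the native code as a null jobject rather than as a pointer to a
// cleared handle-scope slot.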
1855
1856 void BuildGenericJniFrameVisitor::Visit() {
1857 Primitive::Type type = GetParamPrimitiveType();
1858 switch (type) {
1859 case Primitive::kPrimLong: {
1860 jlong long_arg;
1861 if (IsSplitLongOrDouble()) {
1862 long_arg = ReadSplitLongParam();
1863 } else {
1864 long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
1865 }
1866 sm_.AdvanceLong(long_arg);
1867 break;
1868 }
1869 case Primitive::kPrimDouble: {
1870 uint64_t double_arg;
1871 if (IsSplitLongOrDouble()) {
1872 // Read the raw bits so that we don't cast through a double.
1873 double_arg = ReadSplitLongParam();
1874 } else {
1875 double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
1876 }
1877 sm_.AdvanceDouble(double_arg);
1878 break;
1879 }
1880 case Primitive::kPrimNot: {
1881 StackReference<mirror::Object>* stack_ref =
1882 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1883 sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
1884 break;
1885 }
1886 case Primitive::kPrimFloat:
1887 sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
1888 break;
1889 case Primitive::kPrimBoolean: // Fall-through.
1890 case Primitive::kPrimByte: // Fall-through.
1891 case Primitive::kPrimChar: // Fall-through.
1892 case Primitive::kPrimShort: // Fall-through.
1893 case Primitive::kPrimInt: // Fall-through.
1894 sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
1895 break;
1896 case Primitive::kPrimVoid:
1897 LOG(FATAL) << "UNREACHABLE";
1898 UNREACHABLE();
1899 }
1900 }
1901
1902 void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
1903 // Clear out the rest of the scope.
1904 jni_call_.ResetRemainingScopeSlots();
1905 // Install HandleScope.
1906 self->PushHandleScope(handle_scope_);
1907 }
1908
1909 #if defined(__arm__) || defined(__aarch64__)
1910 extern "C" void* artFindNativeMethod();
1911 #else
1912 extern "C" void* artFindNativeMethod(Thread* self);
1913 #endif
1914
1915 uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
1916 if (lock != nullptr) {
1917 return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
1918 } else {
1919 return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
1920 }
1921 }
1922
1923 void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
1924 if (lock != nullptr) {
1925 JniMethodEndSynchronized(cookie, lock, self);
1926 } else {
1927 JniMethodEnd(cookie, self);
1928 }
1929 }
1930
1931 /*
1932 * Initializes an alloca region assumed to be directly below sp for a native call:
1933 * Create a HandleScope and call stack and fill a mini stack with values to be pushed to registers.
1934 * The final element on the stack is a pointer to the native code.
1935 *
1936 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
1937 * We need to fix this, as the handle scope needs to go into the callee-save frame.
1938 *
1939 * The return of this function denotes:
1940 * 1) On success: the bottom of the used alloca area (hi) and the native code to call (lo).
1941 * 2) On error (pending exception): the two-word failure value.
1942 */
1943 extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
1944 SHARED_REQUIRES(Locks::mutator_lock_) {
1945 ArtMethod* called = *sp;
1946 DCHECK(called->IsNative()) << PrettyMethod(called, true);
1947 uint32_t shorty_len = 0;
1948 const char* shorty = called->GetShorty(&shorty_len);
1949
1950 // Run the visitor and update sp.
1951 BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
1952 visitor.VisitArguments();
1953 visitor.FinalizeHandleScope(self);
1954
1955 // Fix up managed-stack things in Thread.
1956 self->SetTopOfStack(sp);
1957
1958 self->VerifyStack();
1959
1960 // Start JNI, save the cookie.
1961 uint32_t cookie;
1962 if (called->IsSynchronized()) {
1963 cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
1964 if (self->IsExceptionPending()) {
1965 self->PopHandleScope();
1966 // A negative value denotes an error.
1967 return GetTwoWordFailureValue();
1968 }
1969 } else {
1970 cookie = JniMethodStart(self);
1971 }
1972 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1973 *(sp32 - 1) = cookie;
1974
1975 // Retrieve the stored native code.
1976 void* nativeCode = called->GetEntryPointFromJni();
1977
1978 // There are two cases for the content of nativeCode:
1979 // 1) Pointer to the native function.
1980 // 2) Pointer to the trampoline for native code binding.
1981 // In the second case, we need to execute the binding and continue with the actual native function
1982 // pointer.
1983 DCHECK(nativeCode != nullptr);
1984 if (nativeCode == GetJniDlsymLookupStub()) {
1985 #if defined(__arm__) || defined(__aarch64__)
1986 nativeCode = artFindNativeMethod();
1987 #else
1988 nativeCode = artFindNativeMethod(self);
1989 #endif
1990
1991 if (nativeCode == nullptr) {
1992 DCHECK(self->IsExceptionPending()); // There should be an exception pending now.
1993
1994 // End JNI, as the assembly will move to deliver the exception.
1995 jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
1996 if (shorty[0] == 'L') {
1997 artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
1998 } else {
1999 artQuickGenericJniEndJNINonRef(self, cookie, lock);
2000 }
2001
2002 return GetTwoWordFailureValue();
2003 }
2004 // Note that the native code pointer will be automatically set by artFindNativeMethod().
2005 }
2006
2007 // Return native code addr(lo) and bottom of alloca address(hi).
2008 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
2009 reinterpret_cast<uintptr_t>(nativeCode));
2010 }
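// A rough sketch of how the per-architecture art_quick_generic_jni_trampoline assembly stubs
// use the two returned words: on failure they deliver the pending exception; on success they
// drop the stack down to the returned bottom of the used area, load the prepared GPR/FPR
// argument arrays from it, and call the returned native code (artQuickGenericJniEndTrampoline
// below then runs once the native code returns).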
2011
2012 // Defined in quick_jni_entrypoints.cc.
2013 extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie,
2014 jvalue result, uint64_t result_f, ArtMethod* called,
2015 HandleScope* handle_scope);
2016 /*
2017 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
2018 * unlocking.
2019 */
2020 extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
2021 jvalue result,
2022 uint64_t result_f) {
2023 // We're here just back from a native call. We don't have the shared mutator lock at this point
2024 // yet until we call GoToRunnable() later in GenericJniMethodEnd(). Accessing objects or doing
2025 // anything that requires a mutator lock before that would cause problems as GC may have the
2026 // exclusive mutator lock and may be moving objects, etc.
2027 ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
2028 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
2029 ArtMethod* called = *sp;
2030 uint32_t cookie = *(sp32 - 1);
2031 HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp));
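// This mirrors LayoutCalleeSaveFrame(): the HandleScope was placed directly above the
// relocated ArtMethod* slot, so sp plus one pointer is exactly where HandleScope::Create()
// put it.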
2032 return GenericJniMethodEnd(self, cookie, result, result_f, called, table);
2033 }
2034
2035 // We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
2036 // for the method pointer.
2037 //
2038 // It is valid to use this, as at the usage points here (returns from C functions) we are assuming
2039 // to hold the mutator lock (see SHARED_REQUIRES(Locks::mutator_lock_) annotations).
2040
2041 template<InvokeType type, bool access_check>
2042 static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self,
2043 ArtMethod** sp) {
2044 ScopedQuickEntrypointChecks sqec(self);
2045 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
2046 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2047 ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
2048 if (UNLIKELY(method == nullptr)) {
2049 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
2050 uint32_t shorty_len;
2051 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
2052 {
2053 // Remember the args in case a GC happens in FindMethodFromCode.
2054 ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2055 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
2056 visitor.VisitArguments();
2057 method = FindMethodFromCode<type, access_check>(method_idx, &this_object, caller_method,
2058 self);
2059 visitor.FixupReferences();
2060 }
2061
2062 if (UNLIKELY(method == nullptr)) {
2063 CHECK(self->IsExceptionPending());
2064 return GetTwoWordFailureValue(); // Failure.
2065 }
2066 }
2067 DCHECK(!self->IsExceptionPending());
2068 const void* code = method->GetEntryPointFromQuickCompiledCode();
2069
2070 // When we return, the caller will branch to this address, so it had better not be 0!
2071 DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
2072 << " location: "
2073 << method->GetDexFile()->GetLocation();
2074
2075 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2076 reinterpret_cast<uintptr_t>(method));
2077 }
2078
2079 // Explicit artInvokeCommon template function declarations to please analysis tool.
2080 #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \
2081 template SHARED_REQUIRES(Locks::mutator_lock_) \
2082 TwoWordReturn artInvokeCommon<type, access_check>( \
2083 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2084
2085 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
2086 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
2087 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
2088 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
2089 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
2090 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
2091 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
2092 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
2093 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
2094 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
2095 #undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
2096
2097 // See comments in runtime_support_asm.S
2098 extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
2099 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2100 SHARED_REQUIRES(Locks::mutator_lock_) {
2101 return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
2102 }
2103
2104 extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
2105 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2106 SHARED_REQUIRES(Locks::mutator_lock_) {
2107 return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
2108 }
2109
2110 extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
2111 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2112 SHARED_REQUIRES(Locks::mutator_lock_) {
2113 return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp);
2114 }
2115
2116 extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
2117 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2118 SHARED_REQUIRES(Locks::mutator_lock_) {
2119 return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
2120 }
2121
2122 extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
2123 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2124 SHARED_REQUIRES(Locks::mutator_lock_) {
2125 return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
2126 }
2127
2128 // Determine target of interface dispatch. This object is known non-null. First argument
2129 // is there for consistency but should not be used, as some architectures overwrite it
2130 // in the assembly trampoline.
2131 extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUTE_UNUSED,
2132 mirror::Object* this_object,
2133 Thread* self,
2134 ArtMethod** sp)
2135 SHARED_REQUIRES(Locks::mutator_lock_) {
2136 ScopedQuickEntrypointChecks sqec(self);
2137 StackHandleScope<1> hs(self);
2138 Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass()));
2139
2140 // The optimizing compiler currently does not inline methods that have an interface
2141 // invocation. We use the outer method directly to avoid fetching a stack map, which is
2142 // more expensive.
2143 ArtMethod* caller_method = QuickArgumentVisitor::GetOuterMethod(sp);
2144 DCHECK_EQ(caller_method, QuickArgumentVisitor::GetCallingMethod(sp));
2145
2146 // Fetch the dex_method_idx of the target interface method from the caller.
2147 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
2148
2149 const DexFile::CodeItem* code_item = caller_method->GetCodeItem();
2150 CHECK_LT(dex_pc, code_item->insns_size_in_code_units_);
2151 const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]);
2152 Instruction::Code instr_code = instr->Opcode();
2153 CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
2154 instr_code == Instruction::INVOKE_INTERFACE_RANGE)
2155 << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
2156 uint32_t dex_method_idx;
2157 if (instr_code == Instruction::INVOKE_INTERFACE) {
2158 dex_method_idx = instr->VRegB_35c();
2159 } else {
2160 CHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
2161 dex_method_idx = instr->VRegB_3rc();
2162 }
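// Both encodings carry the target method index in their B field (meth@BBBB in the 35c and
// 3rc instruction formats), hence VRegB_35c()/VRegB_3rc() above.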
2163
2164 ArtMethod* interface_method = caller_method->GetDexCacheResolvedMethod(
2165 dex_method_idx, sizeof(void*));
2166 DCHECK(interface_method != nullptr) << dex_method_idx << " " << PrettyMethod(caller_method);
2167 ArtMethod* method = nullptr;
2168
2169 if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
2170 // If the dex cache already resolved the interface method, look whether we have
2171 // a match in the ImtConflictTable.
2172 uint32_t imt_index = interface_method->GetDexMethodIndex();
2173 ArtMethod* conflict_method = cls->GetEmbeddedImTableEntry(
2174 imt_index % mirror::Class::kImtSize, sizeof(void*));
2175 if (LIKELY(conflict_method->IsRuntimeMethod())) {
2176 ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
2177 DCHECK(current_table != nullptr);
2178 method = current_table->Lookup(interface_method, sizeof(void*));
2179 } else {
2180 // The IMT slot holds a concrete method, not a conflict method; resolve through the interface table.
2181 method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*));
2182 }
2183 if (method != nullptr) {
2184 return GetTwoWordSuccessValue(
2185 reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()),
2186 reinterpret_cast<uintptr_t>(method));
2187 }
2188
2189 // No match, use the IfTable.
2190 method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*));
2191 if (UNLIKELY(method == nullptr)) {
2192 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
2193 interface_method, this_object, caller_method);
2194 return GetTwoWordFailureValue(); // Failure.
2195 }
2196 } else {
2197 // The dex cache did not resolve the method; look it up in the dex file
2198 // of the caller.
2199 DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod());
2200 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
2201 ->GetDexFile();
2202 uint32_t shorty_len;
2203 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
2204 &shorty_len);
2205 {
2206 // Remember the args in case a GC happens in FindMethodFromCode.
2207 ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2208 RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
2209 visitor.VisitArguments();
2210 method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, caller_method,
2211 self);
2212 visitor.FixupReferences();
2213 }
2214
2215 if (UNLIKELY(method == nullptr)) {
2216 CHECK(self->IsExceptionPending());
2217 return GetTwoWordFailureValue(); // Failure.
2218 }
2219 interface_method = caller_method->GetDexCacheResolvedMethod(dex_method_idx, sizeof(void*));
2220 DCHECK(!interface_method->IsRuntimeMethod());
2221 }
2222
2223 // We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
2224 // We create a new table with the new pair { interface_method, method }.
2225 uint32_t imt_index = interface_method->GetDexMethodIndex();
2226 ArtMethod* conflict_method = cls->GetEmbeddedImTableEntry(
2227 imt_index % mirror::Class::kImtSize, sizeof(void*));
2228 if (conflict_method->IsRuntimeMethod()) {
2229 ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
2230 cls.Get(),
2231 conflict_method,
2232 interface_method,
2233 method,
2234 /*force_new_conflict_method*/false);
2235 if (new_conflict_method != conflict_method) {
2236 // Update the IMT if we create a new conflict method. No fence needed here, as the
2237 // data is consistent.
2238 cls->SetEmbeddedImTableEntry(imt_index % mirror::Class::kImtSize,
2239 new_conflict_method,
2240 sizeof(void*));
2241 }
2242 }
2243
2244 const void* code = method->GetEntryPointFromQuickCompiledCode();
2245
2246 // When we return, the caller will branch to this address, so it had better not be 0!
2247 DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
2248 << " location: " << method->GetDexFile()->GetLocation();
2249
2250 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2251 reinterpret_cast<uintptr_t>(method));
2252 }
2253
2254 } // namespace art
2255