/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_arm.h"

#include <android-base/logging.h>

#include "arch/arm/jni_frame_arm.h"
#include "arch/instruction_set.h"
#include "base/macros.h"
#include "handle_scope-inl.h"
#include "utils/arm/managed_register_arm.h"

namespace art {
namespace arm {

static_assert(kArmPointerSize == PointerSize::k32, "Unexpected ARM pointer size");

//
// JNI calling convention constants.
//

// List of parameters passed via registers for JNI.
// JNI uses soft-float, so there is only a GPR list.
static const Register kJniArgumentRegisters[] = {
  R0, R1, R2, R3
};

static_assert(kJniArgumentRegisterCount == arraysize(kJniArgumentRegisters));

//
// Managed calling convention constants.
//

// Used by hard float. (General purpose registers.)
static const Register kHFCoreArgumentRegisters[] = {
  R0, R1, R2, R3
};

// (VFP single-precision registers.)
static const SRegister kHFSArgumentRegisters[] = {
  S0, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14, S15
};

// (VFP double-precision registers.)
static const DRegister kHFDArgumentRegisters[] = {
  D0, D1, D2, D3, D4, D5, D6, D7
};

static_assert(arraysize(kHFDArgumentRegisters) * 2 == arraysize(kHFSArgumentRegisters),
    "S and D argument registers mismatch");
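// Note: D<n> aliases S<2n> and S<2n+1> in the VFP register file, so the S and D
// argument lists above name the same 16 underlying argument registers (s0-s15 / d0-d7).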

//
// Shared managed+JNI calling convention constants.
//

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    ArmManagedRegister::FromCoreRegister(R5),
    ArmManagedRegister::FromCoreRegister(R6),
    ArmManagedRegister::FromCoreRegister(R7),
    ArmManagedRegister::FromCoreRegister(R8),
    ArmManagedRegister::FromCoreRegister(R10),
    ArmManagedRegister::FromCoreRegister(R11),
    ArmManagedRegister::FromCoreRegister(LR),
    // Hard float registers.
    ArmManagedRegister::FromSRegister(S16),
    ArmManagedRegister::FromSRegister(S17),
    ArmManagedRegister::FromSRegister(S18),
    ArmManagedRegister::FromSRegister(S19),
    ArmManagedRegister::FromSRegister(S20),
    ArmManagedRegister::FromSRegister(S21),
    ArmManagedRegister::FromSRegister(S22),
    ArmManagedRegister::FromSRegister(S23),
    ArmManagedRegister::FromSRegister(S24),
    ArmManagedRegister::FromSRegister(S25),
    ArmManagedRegister::FromSRegister(S26),
    ArmManagedRegister::FromSRegister(S27),
    ArmManagedRegister::FromSRegister(S28),
    ArmManagedRegister::FromSRegister(S29),
    ArmManagedRegister::FromSRegister(S30),
    ArmManagedRegister::FromSRegister(S31)
};

template <size_t size>
static constexpr uint32_t CalculateCoreCalleeSpillMask(
    const ManagedRegister (&callee_saves)[size]) {
  // LR is a special callee save which is not reported by CalleeSaveRegisters().
  uint32_t result = 0u;
  for (auto&& r : callee_saves) {
    if (r.AsArm().IsCoreRegister()) {
      result |= (1u << r.AsArm().AsCoreRegister());
    }
  }
  return result;
}

template <size_t size>
static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) {
  uint32_t result = 0u;
  for (auto&& r : callee_saves) {
    if (r.AsArm().IsSRegister()) {
      result |= (1u << r.AsArm().AsSRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters);
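// For the register lists above these evaluate to 0x4de0 (r5-r8, r10, r11 and lr)
// and 0xffff0000 (s16-s31), respectively.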

static constexpr ManagedRegister kAapcsCalleeSaveRegisters[] = {
    // Core registers.
    ArmManagedRegister::FromCoreRegister(R4),
    ArmManagedRegister::FromCoreRegister(R5),
    ArmManagedRegister::FromCoreRegister(R6),
    ArmManagedRegister::FromCoreRegister(R7),
    ArmManagedRegister::FromCoreRegister(R8),
    ArmManagedRegister::FromCoreRegister(R9),  // The platform register is callee-save on Android.
    ArmManagedRegister::FromCoreRegister(R10),
    ArmManagedRegister::FromCoreRegister(R11),
    ArmManagedRegister::FromCoreRegister(LR),
    // Hard float registers.
    ArmManagedRegister::FromSRegister(S16),
    ArmManagedRegister::FromSRegister(S17),
    ArmManagedRegister::FromSRegister(S18),
    ArmManagedRegister::FromSRegister(S19),
    ArmManagedRegister::FromSRegister(S20),
    ArmManagedRegister::FromSRegister(S21),
    ArmManagedRegister::FromSRegister(S22),
    ArmManagedRegister::FromSRegister(S23),
    ArmManagedRegister::FromSRegister(S24),
    ArmManagedRegister::FromSRegister(S25),
    ArmManagedRegister::FromSRegister(S26),
    ArmManagedRegister::FromSRegister(S27),
    ArmManagedRegister::FromSRegister(S28),
    ArmManagedRegister::FromSRegister(S29),
    ArmManagedRegister::FromSRegister(S30),
    ArmManagedRegister::FromSRegister(S31)
};

static constexpr uint32_t kAapcsCoreCalleeSpillMask =
    CalculateCoreCalleeSpillMask(kAapcsCalleeSaveRegisters);
static constexpr uint32_t kAapcsFpCalleeSpillMask =
    CalculateFpCalleeSpillMask(kAapcsCalleeSaveRegisters);

// Calling convention

ManagedRegister ArmManagedRuntimeCallingConvention::InterproceduralScratchRegister() const {
  return ArmManagedRegister::FromCoreRegister(IP);  // R12
}

ManagedRegister ArmJniCallingConvention::InterproceduralScratchRegister() const {
  return ArmManagedRegister::FromCoreRegister(IP);  // R12
}

ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() {
  switch (GetShorty()[0]) {
    case 'V':
      return ArmManagedRegister::NoRegister();
    case 'D':
      return ArmManagedRegister::FromDRegister(D0);
    case 'F':
      return ArmManagedRegister::FromSRegister(S0);
    case 'J':
      return ArmManagedRegister::FromRegisterPair(R0_R1);
    default:
      return ArmManagedRegister::FromCoreRegister(R0);
  }
}

ManagedRegister ArmJniCallingConvention::ReturnRegister() {
  switch (GetShorty()[0]) {
    case 'V':
      return ArmManagedRegister::NoRegister();
    case 'D':
    case 'J':
      return ArmManagedRegister::FromRegisterPair(R0_R1);
    default:
      return ArmManagedRegister::FromCoreRegister(R0);
  }
}

ManagedRegister ArmJniCallingConvention::IntReturnRegister() {
  return ArmManagedRegister::FromCoreRegister(R0);
}

// Managed runtime calling convention

ManagedRegister ArmManagedRuntimeCallingConvention::MethodRegister() {
  return ArmManagedRegister::FromCoreRegister(R0);
}

bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything moved to stack on entry.
}

bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return true;
}

ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  UNREACHABLE();
}

FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  return FrameOffset(displacement_.Int32Value() +        // displacement
                     kFramePointerSize +                 // Method*
                     (itr_slots_ * kFramePointerSize));  // offset into in args
}

const ManagedRegisterEntrySpills& ArmManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on ARM to free them up for scratch use; we then assume
  // all arguments are on the stack.
  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
    uint32_t gpr_index = 1;  // R0 ~ R3. Reserve r0 for ArtMethod*.
    uint32_t fpr_index = 0;  // S0 ~ S15.
    uint32_t fpr_double_index = 0;  // D0 ~ D7.

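    // Doubles and floats are tracked with separate indices so that a later float can
    // back-fill a single-precision register skipped by an earlier double (AAPCS VFP
    // back-filling). For example, for arguments (float, double, float) the entry
    // spills are S0, D1 and then S1.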
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      if (IsCurrentParamAFloatOrDouble()) {
        if (IsCurrentParamADouble()) {  // Double.
          // Double should not overlap with float.
          fpr_double_index = (std::max(fpr_double_index * 2, RoundUp(fpr_index, 2))) / 2;
          if (fpr_double_index < arraysize(kHFDArgumentRegisters)) {
            entry_spills_.push_back(
                ArmManagedRegister::FromDRegister(kHFDArgumentRegisters[fpr_double_index++]));
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
          }
        } else {  // Float.
          // Float should not overlap with double.
          if (fpr_index % 2 == 0) {
            fpr_index = std::max(fpr_double_index * 2, fpr_index);
          }
          if (fpr_index < arraysize(kHFSArgumentRegisters)) {
            entry_spills_.push_back(
                ArmManagedRegister::FromSRegister(kHFSArgumentRegisters[fpr_index++]));
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          }
        }
      } else {
        // FIXME: A pointer parameter is reported as both a reference and a long.
        if (IsCurrentParamALong() && !IsCurrentParamAReference()) {  // Long.
          if (gpr_index < arraysize(kHFCoreArgumentRegisters) - 1) {
            // Skip R1, and use R2_R3 if the long is the first parameter.
            if (gpr_index == 1) {
              gpr_index++;
            }
          }

          // If it spans register and memory, we must use the value in memory.
          if (gpr_index < arraysize(kHFCoreArgumentRegisters) - 1) {
            entry_spills_.push_back(
                ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index++]));
          } else if (gpr_index == arraysize(kHFCoreArgumentRegisters) - 1) {
            gpr_index++;
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          }
        }
        // High part of long or 32-bit argument.
        if (gpr_index < arraysize(kHFCoreArgumentRegisters)) {
          entry_spills_.push_back(
              ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index++]));
        } else {
          entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
        }
      }
      Next();
    }
  }
  return entry_spills_;
}

// JNI calling convention

ArmJniCallingConvention::ArmJniCallingConvention(bool is_static,
                                                 bool is_synchronized,
                                                 bool is_critical_native,
                                                 const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kArmPointerSize) {
  // AAPCS 4.1 specifies fundamental alignments for each type. All of our stack arguments are
  // usually 4-byte aligned; however, longs and doubles must be 8-byte aligned. Add padding to
  // maintain the 8-byte alignment invariant.
  //
  // Compute padding to ensure longs and doubles are not split in AAPCS.
  size_t shift = 0;

  size_t cur_arg, cur_reg;
  if (LIKELY(HasExtraArgumentsForJni())) {
    // Ignore the 'this' jobject or jclass for static methods and the JNIEnv.
    // We start at the aligned register r2.
    //
    // Ignore the first 2 parameters because they are guaranteed to be aligned.
    cur_arg = NumImplicitArgs();  // skip the "this" arg.
    cur_reg = 2;  // skip {r0=JNIEnv, r1=jobject} / {r0=JNIEnv, r1=jclass} parameters (start at r2).
  } else {
    // Check every parameter.
    cur_arg = 0;
    cur_reg = 0;
  }

  // TODO: Maybe should just use IsCurrentParamALongOrDouble instead to be cleaner?
  // (this just seems like an unnecessary micro-optimization).

  // Shift across a logical register mapping that looks like:
  //
  //   | r0 | r1 | r2 | r3 | SP | SP+4| SP+8 | SP+12 | ... | SP+n | SP+n+4 |
  //
  //   (where SP is some arbitrary stack pointer that our 0th stack arg would go into).
  //
  // Any time there would normally be a long/double in an odd logical register,
  // we have to push out the rest of the mappings by 4 bytes to maintain an 8-byte alignment.
  //
  // This works for both physical register pairs {r0, r1}, {r2, r3} and for when
  // the value is on the stack.
  //
  // For example:
  // (a) long would normally go into r1, but we shift it into r2
  //  | INT | (PAD) | LONG      |
  //  | r0  |  r1   |  r2  | r3 |
  //
  // (b) long would normally go into r3, but we shift it into SP
  //  | INT | INT | INT | (PAD) | LONG     |
  //  | r0  |  r1 |  r2 |  r3   | SP+4 SP+8|
  //
  // where INT is any <=4 byte arg, and LONG is any 8-byte arg.
  for (; cur_arg < NumArgs(); cur_arg++) {
    if (IsParamALongOrDouble(cur_arg)) {
      if ((cur_reg & 1) != 0) {  // check that it's in a logical contiguous register pair
        shift += 4;
        cur_reg++;  // additional bump to ensure alignment
      }
      cur_reg += 2;  // bump the iterator twice for every long argument
    } else {
      cur_reg++;  // bump the iterator for every non-long argument
    }
  }

  if (cur_reg <= kJniArgumentRegisterCount) {
    // As a special case, when shifting (or not) leaves no arguments on the stack,
    // we actually have 0 stack padding.
    //
    // For example with @CriticalNative and:
    // (int, long) -> shifts the long but doesn't need to pad the stack
    //
    //          shift
    //           \/
    //  | INT | (PAD) | LONG      | (EMPTY) ...
    //  | r0  |  r1   |  r2  | r3 |   SP    ...
    //                                /\
    //                          no stack padding
    padding_ = 0;
  } else {
    padding_ = shift;
  }

  // TODO: Add new JNI tests for @CriticalNative covering these edge cases:
  // (a) Using the r0,r1 pair: f(long, ...)
  // (b) Shifting an r1 long into the r2,r3 pair: f(int, long, int, ...)
  // (c) Shifting without introducing stack padding: f(int, long)
}

uint32_t ArmJniCallingConvention::CoreSpillMask() const {
  // Compute spill mask to agree with callee saves initialized in the constructor
  return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
}

uint32_t ArmJniCallingConvention::FpSpillMask() const {
  return is_critical_native_ ? 0u : kFpCalleeSpillMask;
}

ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
  return ArmManagedRegister::FromCoreRegister(R2);
}

size_t ArmJniCallingConvention::FrameSize() const {
  if (UNLIKELY(is_critical_native_)) {
    CHECK(!SpillsMethod());
    CHECK(!HasLocalReferenceSegmentState());
    CHECK(!HasHandleScope());
    CHECK(!SpillsReturnValue());
    return 0u;  // There is no managed frame for @CriticalNative.
  }

  // Method*, callee save area size, local reference segment state
  CHECK(SpillsMethod());
  const size_t method_ptr_size = static_cast<size_t>(kArmPointerSize);
  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
  size_t total_size = method_ptr_size + callee_save_area_size;

  CHECK(HasLocalReferenceSegmentState());
  // Local reference segment state.
  total_size += kFramePointerSize;
  // TODO: Probably better to use sizeof(IRTSegmentState) here...

  CHECK(HasHandleScope());
  total_size += HandleScope::SizeOf(kArmPointerSize, ReferenceCount());

  // Plus return value spill area size.
  CHECK(SpillsReturnValue());
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
}

size_t ArmJniCallingConvention::OutArgSize() const {
  // Count param args, including JNIEnv* and jclass; count 8-byte args twice.
  size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs() + NumLongOrDoubleArgs();
  // Account for arguments passed through r0-r3. (No FP args, AAPCS32 is soft-float.)
  size_t stack_args = all_args - std::min(kJniArgumentRegisterCount, all_args);
  // The size of outgoing arguments.
  size_t size = stack_args * kFramePointerSize + padding_;

  // @CriticalNative can use tail call as all managed callee saves are preserved by AAPCS.
  static_assert((kCoreCalleeSpillMask & ~kAapcsCoreCalleeSpillMask) == 0u);
  static_assert((kFpCalleeSpillMask & ~kAapcsFpCalleeSpillMask) == 0u);

  // For @CriticalNative, we can make a tail call if there are no stack args and the
  // return type is not an FP type (otherwise we need to move the result to FP register).
  DCHECK(!RequiresSmallResultTypeExtension());
  if (is_critical_native_ && (size != 0u || GetShorty()[0] == 'F' || GetShorty()[0] == 'D')) {
    size += kFramePointerSize;  // We need to spill LR with the args.
  }
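  // AAPCS requires the stack to be 8-byte aligned at public interfaces,
  // hence the round-up to kAapcsStackAlignment.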
  size_t out_args_size = RoundUp(size, kAapcsStackAlignment);
  if (UNLIKELY(IsCriticalNative())) {
    DCHECK_EQ(out_args_size, GetCriticalNativeOutArgsSize(GetShorty(), NumArgs() + 1u));
  }
  return out_args_size;
}

ArrayRef<const ManagedRegister> ArmJniCallingConvention::CalleeSaveRegisters() const {
  if (UNLIKELY(IsCriticalNative())) {
    if (UseTailCall()) {
      return ArrayRef<const ManagedRegister>();  // Do not spill anything.
    } else {
      // Spill LR with out args.
      static_assert((kCoreCalleeSpillMask >> LR) == 1u);  // Contains LR as the highest bit.
      constexpr size_t lr_index = POPCOUNT(kCoreCalleeSpillMask) - 1u;
      static_assert(kCalleeSaveRegisters[lr_index].Equals(
                        ArmManagedRegister::FromCoreRegister(LR)));
      return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
          /*pos=*/ lr_index, /*length=*/ 1u);
    }
  } else {
    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
  }
}

// The JNI calling convention follows AAPCS: longs and doubles must start at an even
// register number and an even stack slot index.
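// For example, for a static native method with arguments (int, long), the slots are
// JNIEnv* -> r0, jclass -> r1, int -> r2; the long skips the odd slot r3 and starts
// at the first 8-byte aligned stack slot instead.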
void ArmJniCallingConvention::Next() {
  // Update the iterator by usual JNI rules.
  JniCallingConvention::Next();

  if (LIKELY(HasNext())) {  // Avoid CHECK failure for IsCurrentParam
    // Ensure slot is 8-byte aligned for longs/doubles (AAPCS).
    if (IsCurrentParamALongOrDouble() && ((itr_slots_ & 0x1u) != 0)) {
      // itr_slots_ needs to be an even number, according to AAPCS.
      itr_slots_++;
    }
  }
}

bool ArmJniCallingConvention::IsCurrentParamInRegister() {
  return itr_slots_ < kJniArgumentRegisterCount;
}

bool ArmJniCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}

ManagedRegister ArmJniCallingConvention::CurrentParamRegister() {
  CHECK_LT(itr_slots_, kJniArgumentRegisterCount);
  if (IsCurrentParamALongOrDouble()) {
    // AAPCS 5.1.1 requires 64-bit values to be in a consecutive register pair:
    // "A double-word sized type is passed in two consecutive registers (e.g., r0 and r1, or r2 and
    // r3). The content of the registers is as if the value had been loaded from memory
    // representation with a single LDM instruction."
    if (itr_slots_ == 0u) {
      return ArmManagedRegister::FromRegisterPair(R0_R1);
    } else if (itr_slots_ == 2u) {
      return ArmManagedRegister::FromRegisterPair(R2_R3);
    } else {
      // The register can either be R0 (+R1) or R2 (+R3). Cannot be other values.
      LOG(FATAL) << "Invalid iterator register position for a long/double " << itr_args_;
      UNREACHABLE();
    }
  } else {
    // All other types can fit into one register.
    return ArmManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
  }
}

FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() {
  CHECK_GE(itr_slots_, kJniArgumentRegisterCount);
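  // Stack arguments are placed in the out-args area; the first kJniArgumentRegisterCount
  // slots are passed in r0-r3 and have no stack home, so slot N maps to offset
  // (N - kJniArgumentRegisterCount) * 4 within that area.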
  size_t offset =
      displacement_.Int32Value()
          - OutArgSize()
          + ((itr_slots_ - kJniArgumentRegisterCount) * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
}

ManagedRegister ArmJniCallingConvention::HiddenArgumentRegister() const {
  CHECK(IsCriticalNative());
  // R4 is neither managed callee-save, nor argument register, nor scratch register.
  // (It is native callee-save but the value coming from managed code can be clobbered.)
  // TODO: Change to static_assert; std::none_of should be constexpr since C++20.
  DCHECK(std::none_of(kCalleeSaveRegisters,
                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
                      [](ManagedRegister callee_save) constexpr {
                        return callee_save.Equals(ArmManagedRegister::FromCoreRegister(R4));
                      }));
  DCHECK(std::none_of(kJniArgumentRegisters,
                      kJniArgumentRegisters + std::size(kJniArgumentRegisters),
                      [](Register reg) { return reg == R4; }));
  DCHECK(!InterproceduralScratchRegister().Equals(ArmManagedRegister::FromCoreRegister(R4)));
  return ArmManagedRegister::FromCoreRegister(R4);
}

// Whether to use tail call (used only for @CriticalNative).
bool ArmJniCallingConvention::UseTailCall() const {
  CHECK(IsCriticalNative());
  return OutArgSize() == 0u;
}

}  // namespace arm
}  // namespace art