/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_arm64.h"

#include <android-base/logging.h>

#include "arch/instruction_set.h"
#include "handle_scope-inl.h"
#include "utils/arm64/managed_register_arm64.h"

namespace art {
namespace arm64 {

static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size");

// Up to how many float-like (float, double) args can be enregistered.
// The rest of the args must go on the stack.
constexpr size_t kMaxFloatOrDoubleRegisterArguments = 8u;
// Up to how many integer-like (pointers, objects, longs, int, short, bool, etc.) args can be
// enregistered. The rest of the args must go on the stack.
constexpr size_t kMaxIntLikeRegisterArguments = 8u;
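
// Both limits follow the AAPCS64 procedure call standard, which passes the
// first eight integer-like arguments in X0-X7 (or their W views) and the
// first eight floating-point arguments in V0-V7 (viewed here as D or S).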

static const XRegister kXArgumentRegisters[] = {
  X0, X1, X2, X3, X4, X5, X6, X7
};

static const WRegister kWArgumentRegisters[] = {
  W0, W1, W2, W3, W4, W5, W6, W7
};

static const DRegister kDArgumentRegisters[] = {
  D0, D1, D2, D3, D4, D5, D6, D7
};

static const SRegister kSArgumentRegisters[] = {
  S0, S1, S2, S3, S4, S5, S6, S7
};

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    // Note: The native JNI function may call VM runtime functions which may suspend
    // or trigger GC, and the JNI method frame then becomes the top quick frame. So,
    // to satisfy the GC, we save LR and the callee-save registers, similar to a
    // CalleeSaveMethod(RefsOnly) frame.
    // The JNI function is the native function that the Java code wants to call.
    // The JNI method is the stub compiled by the JNI compiler.
    // Call chain: managed code (Java) --> JNI method --> JNI function.
    // The thread register (X19) is saved on the stack.
    Arm64ManagedRegister::FromXRegister(X19),
    Arm64ManagedRegister::FromXRegister(X20),
    Arm64ManagedRegister::FromXRegister(X21),
    Arm64ManagedRegister::FromXRegister(X22),
    Arm64ManagedRegister::FromXRegister(X23),
    Arm64ManagedRegister::FromXRegister(X24),
    Arm64ManagedRegister::FromXRegister(X25),
    Arm64ManagedRegister::FromXRegister(X26),
    Arm64ManagedRegister::FromXRegister(X27),
    Arm64ManagedRegister::FromXRegister(X28),
    Arm64ManagedRegister::FromXRegister(X29),
    Arm64ManagedRegister::FromXRegister(LR),
    // Hard float registers.
    // Considering the case java_method_1 --> JNI method --> JNI function --> java_method_2,
    // we may break in java_method_2 and still need to find the values of DEX registers
    // in java_method_1, so all callee-saves (in managed code) need to be saved.
    Arm64ManagedRegister::FromDRegister(D8),
    Arm64ManagedRegister::FromDRegister(D9),
    Arm64ManagedRegister::FromDRegister(D10),
    Arm64ManagedRegister::FromDRegister(D11),
    Arm64ManagedRegister::FromDRegister(D12),
    Arm64ManagedRegister::FromDRegister(D13),
    Arm64ManagedRegister::FromDRegister(D14),
    Arm64ManagedRegister::FromDRegister(D15),
};

static constexpr uint32_t CalculateCoreCalleeSpillMask() {
  uint32_t result = 0u;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsArm64().IsXRegister()) {
      result |= (1 << r.AsArm64().AsXRegister());
    }
  }
  return result;
}

static constexpr uint32_t CalculateFpCalleeSpillMask() {
  uint32_t result = 0u;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsArm64().IsDRegister()) {
      result |= (1 << r.AsArm64().AsDRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask();
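
// With the register list above, kCoreCalleeSpillMask sets bits 19-30
// (X19-X29 plus LR, an alias for X30), i.e. 0x7ff80000, and
// kFpCalleeSpillMask sets bits 8-15 (D8-D15), i.e. 0x0000ff00.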

// Calling convention
ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  // X20 is safe to use as a scratch register:
  // - with Baker read barriers (in the case of a non-critical native
  //   method), it is reserved as Marking Register, and thus does not
  //   actually need to be saved/restored; it is refreshed on exit
  //   (see Arm64JNIMacroAssembler::RemoveFrame);
  // - in other cases, it is saved on entry (in
  //   Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in
  //   Arm64JNIMacroAssembler::RemoveFrame). This is also expected in
  //   the case of a critical native method in the Baker read barrier
  //   configuration, where the value of MR must be preserved across
  //   the JNI call (as there is no MR refresh in that case).
  return Arm64ManagedRegister::FromXRegister(X20);
}

ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() {
  // X20 is safe to use as a scratch register:
  // - with Baker read barriers (in the case of a non-critical native
  //   method), it is reserved as Marking Register, and thus does not
  //   actually need to be saved/restored; it is refreshed on exit
  //   (see Arm64JNIMacroAssembler::RemoveFrame);
  // - in other cases, it is saved on entry (in
  //   Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in
  //   Arm64JNIMacroAssembler::RemoveFrame). This is also expected in
  //   the case of a critical native method in the Baker read barrier
  //   configuration, where the value of MR must be preserved across
  //   the JNI call (as there is no MR refresh in that case).
  return Arm64ManagedRegister::FromXRegister(X20);
}

static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
  if (shorty[0] == 'F') {
    return Arm64ManagedRegister::FromSRegister(S0);
  } else if (shorty[0] == 'D') {
    return Arm64ManagedRegister::FromDRegister(D0);
  } else if (shorty[0] == 'J') {
    return Arm64ManagedRegister::FromXRegister(X0);
  } else if (shorty[0] == 'V') {
    return Arm64ManagedRegister::NoRegister();
  } else {
    return Arm64ManagedRegister::FromWRegister(W0);
  }
}
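
// The shorty's first character encodes the return type. For example, a method
// returning double has a shorty starting with 'D' and returns in D0, while
// int, char, boolean, and reference returns all fall through to W0.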

ManagedRegister Arm64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Arm64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Arm64JniCallingConvention::IntReturnRegister() {
  return Arm64ManagedRegister::FromWRegister(W0);
}

// Managed runtime calling convention

ManagedRegister Arm64ManagedRuntimeCallingConvention::MethodRegister() {
  return Arm64ManagedRegister::FromXRegister(X0);
}

bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything moved to stack on entry.
}

bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return true;
}

ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  UNREACHABLE();
}

FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  FrameOffset result =
      FrameOffset(displacement_.Int32Value() +        // displacement
                  kFramePointerSize +                 // Method ref
                  (itr_slots_ * sizeof(uint32_t)));   // offset into in args
  return result;
}

const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on ARM64 to free them up for scratch use;
  // we then assume all arguments are on the stack.
  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
    int gp_reg_index = 1;  // We start from X1/W1; X0 holds ArtMethod*.
    int fp_reg_index = 0;  // D0/S0.

    // We need to choose the correct register (D/S or X/W) since the managed
    // stack uses 32-bit stack slots.
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      if (IsCurrentParamAFloatOrDouble()) {  // FP regs.
        if (fp_reg_index < 8) {
          if (!IsCurrentParamADouble()) {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[fp_reg_index]));
          } else {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[fp_reg_index]));
          }
          fp_reg_index++;
        } else {  // Just increase the stack offset.
          if (!IsCurrentParamADouble()) {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
          }
        }
      } else {  // GP regs.
        if (gp_reg_index < 8) {
          if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg_index]));
          } else {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg_index]));
          }
          gp_reg_index++;
        } else {  // Just increase the stack offset.
          if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          }
        }
      }
      Next();
    }
  }
  return entry_spills_;
}
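
// Worked example (an illustration, not upstream code): for a static method
// with signature void f(int, double, long), the iterator visits I, D, J in
// order, so the entry spills are W1 (int, 32-bit slot), D0 (double), and
// X2 (long); the ArtMethod* stays in X0. An instance method would instead
// consume W1 first for the implicit 'this' reference.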

// JNI calling convention
Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static,
                                                     bool is_synchronized,
                                                     bool is_critical_native,
                                                     const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kArm64PointerSize) {
}

uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
  return kCoreCalleeSpillMask;
}

uint32_t Arm64JniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}

ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();
}

size_t Arm64JniCallingConvention::FrameSize() {
  // Method*, callee save area size, local reference segment state.
  //
  // (Unlike x86_64, the return address is not included, and the segment state
  // is a uint32 rather than a pointer.)
  size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
  size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;

  size_t frame_data_size = method_ptr_size + callee_save_area_size;
  if (LIKELY(HasLocalReferenceSegmentState())) {
    frame_data_size += sizeof(uint32_t);
  }
  // References plus 2 words for the HandleScope header.
  size_t handle_scope_size = HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());

  size_t total_size = frame_data_size;
  if (LIKELY(HasHandleScope())) {
    // The HandleScope is sometimes excluded (e.g. for @CriticalNative methods).
    total_size += handle_scope_size;
  }

  // Plus return value spill area size.
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
}
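
// Rough numbers (an illustration, assuming the 20 callee-saves above and
// kStackAlignment == 16): frame data is 8 (Method*) + 20 * 8 (spills) + 4
// (segment state) = 172 bytes, to which the handle scope and return value
// spill area are added before rounding up to a 16-byte boundary.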

size_t Arm64JniCallingConvention::OutArgSize() {
  // Same as X86_64.
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}

ArrayRef<const ManagedRegister> Arm64JniCallingConvention::CalleeSaveRegisters() const {
  // Same as X86_64.
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}

bool Arm64JniCallingConvention::IsCurrentParamInRegister() {
  if (IsCurrentParamAFloatOrDouble()) {
    return (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments);
  } else {
    return ((itr_args_ - itr_float_and_doubles_) < kMaxIntLikeRegisterArguments);
  }
  // TODO: Can we just call CurrentParamRegister to figure this out?
}

bool Arm64JniCallingConvention::IsCurrentParamOnStack() {
  // Is this ever not the same for all the architectures?
  return !IsCurrentParamInRegister();
}

ManagedRegister Arm64JniCallingConvention::CurrentParamRegister() {
  CHECK(IsCurrentParamInRegister());
  if (IsCurrentParamAFloatOrDouble()) {
    CHECK_LT(itr_float_and_doubles_, kMaxFloatOrDoubleRegisterArguments);
    if (IsCurrentParamADouble()) {
      return Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[itr_float_and_doubles_]);
    } else {
      return Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[itr_float_and_doubles_]);
    }
  } else {
    int gp_reg = itr_args_ - itr_float_and_doubles_;
    CHECK_LT(static_cast<unsigned int>(gp_reg), kMaxIntLikeRegisterArguments);
    if (IsCurrentParamALong() || IsCurrentParamAReference() || IsCurrentParamJniEnv()) {
      return Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg]);
    } else {
      return Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg]);
    }
  }
}
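
// Illustration (not upstream code): for a static native method
// jfloat f(JNIEnv*, jclass, jint, jfloat), the GP and FP counters advance
// independently, matching AAPCS64: the JNIEnv* lands in X0, the jclass in
// X1, the jint in W2, and the jfloat in S0.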

FrameOffset Arm64JniCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  size_t args_on_stack = itr_args_
                         - std::min(kMaxFloatOrDoubleRegisterArguments,
                                    static_cast<size_t>(itr_float_and_doubles_))
                         - std::min(kMaxIntLikeRegisterArguments,
                                    static_cast<size_t>(itr_args_ - itr_float_and_doubles_));
  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
  // TODO: Seems identical to X86_64 code.
}
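
// Example (an illustration): with no FP args and twelve int-like args, the
// ninth int-like arg has itr_args_ == 8, so args_on_stack == 8 - 0 - 8 == 0
// and it occupies the first 8-byte slot of the out-args area.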

size_t Arm64JniCallingConvention::NumberOfOutgoingStackArgs() {
  // All arguments, including JNI args.
  size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();

  DCHECK_GE(all_args, NumFloatOrDoubleArgs());

  size_t all_stack_args =
      all_args
      - std::min(kMaxFloatOrDoubleRegisterArguments,
                 static_cast<size_t>(NumFloatOrDoubleArgs()))
      - std::min(kMaxIntLikeRegisterArguments,
                 static_cast<size_t>(all_args - NumFloatOrDoubleArgs()));

  // TODO: Seems similar to X86_64 code except it doesn't count return pc.

  return all_stack_args;
}
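
// For example (an illustration): a static, non-critical native method taking
// ten ints has all_args == 12 once JNIEnv* and the jclass are counted; the
// first eight int-like args go in registers, leaving four on the stack.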

}  // namespace arm64
}  // namespace art