/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/logging.h"
#include "calling_convention_arm64.h"
#include "handle_scope-inl.h"
#include "utils/arm64/managed_register_arm64.h"

namespace art {
namespace arm64 {

static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size");

// Up to how many float-like (float, double) args can be enregistered.
// The rest of the args must go on the stack.
constexpr size_t kMaxFloatOrDoubleRegisterArguments = 8u;
// Up to how many integer-like (pointers, objects, longs, ints, shorts, bools, etc.) args can
// be enregistered. The rest of the args must go on the stack.
constexpr size_t kMaxIntLikeRegisterArguments = 8u;

static const XRegister kXArgumentRegisters[] = {
  X0, X1, X2, X3, X4, X5, X6, X7
};

static const WRegister kWArgumentRegisters[] = {
  W0, W1, W2, W3, W4, W5, W6, W7
};

static const DRegister kDArgumentRegisters[] = {
  D0, D1, D2, D3, D4, D5, D6, D7
};

static const SRegister kSArgumentRegisters[] = {
  S0, S1, S2, S3, S4, S5, S6, S7
};
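// Note: each W register is the low 32 bits of the same-numbered X register,
// and each S register is the low 32 bits of the same-numbered D register
// (AAPCS64 views of one physical register), so e.g. W3 and X3 can never hold
// two distinct argument values at the same time.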

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    // Note: The native JNI function may call VM runtime functions that can suspend
    // threads or trigger GC, and in those cases the JNI method frame becomes the top
    // quick frame. To satisfy the GC we must therefore save LR and the callee-save
    // registers, similar to a CalleeSaveMethod(RefOnly) frame.
    // The JNI function is the native function that the Java code wants to call;
    // the JNI method is the stub compiled by the JNI compiler.
    // Call chain: managed code (Java) --> JNI method --> JNI function.
    // The thread register (X19) is saved on the stack.
    Arm64ManagedRegister::FromXRegister(X19),
    Arm64ManagedRegister::FromXRegister(X20),
    Arm64ManagedRegister::FromXRegister(X21),
    Arm64ManagedRegister::FromXRegister(X22),
    Arm64ManagedRegister::FromXRegister(X23),
    Arm64ManagedRegister::FromXRegister(X24),
    Arm64ManagedRegister::FromXRegister(X25),
    Arm64ManagedRegister::FromXRegister(X26),
    Arm64ManagedRegister::FromXRegister(X27),
    Arm64ManagedRegister::FromXRegister(X28),
    Arm64ManagedRegister::FromXRegister(X29),
    Arm64ManagedRegister::FromXRegister(LR),
    // Hard float registers.
    // Consider the call chain java_method_1 --> JNI method --> JNI function --> java_method_2:
    // if we break in java_method_2, we still need to recover the values of DEX registers
    // in java_method_1, so all managed-code callee-saves need to be saved here as well.
    Arm64ManagedRegister::FromDRegister(D8),
    Arm64ManagedRegister::FromDRegister(D9),
    Arm64ManagedRegister::FromDRegister(D10),
    Arm64ManagedRegister::FromDRegister(D11),
    Arm64ManagedRegister::FromDRegister(D12),
    Arm64ManagedRegister::FromDRegister(D13),
    Arm64ManagedRegister::FromDRegister(D14),
    Arm64ManagedRegister::FromDRegister(D15),
};
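// Sanity check on the table above: 11 core registers (X19-X29) plus LR, and
// 8 FP registers (D8-D15), for 20 callee-saves in total.
static_assert(sizeof(kCalleeSaveRegisters) / sizeof(kCalleeSaveRegisters[0]) == 20u,
              "Expected 12 core + 8 FP callee-save registers");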

static constexpr uint32_t CalculateCoreCalleeSpillMask() {
  uint32_t result = 0u;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsArm64().IsXRegister()) {
      result |= (1u << r.AsArm64().AsXRegister());
    }
  }
  return result;
}

static constexpr uint32_t CalculateFpCalleeSpillMask() {
  uint32_t result = 0u;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsArm64().IsDRegister()) {
      result |= (1u << r.AsArm64().AsDRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask();
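// With the register encodings from registers_arm64.h (X19 == 19, ..., and LR
// encoding as X30 == 30), these masks come out to bits 19-30 set in the core
// mask (0x7ff80000) and bits 8-15 set in the FP mask (0x0000ff00).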

// Calling convention
ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  return Arm64ManagedRegister::FromXRegister(X20);  // Saved on entry, restored on exit.
}

ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() {
  return Arm64ManagedRegister::FromXRegister(X20);  // Saved on entry, restored on exit.
}

static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
  if (shorty[0] == 'F') {
    return Arm64ManagedRegister::FromSRegister(S0);
  } else if (shorty[0] == 'D') {
    return Arm64ManagedRegister::FromDRegister(D0);
  } else if (shorty[0] == 'J') {
    return Arm64ManagedRegister::FromXRegister(X0);
  } else if (shorty[0] == 'V') {
    return Arm64ManagedRegister::NoRegister();
  } else {
    return Arm64ManagedRegister::FromWRegister(W0);
  }
}
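// The first shorty character is the return type: e.g. a method with signature
// "double f(int, long)" has shorty "DIJ", so its result is returned in D0.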

ManagedRegister Arm64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Arm64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Arm64JniCallingConvention::IntReturnRegister() {
  return Arm64ManagedRegister::FromWRegister(W0);
}

// Managed runtime calling convention

ManagedRegister Arm64ManagedRuntimeCallingConvention::MethodRegister() {
  return Arm64ManagedRegister::FromXRegister(X0);
}

bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything moved to stack on entry.
}

bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return true;
}

ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  return ManagedRegister::NoRegister();
}

FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  FrameOffset result =
      FrameOffset(displacement_.Int32Value() +       // displacement
                  kFramePointerSize +                // Method ref
                  (itr_slots_ * sizeof(uint32_t)));  // offset into in args
  return result;
}
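// For example, with a zero displacement the first argument slot sits just
// above the ArtMethod* reference at offset kFramePointerSize (8), and each
// argument advances by 32-bit slots (longs and doubles occupy two slots each).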

const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on ARM64 to free them up for scratch use;
  // we then assume all arguments are on the stack.
  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
    size_t gp_reg_index = 1u;  // We start from X1/W1; X0 holds the ArtMethod*.
    size_t fp_reg_index = 0u;  // D0/S0.

    // We need to choose the correct register (D/S or X/W) since the managed
    // stack uses 32-bit stack slots.
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      if (IsCurrentParamAFloatOrDouble()) {  // FP regs.
        if (fp_reg_index < kMaxFloatOrDoubleRegisterArguments) {
          if (!IsCurrentParamADouble()) {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[fp_reg_index]));
          } else {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[fp_reg_index]));
          }
          fp_reg_index++;
        } else {  // Just increase the stack offset.
          if (!IsCurrentParamADouble()) {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
          }
        }
      } else {  // GP regs.
        if (gp_reg_index < kMaxIntLikeRegisterArguments) {
          if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg_index]));
          } else {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg_index]));
          }
          gp_reg_index++;
        } else {  // Just increase the stack offset.
          if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          }
        }
      }
      Next();
    }
  }
  return entry_spills_;
}
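// Worked example (illustrative): for an instance method taking (long, float,
// int), the spill list comes out as W1 (the implicit 'this' reference),
// X2 (the long), S0 (the float), and W3 (the int); X0 is skipped because it
// holds the ArtMethod*.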

// JNI calling convention
Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static,
                                                     bool is_synchronized,
                                                     bool is_critical_native,
                                                     const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kArm64PointerSize) {
}

uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
  return kCoreCalleeSpillMask;
}

uint32_t Arm64JniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}

ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();
}

size_t Arm64JniCallingConvention::FrameSize() {
  // Method*, callee save area size, local reference segment state.
  //
  // (Unlike x86_64, do not include the return address, and the segment state
  // is a uint32_t instead of a pointer.)
  size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
  size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;

  size_t frame_data_size = method_ptr_size + callee_save_area_size;
  if (LIKELY(HasLocalReferenceSegmentState())) {
    frame_data_size += sizeof(uint32_t);
  }
  // References plus 2 words for the HandleScope header.
  size_t handle_scope_size = HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());

  size_t total_size = frame_data_size;
  if (LIKELY(HasHandleScope())) {
    // The HandleScope is excluded for @CriticalNative methods.
    total_size += handle_scope_size;
  }

  // Plus the return value spill area size.
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
}
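// Illustrative layout for a typical (non-critical) native method: 8 bytes for
// the ArtMethod*, 160 bytes of callee-saves, a 4-byte reference cookie, the
// handle scope, and any return value spill space, rounded up to the 16-byte
// stack alignment.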

size_t Arm64JniCallingConvention::OutArgSize() {
  // Same as X86_64.
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}

ArrayRef<const ManagedRegister> Arm64JniCallingConvention::CalleeSaveRegisters() const {
  // Same as X86_64.
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}

bool Arm64JniCallingConvention::IsCurrentParamInRegister() {
  if (IsCurrentParamAFloatOrDouble()) {
    return (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments);
  } else {
    return ((itr_args_ - itr_float_and_doubles_) < kMaxIntLikeRegisterArguments);
  }
  // TODO: Can we just call CurrentParamRegister to figure this out?
}

bool Arm64JniCallingConvention::IsCurrentParamOnStack() {
  // Is this ever not the same for all the architectures?
  return !IsCurrentParamInRegister();
}

ManagedRegister Arm64JniCallingConvention::CurrentParamRegister() {
  CHECK(IsCurrentParamInRegister());
  if (IsCurrentParamAFloatOrDouble()) {
    CHECK_LT(itr_float_and_doubles_, kMaxFloatOrDoubleRegisterArguments);
    if (IsCurrentParamADouble()) {
      return Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[itr_float_and_doubles_]);
    } else {
      return Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[itr_float_and_doubles_]);
    }
  } else {
    int gp_reg = itr_args_ - itr_float_and_doubles_;
    CHECK_LT(static_cast<unsigned int>(gp_reg), kMaxIntLikeRegisterArguments);
    if (IsCurrentParamALong() || IsCurrentParamAReference() || IsCurrentParamJniEnv()) {
      return Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg]);
    } else {
      return Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg]);
    }
  }
}
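// For example, a native function taking (JNIEnv*, jobject, jlong, jfloat) gets
// X0, X1, X2 and S0 respectively: per AAPCS64, the float argument does not
// consume a GP register slot.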

FrameOffset Arm64JniCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  size_t args_on_stack = itr_args_
                  - std::min(kMaxFloatOrDoubleRegisterArguments,
                             static_cast<size_t>(itr_float_and_doubles_))
                  - std::min(kMaxIntLikeRegisterArguments,
                             static_cast<size_t>(itr_args_ - itr_float_and_doubles_));
  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
  // TODO: Seems identical to X86_64 code.
}
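// Here args_on_stack counts how many of the arguments iterated past so far
// went to the stack: the first 8 FP args and the first 8 integer-like args are
// subtracted out because they were passed in registers.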

size_t Arm64JniCallingConvention::NumberOfOutgoingStackArgs() {
  // All arguments, including the extra JNI args (JNIEnv*, plus the jclass for
  // static methods).
  size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();

  DCHECK_GE(all_args, NumFloatOrDoubleArgs());

  size_t all_stack_args =
      all_args
      - std::min(kMaxFloatOrDoubleRegisterArguments,
                 static_cast<size_t>(NumFloatOrDoubleArgs()))
      - std::min(kMaxIntLikeRegisterArguments,
                 static_cast<size_t>(all_args - NumFloatOrDoubleArgs()));

  // TODO: Seems similar to the X86_64 code except it doesn't count the return pc.

  return all_stack_args;
}
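// Worked example (illustrative): a static method with ten int parameters has
// all_args = 10 + 2 (JNIEnv* and jclass) = 12 and no FP args, so
// 12 - 0 - min(8, 12) = 4 arguments are passed on the stack.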

}  // namespace arm64
}  // namespace art