/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_x86_64.h"

#include <android-base/logging.h>

#include "arch/instruction_set.h"
#include "arch/x86_64/jni_frame_x86_64.h"
#include "base/bit_utils.h"
#include "handle_scope-inl.h"
#include "utils/x86_64/managed_register_x86_64.h"

namespace art {
namespace x86_64 {

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    X86_64ManagedRegister::FromCpuRegister(RBX),
    X86_64ManagedRegister::FromCpuRegister(RBP),
    X86_64ManagedRegister::FromCpuRegister(R12),
    X86_64ManagedRegister::FromCpuRegister(R13),
    X86_64ManagedRegister::FromCpuRegister(R14),
    X86_64ManagedRegister::FromCpuRegister(R15),
    // Hard float registers.
    X86_64ManagedRegister::FromXmmRegister(XMM12),
    X86_64ManagedRegister::FromXmmRegister(XMM13),
    X86_64ManagedRegister::FromXmmRegister(XMM14),
    X86_64ManagedRegister::FromXmmRegister(XMM15),
};

template <size_t size>
static constexpr uint32_t CalculateCoreCalleeSpillMask(
    const ManagedRegister (&callee_saves)[size]) {
  // The spilled PC gets a special marker.
  uint32_t result = 1u << kNumberOfCpuRegisters;
  for (auto&& r : callee_saves) {
    if (r.AsX86_64().IsCpuRegister()) {
      result |= (1u << r.AsX86_64().AsCpuRegister().AsRegister());
    }
  }
  return result;
}

template <size_t size>
static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) {
  uint32_t result = 0u;
  for (auto&& r : callee_saves) {
    if (r.AsX86_64().IsXmmRegister()) {
      result |= (1u << r.AsX86_64().AsXmmRegister().AsFloatRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters);
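
// Illustrative sanity checks (assuming the standard x86-64 register encoding,
// RBX = 3, RBP = 5, R12-R15 = 12-15, and XMM12-XMM15 = 12-15): the core mask
// holds the six callee-save GPRs plus the return-PC marker bit, and the FP
// mask holds exactly XMM12-XMM15.
static_assert(POPCOUNT(kCoreCalleeSpillMask) == 7, "6 callee-save GPRs + return PC marker");
static_assert(kFpCalleeSpillMask ==
                  ((1u << XMM12) | (1u << XMM13) | (1u << XMM14) | (1u << XMM15)),
              "Managed FP callee-saves should be exactly XMM12-XMM15");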

static constexpr ManagedRegister kNativeCalleeSaveRegisters[] = {
    // Core registers.
    X86_64ManagedRegister::FromCpuRegister(RBX),
    X86_64ManagedRegister::FromCpuRegister(RBP),
    X86_64ManagedRegister::FromCpuRegister(R12),
    X86_64ManagedRegister::FromCpuRegister(R13),
    X86_64ManagedRegister::FromCpuRegister(R14),
    X86_64ManagedRegister::FromCpuRegister(R15),
    // No callee-save float registers.
};

static constexpr uint32_t kNativeCoreCalleeSpillMask =
    CalculateCoreCalleeSpillMask(kNativeCalleeSaveRegisters);
static constexpr uint32_t kNativeFpCalleeSpillMask =
    CalculateFpCalleeSpillMask(kNativeCalleeSaveRegisters);
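
// Note: the native SysV x86-64 ABI preserves no XMM registers across calls, so
// kNativeFpCalleeSpillMask is zero. This asymmetry with the managed ABI, which
// treats XMM12-XMM15 as callee-save, is what forces the extra spills in
// OutArgSize() and rules out tail calls for @CriticalNative (see below).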

// Calling convention

ManagedRegister X86_64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() const {
  return X86_64ManagedRegister::FromCpuRegister(RAX);
}

ManagedRegister X86_64JniCallingConvention::InterproceduralScratchRegister() const {
  return X86_64ManagedRegister::FromCpuRegister(RAX);
}

ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();  // No free regs, so the assembler uses push/pop.
}

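// The first character of a method shorty encodes the return type: 'F' float,
// 'D' double, 'J' long, 'V' void; any other character (int-like or reference)
// returns in RAX.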
static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni ATTRIBUTE_UNUSED) {
  if (shorty[0] == 'F' || shorty[0] == 'D') {
    return X86_64ManagedRegister::FromXmmRegister(XMM0);
  } else if (shorty[0] == 'J') {
    return X86_64ManagedRegister::FromCpuRegister(RAX);
  } else if (shorty[0] == 'V') {
    return ManagedRegister::NoRegister();
  } else {
    return X86_64ManagedRegister::FromCpuRegister(RAX);
  }
}

ManagedRegister X86_64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), false);
}

ManagedRegister X86_64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), true);
}

ManagedRegister X86_64JniCallingConvention::IntReturnRegister() {
  return X86_64ManagedRegister::FromCpuRegister(RAX);
}

// Managed runtime calling convention

ManagedRegister X86_64ManagedRuntimeCallingConvention::MethodRegister() {
  return X86_64ManagedRegister::FromCpuRegister(RDI);
}

bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return !IsCurrentParamOnStack();
}

bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  // We assume all parameters are on the stack; arguments arriving in registers
  // are spilled as entry spills.
  return true;
}

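// In the managed ABI, RDI is reserved for the ArtMethod* (see MethodRegister()
// above), so integer arguments start at RSI; the JNI convention below starts
// at RDI instead.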
ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  ManagedRegister res = ManagedRegister::NoRegister();
  if (!IsCurrentParamAFloatOrDouble()) {
    switch (itr_args_ - itr_float_and_doubles_) {
      case 0: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
      case 1: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
      case 2: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
      case 3: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
      case 4: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
    }
  } else if (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments) {
    // The first eight float parameters are passed via XMM0..XMM7.
    res = X86_64ManagedRegister::FromXmmRegister(
        static_cast<FloatRegister>(XMM0 + itr_float_and_doubles_));
  }
  return res;
}

FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  return FrameOffset(displacement_.Int32Value() +               // displacement
                     static_cast<size_t>(kX86_64PointerSize) +  // method ref
                     itr_slots_ * sizeof(uint32_t));            // offset into in args
}

const ManagedRegisterEntrySpills& X86_64ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on x86-64 to free them up for scratch use;
  // we then assume all arguments are on the stack.
  if (entry_spills_.size() == 0) {
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      ManagedRegister in_reg = CurrentParamRegister();
      if (!in_reg.IsNoRegister()) {
        int32_t size = IsParamALongOrDouble(itr_args_) ? 8 : 4;
        int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
        ManagedRegisterSpill spill(in_reg, size, spill_offset);
        entry_spills_.push_back(spill);
      }
      Next();
    }
  }
  return entry_spills_;
}
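
// Example (illustrative): for a static method "void f(int, long, float)",
// EntrySpills() records RSI -> [8] (4 bytes), RDX -> [12] (8 bytes) and
// XMM0 -> [20] (4 bytes): each register argument is written back to its
// 4-byte-slot position just above the method pointer.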

// JNI calling convention

X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static,
                                                       bool is_synchronized,
                                                       bool is_critical_native,
                                                       const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kX86_64PointerSize) {
}

uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
  return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
}

uint32_t X86_64JniCallingConvention::FpSpillMask() const {
  return is_critical_native_ ? 0u : kFpCalleeSpillMask;
}

size_t X86_64JniCallingConvention::FrameSize() const {
  if (is_critical_native_) {
    CHECK(!SpillsMethod());
    CHECK(!HasLocalReferenceSegmentState());
    CHECK(!HasHandleScope());
    CHECK(!SpillsReturnValue());
    return 0u;  // There is no managed frame for @CriticalNative.
  }

  // Method*, PC return address, callee-save area and local reference segment state.
  CHECK(SpillsMethod());
  const size_t method_ptr_size = static_cast<size_t>(kX86_64PointerSize);
  const size_t pc_return_addr_size = kFramePointerSize;
  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
  size_t total_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;

  CHECK(HasLocalReferenceSegmentState());
  total_size += kFramePointerSize;

  CHECK(HasHandleScope());
  total_size += HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());

  // Plus the return value spill area size.
  CHECK(SpillsReturnValue());
  total_size += SizeOfReturnValue();
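
  // In total (before alignment): 8 (Method*) + 8 (return PC) + 10 * 8 for the
  // callee-saves (RBX, RBP, R12-R15, XMM12-XMM15) + 8 (local reference segment
  // state) + handle scope size + return value spill size.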
  return RoundUp(total_size, kStackAlignment);
}

size_t X86_64JniCallingConvention::OutArgSize() const {
  // Count param args, including JNIEnv* and jclass*.
  size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs();
  size_t num_fp_args = NumFloatOrDoubleArgs();
  DCHECK_GE(all_args, num_fp_args);
  size_t num_non_fp_args = all_args - num_fp_args;
  // Account for FP arguments passed through XMM0..XMM7.
  size_t num_stack_fp_args =
      num_fp_args - std::min(kMaxFloatOrDoubleRegisterArguments, num_fp_args);
  // Account for other (integer) arguments passed through GPRs (RDI, RSI, RDX, RCX, R8, R9).
  size_t num_stack_non_fp_args =
      num_non_fp_args - std::min(kMaxIntLikeRegisterArguments, num_non_fp_args);
  // The size of outgoing arguments.
  static_assert(kFramePointerSize == kMmxSpillSize);
  size_t size = (num_stack_fp_args + num_stack_non_fp_args) * kFramePointerSize;

  if (UNLIKELY(IsCriticalNative())) {
    // We always need to spill XMM12-XMM15 as they are managed callee-saves
    // but not native callee-saves.
    static_assert((kCoreCalleeSpillMask & ~kNativeCoreCalleeSpillMask) == 0u);
    static_assert((kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask) != 0u);
    static_assert(
        kAlwaysSpilledMmxRegisters == POPCOUNT(kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask));
    size += kAlwaysSpilledMmxRegisters * kMmxSpillSize;
    // Add the return address size for @CriticalNative; for normal native methods
    // the return PC is part of the managed stack frame instead of the out args.
    size += kFramePointerSize;
  }
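
  // Example (illustrative): a non-@CriticalNative static native method with
  // nine int-like parameters has all_args = 9 + 2 = 11, six of which travel in
  // GPRs, leaving five stack args: 5 * 8 = 40 bytes, rounded up to 48 below
  // (kNativeStackAlignment is 16).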
  size_t out_args_size = RoundUp(size, kNativeStackAlignment);
  if (UNLIKELY(IsCriticalNative())) {
    DCHECK_EQ(out_args_size, GetCriticalNativeOutArgsSize(GetShorty(), NumArgs() + 1u));
  }
  return out_args_size;
}

ArrayRef<const ManagedRegister> X86_64JniCallingConvention::CalleeSaveRegisters() const {
  if (UNLIKELY(IsCriticalNative())) {
    DCHECK(!UseTailCall());
    static_assert(std::size(kCalleeSaveRegisters) > std::size(kNativeCalleeSaveRegisters));
    // TODO: Change to static_assert; std::equal should be constexpr since C++20.
    DCHECK(std::equal(kCalleeSaveRegisters,
                      kCalleeSaveRegisters + std::size(kNativeCalleeSaveRegisters),
                      kNativeCalleeSaveRegisters,
                      [](ManagedRegister lhs, ManagedRegister rhs) { return lhs.Equals(rhs); }));
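    // Only the managed-only callee-saves (XMM12-XMM15) remain once the
    // registers shared with the native ABI are skipped below.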
    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
        /*pos=*/ std::size(kNativeCalleeSaveRegisters));
  } else {
    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
  }
}

bool X86_64JniCallingConvention::IsCurrentParamInRegister() {
  return !IsCurrentParamOnStack();
}

bool X86_64JniCallingConvention::IsCurrentParamOnStack() {
  return CurrentParamRegister().IsNoRegister();
}

ManagedRegister X86_64JniCallingConvention::CurrentParamRegister() {
  ManagedRegister res = ManagedRegister::NoRegister();
  if (!IsCurrentParamAFloatOrDouble()) {
    switch (itr_args_ - itr_float_and_doubles_) {
      case 0: res = X86_64ManagedRegister::FromCpuRegister(RDI); break;
      case 1: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
      case 2: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
      case 3: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
      case 4: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
      case 5: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
      static_assert(5u == kMaxIntLikeRegisterArguments - 1, "Missing case statement(s)");
    }
  } else if (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments) {
    // The first eight float parameters are passed via XMM0..XMM7.
    res = X86_64ManagedRegister::FromXmmRegister(
        static_cast<FloatRegister>(XMM0 + itr_float_and_doubles_));
  }
  return res;
}

FrameOffset X86_64JniCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  size_t args_on_stack = itr_args_
      // Subtract float arguments passed through XMM0..XMM7.
      - std::min(kMaxFloatOrDoubleRegisterArguments,
                 static_cast<size_t>(itr_float_and_doubles_))
      // Subtract integer arguments passed through GPRs.
      - std::min(kMaxIntLikeRegisterArguments,
                 static_cast<size_t>(itr_args_ - itr_float_and_doubles_));
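  // Note: the iterator's displacement is expected to equal OutArgSize() here
  // (the JNI stub compiler resets the iterator to the out-args size), so the
  // computed offset is simply args_on_stack * kFramePointerSize from the stack
  // pointer, as the CHECK below implies.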
  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
}

ManagedRegister X86_64JniCallingConvention::HiddenArgumentRegister() const {
  CHECK(IsCriticalNative());
  // R11 is neither a managed callee-save, nor an argument register, nor a scratch register.
  DCHECK(std::none_of(kCalleeSaveRegisters,
                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
                      [](ManagedRegister callee_save) constexpr {
                        return callee_save.Equals(X86_64ManagedRegister::FromCpuRegister(R11));
                      }));
  DCHECK(!InterproceduralScratchRegister().Equals(X86_64ManagedRegister::FromCpuRegister(R11)));
  return X86_64ManagedRegister::FromCpuRegister(R11);
}

// Whether to use a tail call (used only for @CriticalNative).
bool X86_64JniCallingConvention::UseTailCall() const {
  CHECK(IsCriticalNative());
  // We always need to spill XMM12-XMM15 as they are managed callee-saves
  // but not native callee-saves, so we can never use a tail call.
  return false;
}

}  // namespace x86_64
}  // namespace art