1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "calling_convention_x86_64.h"
18 
19 #include <android-base/logging.h>
20 
21 #include "base/bit_utils.h"
22 #include "handle_scope-inl.h"
23 #include "utils/x86_64/managed_register_x86_64.h"
24 
25 namespace art {
26 namespace x86_64 {
27 
// Size of one spill slot / saved pointer in the frame on x86-64.
constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
static_assert(kX86_64PointerSize == PointerSize::k64, "Unexpected x86_64 pointer size");
static_assert(kStackAlignment >= 16u, "System V AMD64 ABI requires at least 16 byte stack alignment");

// XMM0..XMM7 can be used to pass the first 8 floating args. The rest must go on the stack.
// -- Managed and JNI calling conventions.
constexpr size_t kMaxFloatOrDoubleRegisterArguments = 8u;
// Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be
// enregistered. The rest of the args must go on the stack.
// -- JNI calling convention only (Managed excludes RDI, so it's actually 5).
constexpr size_t kMaxIntLikeRegisterArguments = 6u;
39 
// Registers preserved across calls under these conventions; this list feeds the
// core/FP spill-mask computations below and CalleeSaveRegisters().
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    X86_64ManagedRegister::FromCpuRegister(RBX),
    X86_64ManagedRegister::FromCpuRegister(RBP),
    X86_64ManagedRegister::FromCpuRegister(R12),
    X86_64ManagedRegister::FromCpuRegister(R13),
    X86_64ManagedRegister::FromCpuRegister(R14),
    X86_64ManagedRegister::FromCpuRegister(R15),
    // Hard float registers.
    X86_64ManagedRegister::FromXmmRegister(XMM12),
    X86_64ManagedRegister::FromXmmRegister(XMM13),
    X86_64ManagedRegister::FromXmmRegister(XMM14),
    X86_64ManagedRegister::FromXmmRegister(XMM15),
};
54 
CalculateCoreCalleeSpillMask()55 static constexpr uint32_t CalculateCoreCalleeSpillMask() {
56   // The spilled PC gets a special marker.
57   uint32_t result = 1 << kNumberOfCpuRegisters;
58   for (auto&& r : kCalleeSaveRegisters) {
59     if (r.AsX86_64().IsCpuRegister()) {
60       result |= (1 << r.AsX86_64().AsCpuRegister().AsRegister());
61     }
62   }
63   return result;
64 }
65 
CalculateFpCalleeSpillMask()66 static constexpr uint32_t CalculateFpCalleeSpillMask() {
67   uint32_t result = 0;
68   for (auto&& r : kCalleeSaveRegisters) {
69     if (r.AsX86_64().IsXmmRegister()) {
70       result |= (1 << r.AsX86_64().AsXmmRegister().AsFloatRegister());
71     }
72   }
73   return result;
74 }
75 
// Precomputed spill masks derived from kCalleeSaveRegisters.
static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask();
78 
79 // Calling convention
80 
// Scratch register usable across calls in managed code: RAX.
ManagedRegister X86_64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  return X86_64ManagedRegister::FromCpuRegister(RAX);
}
84 
// Scratch register usable across calls in JNI stubs: RAX (same as managed).
ManagedRegister X86_64JniCallingConvention::InterproceduralScratchRegister() {
  return X86_64ManagedRegister::FromCpuRegister(RAX);
}
88 
// No dedicated scratch register is available for the return path.
ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop
}
92 
ReturnRegisterForShorty(const char * shorty,bool jni ATTRIBUTE_UNUSED)93 static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni ATTRIBUTE_UNUSED) {
94   if (shorty[0] == 'F' || shorty[0] == 'D') {
95     return X86_64ManagedRegister::FromXmmRegister(XMM0);
96   } else if (shorty[0] == 'J') {
97     return X86_64ManagedRegister::FromCpuRegister(RAX);
98   } else if (shorty[0] == 'V') {
99     return ManagedRegister::NoRegister();
100   } else {
101     return X86_64ManagedRegister::FromCpuRegister(RAX);
102   }
103 }
104 
// Return register for the managed convention, chosen from the method's shorty.
ManagedRegister X86_64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), false);
}
108 
// Return register for the JNI convention, chosen from the method's shorty.
ManagedRegister X86_64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), true);
}
112 
// Integer results are always returned in RAX.
ManagedRegister X86_64JniCallingConvention::IntReturnRegister() {
  return X86_64ManagedRegister::FromCpuRegister(RAX);
}
116 
117 // Managed runtime calling convention
118 
// The managed convention passes the ArtMethod* in RDI.
ManagedRegister X86_64ManagedRuntimeCallingConvention::MethodRegister() {
  return X86_64ManagedRegister::FromCpuRegister(RDI);
}
122 
IsCurrentParamInRegister()123 bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
124   return !IsCurrentParamOnStack();
125 }
126 
// Always reports "on stack": register args are spilled to the stack as
// entry_spills (see EntrySpills()), so callers can treat everything as stacked.
bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  // We assume all parameters are on stack, args coming via registers are spilled as entry_spills
  return true;
}
131 
CurrentParamRegister()132 ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() {
133   ManagedRegister res = ManagedRegister::NoRegister();
134   if (!IsCurrentParamAFloatOrDouble()) {
135     switch (itr_args_ - itr_float_and_doubles_) {
136     case 0: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
137     case 1: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
138     case 2: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
139     case 3: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
140     case 4: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
141     }
142   } else if (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments) {
143     // First eight float parameters are passed via XMM0..XMM7
144     res = X86_64ManagedRegister::FromXmmRegister(
145                                  static_cast<FloatRegister>(XMM0 + itr_float_and_doubles_));
146   }
147   return res;
148 }
149 
CurrentParamStackOffset()150 FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
151   return FrameOffset(displacement_.Int32Value() +  // displacement
152                      static_cast<size_t>(kX86_64PointerSize) +  // Method ref
153                      itr_slots_ * sizeof(uint32_t));  // offset into in args
154 }
155 
EntrySpills()156 const ManagedRegisterEntrySpills& X86_64ManagedRuntimeCallingConvention::EntrySpills() {
157   // We spill the argument registers on X86 to free them up for scratch use, we then assume
158   // all arguments are on the stack.
159   if (entry_spills_.size() == 0) {
160     ResetIterator(FrameOffset(0));
161     while (HasNext()) {
162       ManagedRegister in_reg = CurrentParamRegister();
163       if (!in_reg.IsNoRegister()) {
164         int32_t size = IsParamALongOrDouble(itr_args_) ? 8 : 4;
165         int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
166         ManagedRegisterSpill spill(in_reg, size, spill_offset);
167         entry_spills_.push_back(spill);
168       }
169       Next();
170     }
171   }
172   return entry_spills_;
173 }
174 
175 // JNI calling convention
176 
// Constructs the x86-64 JNI calling convention; all state handling lives in the
// base class, parameterized here with the 64-bit pointer size.
X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static,
                                                       bool is_synchronized,
                                                       bool is_critical_native,
                                                       const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kX86_64PointerSize) {
}
187 
CoreSpillMask() const188 uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
189   return kCoreCalleeSpillMask;
190 }
191 
FpSpillMask() const192 uint32_t X86_64JniCallingConvention::FpSpillMask() const {
193   return kFpCalleeSpillMask;
194 }
195 
FrameSize()196 size_t X86_64JniCallingConvention::FrameSize() {
197   // Method*, PC return address and callee save area size, local reference segment state
198   const size_t method_ptr_size = static_cast<size_t>(kX86_64PointerSize);
199   const size_t pc_return_addr_size = kFramePointerSize;
200   const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
201   size_t frame_data_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;
202 
203   if (LIKELY(HasLocalReferenceSegmentState())) {                     // local ref. segment state
204     // Local reference segment state is sometimes excluded.
205     frame_data_size += kFramePointerSize;
206   }
207 
208   // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
209   const size_t handle_scope_size = HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
210 
211   size_t total_size = frame_data_size;
212   if (LIKELY(HasHandleScope())) {
213     // HandleScope is sometimes excluded.
214     total_size += handle_scope_size;                                 // handle scope size
215   }
216 
217   // Plus return value spill area size
218   total_size += SizeOfReturnValue();
219 
220   return RoundUp(total_size, kStackAlignment);
221 }
222 
OutArgSize()223 size_t X86_64JniCallingConvention::OutArgSize() {
224   return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
225 }
226 
CalleeSaveRegisters() const227 ArrayRef<const ManagedRegister> X86_64JniCallingConvention::CalleeSaveRegisters() const {
228   return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
229 }
230 
IsCurrentParamInRegister()231 bool X86_64JniCallingConvention::IsCurrentParamInRegister() {
232   return !IsCurrentParamOnStack();
233 }
234 
IsCurrentParamOnStack()235 bool X86_64JniCallingConvention::IsCurrentParamOnStack() {
236   return CurrentParamRegister().IsNoRegister();
237 }
238 
CurrentParamRegister()239 ManagedRegister X86_64JniCallingConvention::CurrentParamRegister() {
240   ManagedRegister res = ManagedRegister::NoRegister();
241   if (!IsCurrentParamAFloatOrDouble()) {
242     switch (itr_args_ - itr_float_and_doubles_) {
243     case 0: res = X86_64ManagedRegister::FromCpuRegister(RDI); break;
244     case 1: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
245     case 2: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
246     case 3: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
247     case 4: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
248     case 5: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
249     static_assert(5u == kMaxIntLikeRegisterArguments - 1, "Missing case statement(s)");
250     }
251   } else if (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments) {
252     // First eight float parameters are passed via XMM0..XMM7
253     res = X86_64ManagedRegister::FromXmmRegister(
254                                  static_cast<FloatRegister>(XMM0 + itr_float_and_doubles_));
255   }
256   return res;
257 }
258 
CurrentParamStackOffset()259 FrameOffset X86_64JniCallingConvention::CurrentParamStackOffset() {
260   CHECK(IsCurrentParamOnStack());
261   size_t args_on_stack = itr_args_
262       - std::min(kMaxFloatOrDoubleRegisterArguments,
263                  static_cast<size_t>(itr_float_and_doubles_))
264           // Float arguments passed through Xmm0..Xmm7
265       - std::min(kMaxIntLikeRegisterArguments,
266                  static_cast<size_t>(itr_args_ - itr_float_and_doubles_));
267           // Integer arguments passed through GPR
268   size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
269   CHECK_LT(offset, OutArgSize());
270   return FrameOffset(offset);
271 }
272 
273 // TODO: Calling this "NumberArgs" is misleading.
274 // It's really more like NumberSlots (like itr_slots_)
275 // because doubles/longs get counted twice.
NumberOfOutgoingStackArgs()276 size_t X86_64JniCallingConvention::NumberOfOutgoingStackArgs() {
277   size_t static_args = HasSelfClass() ? 1 : 0;  // count jclass
278   // regular argument parameters and this
279   size_t param_args = NumArgs() + NumLongOrDoubleArgs();
280   // count JNIEnv* and return pc (pushed after Method*)
281   size_t internal_args = 1 /* return pc */ + (HasJniEnv() ? 1 : 0 /* jni env */);
282   size_t total_args = static_args + param_args + internal_args;
283 
284   // Float arguments passed through Xmm0..Xmm7
285   // Other (integer) arguments passed through GPR (RDI, RSI, RDX, RCX, R8, R9)
286   size_t total_stack_args = total_args
287                             - std::min(kMaxFloatOrDoubleRegisterArguments, static_cast<size_t>(NumFloatOrDoubleArgs()))
288                             - std::min(kMaxIntLikeRegisterArguments, static_cast<size_t>(NumArgs() - NumFloatOrDoubleArgs()));
289 
290   return total_stack_args;
291 }
292 
293 }  // namespace x86_64
294 }  // namespace art
295