/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_x86.h"

#include <android-base/logging.h>

#include "arch/instruction_set.h"
#include "arch/x86/jni_frame_x86.h"
#include "utils/x86/managed_register_x86.h"

namespace art {
namespace x86 {

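// Managed arguments are passed in up to four core registers and four XMM
// registers. EAX is listed first but carries the ArtMethod* on entry (see
// MethodRegister() and ResetIterator() below), so the argument iterator
// starts at index 1 and the first core argument lands in ECX.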
static constexpr Register kManagedCoreArgumentRegisters[] = {
    EAX, ECX, EDX, EBX
};
static constexpr size_t kManagedCoreArgumentRegistersCount =
    arraysize(kManagedCoreArgumentRegisters);
static constexpr size_t kManagedFpArgumentRegistersCount = 4u;

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    X86ManagedRegister::FromCpuRegister(EBP),
    X86ManagedRegister::FromCpuRegister(ESI),
    X86ManagedRegister::FromCpuRegister(EDI),
    // No hard float callee saves.
};

template <size_t size>
static constexpr uint32_t CalculateCoreCalleeSpillMask(
    const ManagedRegister (&callee_saves)[size]) {
  // The spilled PC gets a special marker.
  uint32_t result = 1 << kNumberOfCpuRegisters;
  for (auto&& r : callee_saves) {
    if (r.AsX86().IsCpuRegister()) {
      result |= (1 << r.AsX86().AsCpuRegister());
    }
  }
  return result;
}

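// Example (assuming ART's x86 register numbering, EAX = 0 through EDI = 7,
// and kNumberOfCpuRegisters == 8): the managed mask below evaluates to
// (1 << 8) | (1 << EBP) | (1 << ESI) | (1 << EDI) = 0x1e0.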
static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
static constexpr uint32_t kFpCalleeSpillMask = 0u;

static constexpr ManagedRegister kNativeCalleeSaveRegisters[] = {
    // Core registers.
    X86ManagedRegister::FromCpuRegister(EBX),
    X86ManagedRegister::FromCpuRegister(EBP),
    X86ManagedRegister::FromCpuRegister(ESI),
    X86ManagedRegister::FromCpuRegister(EDI),
    // No hard float callee saves.
};

static constexpr uint32_t kNativeCoreCalleeSpillMask =
    CalculateCoreCalleeSpillMask(kNativeCalleeSaveRegisters);
static constexpr uint32_t kNativeFpCalleeSpillMask = 0u;

// Calling convention

ManagedRegister X86JniCallingConvention::SavedLocalReferenceCookieRegister() const {
  // EBP is a callee-save register in both the managed and native ABIs.
  // It is saved in the stack frame and has no special purpose like `tr` on arm/arm64.
  static_assert((kCoreCalleeSpillMask & (1u << EBP)) != 0u);  // Managed callee save register.
  return X86ManagedRegister::FromCpuRegister(EBP);
}

ManagedRegister X86JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop.
}

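// The first character of the shorty encodes the return type: 'V' void,
// 'J' long, 'F'/'D' floating point, anything else a 32-bit value.
// Managed code returns FP values in XMM0, while the native x86 ABI
// returns them on the x87 stack in ST0.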
static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
  if (shorty[0] == 'F' || shorty[0] == 'D') {
    if (jni) {
      return X86ManagedRegister::FromX87Register(ST0);
    } else {
      return X86ManagedRegister::FromXmmRegister(XMM0);
    }
  } else if (shorty[0] == 'J') {
    return X86ManagedRegister::FromRegisterPair(EAX_EDX);
  } else if (shorty[0] == 'V') {
    return ManagedRegister::NoRegister();
  } else {
    return X86ManagedRegister::FromCpuRegister(EAX);
  }
}

ManagedRegister X86ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), false);
}

ManagedRegister X86JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), true);
}

ManagedRegister X86JniCallingConvention::IntReturnRegister() {
  return X86ManagedRegister::FromCpuRegister(EAX);
}

// Managed runtime calling convention

ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() {
  return X86ManagedRegister::FromCpuRegister(EAX);
}

void X86ManagedRuntimeCallingConvention::ResetIterator(FrameOffset displacement) {
  ManagedRuntimeCallingConvention::ResetIterator(displacement);
  gpr_arg_count_ = 1u;  // Skip EAX for ArtMethod*.
}

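// A long occupies a register pair, so it consumes two GPR slots; all other
// non-FP arguments (including references) consume one.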
void X86ManagedRuntimeCallingConvention::Next() {
  if (!IsCurrentParamAFloatOrDouble()) {
    gpr_arg_count_ += IsCurrentParamALong() ? 2u : 1u;
  }
  ManagedRuntimeCallingConvention::Next();
}

bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  if (IsCurrentParamAFloatOrDouble()) {
    return itr_float_and_doubles_ < kManagedFpArgumentRegistersCount;
  } else {
    // Don't split a long between the last register and the stack.
    size_t extra_regs = IsCurrentParamALong() ? 1u : 0u;
    return gpr_arg_count_ + extra_regs < kManagedCoreArgumentRegistersCount;
  }
}
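
// Example: a long whose first half would land in EBX (gpr_arg_count_ == 3)
// fails the check above, so the whole value goes to the stack instead of
// being split between EBX and a stack slot.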

bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}

ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() {
  DCHECK(IsCurrentParamInRegister());
  if (IsCurrentParamAFloatOrDouble()) {
    // First four float parameters are passed via XMM0..XMM3.
    XmmRegister reg = static_cast<XmmRegister>(XMM0 + itr_float_and_doubles_);
    return X86ManagedRegister::FromXmmRegister(reg);
  } else {
    if (IsCurrentParamALong()) {
      switch (gpr_arg_count_) {
        case 1:
          static_assert(kManagedCoreArgumentRegisters[1] == ECX);
          static_assert(kManagedCoreArgumentRegisters[2] == EDX);
          return X86ManagedRegister::FromRegisterPair(ECX_EDX);
        case 2:
          static_assert(kManagedCoreArgumentRegisters[2] == EDX);
          static_assert(kManagedCoreArgumentRegisters[3] == EBX);
          return X86ManagedRegister::FromRegisterPair(EDX_EBX);
        default:
          LOG(FATAL) << "UNREACHABLE";
          UNREACHABLE();
      }
    } else {
      Register core_reg = kManagedCoreArgumentRegisters[gpr_arg_count_];
      return X86ManagedRegister::FromCpuRegister(core_reg);
    }
  }
}

FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  return FrameOffset(displacement_.Int32Value() +       // displacement
                     kFramePointerSize +                 // Method*
                     (itr_slots_ * kFramePointerSize));  // offset into in args
}
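
// Example: with 4-byte slots, the argument in the second slot (itr_slots_ == 1)
// is found at displacement + 4 (Method*) + 4 = displacement + 8.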

// JNI calling convention

X86JniCallingConvention::X86JniCallingConvention(bool is_static,
                                                 bool is_synchronized,
                                                 bool is_critical_native,
                                                 const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kX86PointerSize) {
}

uint32_t X86JniCallingConvention::CoreSpillMask() const {
  return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
}

uint32_t X86JniCallingConvention::FpSpillMask() const {
  return is_critical_native_ ? 0u : kFpCalleeSpillMask;
}

size_t X86JniCallingConvention::FrameSize() const {
  if (is_critical_native_) {
    CHECK(!SpillsMethod());
    CHECK(!HasLocalReferenceSegmentState());
    CHECK(!SpillsReturnValue());
    return 0u;  // There is no managed frame for @CriticalNative.
  }

  // Method*, PC return address, callee-save area, local reference segment state.
  DCHECK(SpillsMethod());
  const size_t method_ptr_size = static_cast<size_t>(kX86PointerSize);
  const size_t pc_return_addr_size = kFramePointerSize;
  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
  size_t total_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;

  DCHECK(HasLocalReferenceSegmentState());
  // Cookie is saved in one of the spilled registers.

  // Plus return value spill area size.
  if (SpillsReturnValue()) {
    // For 64-bit return values there shall be a 4B alignment gap between
    // the method pointer and the saved return value.
    size_t padding = ReturnValueSaveLocation().SizeValue() - method_ptr_size;
    DCHECK_EQ(padding,
              (GetReturnType() == Primitive::kPrimLong || GetReturnType() == Primitive::kPrimDouble)
                  ? 4u
                  : 0u);
    total_size += padding;
    total_size += SizeOfReturnValue();
  }

  return RoundUp(total_size, kStackAlignment);
}
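
// Example: for a normal native method this is, before alignment, 4 (Method*) +
// 4 (return PC) + 3 * 4 (EBP, ESI, EDI) = 20 bytes, which RoundUp() pads to 32
// assuming the usual 16-byte kStackAlignment.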

size_t X86JniCallingConvention::OutFrameSize() const {
  // The size of outgoing arguments.
  size_t size = GetNativeOutArgsSize(/*num_args=*/ NumberOfExtraArgumentsForJni() + NumArgs(),
                                     NumLongOrDoubleArgs());

  // @CriticalNative can use a tail call as all managed callee saves are preserved
  // by the native ABI.
  static_assert((kCoreCalleeSpillMask & ~kNativeCoreCalleeSpillMask) == 0u);
  static_assert((kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask) == 0u);

  if (UNLIKELY(IsCriticalNative())) {
    // Add return address size for @CriticalNative.
    // For normal native methods, the return PC is part of the managed stack frame
    // instead of out args.
    size += kFramePointerSize;
    // For @CriticalNative, we can make a tail call if there are no stack args,
    // the return type is not an FP type (which needs moving from ST0 to XMM0),
    // and we do not need to extend the result.
    bool return_type_ok = GetShorty()[0] == 'I' || GetShorty()[0] == 'J' || GetShorty()[0] == 'V';
    DCHECK_EQ(
        return_type_ok,
        GetShorty()[0] != 'F' && GetShorty()[0] != 'D' && !RequiresSmallResultTypeExtension());
    if (return_type_ok && size == kFramePointerSize) {
      // Note: This is not aligned to kNativeStackAlignment but that's OK for a tail call.
      static_assert(kFramePointerSize < kNativeStackAlignment);
      // The stub frame size is considered 0 in the callee, where the return PC is a part of
      // the callee frame, but it is kFramePointerSize in the compiled stub before the tail call.
      DCHECK_EQ(0u, GetCriticalNativeStubFrameSize(GetShorty(), NumArgs() + 1u));
      return kFramePointerSize;
    }
  }

  size_t out_args_size = RoundUp(size, kNativeStackAlignment);
  if (UNLIKELY(IsCriticalNative())) {
    DCHECK_EQ(out_args_size, GetCriticalNativeStubFrameSize(GetShorty(), NumArgs() + 1u));
  }
  return out_args_size;
}
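
// Example: a no-argument @CriticalNative method returning 'I', 'J' or 'V'
// accumulates only the return PC (size == kFramePointerSize), takes the early
// return above, and UseTailCall() below then reports true.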

ArrayRef<const ManagedRegister> X86JniCallingConvention::CalleeSaveRegisters() const {
  if (UNLIKELY(IsCriticalNative())) {
    // Do not spill anything, whether tail call or not (the return PC is already on the stack).
    return ArrayRef<const ManagedRegister>();
  } else {
    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
  }
}

bool X86JniCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything is passed on the stack.
}

bool X86JniCallingConvention::IsCurrentParamOnStack() {
  return true;  // Everything is passed on the stack.
}

ManagedRegister X86JniCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  UNREACHABLE();
}

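// Native arguments live below the managed frame: offsets start at the bottom
// of the out-args area (displacement_ - OutFrameSize()) and advance by one
// 4-byte slot per argument word.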
FrameOffset X86JniCallingConvention::CurrentParamStackOffset() {
  return
      FrameOffset(displacement_.Int32Value() - OutFrameSize() + (itr_slots_ * kFramePointerSize));
}

ManagedRegister X86JniCallingConvention::HiddenArgumentRegister() const {
  CHECK(IsCriticalNative());
  // EAX is neither a managed callee-save register, nor an argument register,
  // nor a scratch register.
  DCHECK(std::none_of(kCalleeSaveRegisters,
                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
                      [](ManagedRegister callee_save) constexpr {
                        return callee_save.Equals(X86ManagedRegister::FromCpuRegister(EAX));
                      }));
  return X86ManagedRegister::FromCpuRegister(EAX);
}

bool X86JniCallingConvention::UseTailCall() const {
  CHECK(IsCriticalNative());
  return OutFrameSize() == kFramePointerSize;
}

}  // namespace x86
}  // namespace art