/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_x86.h"

#include <android-base/logging.h>

#include "handle_scope-inl.h"
#include "utils/x86/managed_register_x86.h"

namespace art {
namespace x86 {

static_assert(kX86PointerSize == PointerSize::k32, "Unexpected x86 pointer size");
static_assert(kStackAlignment >= 16u, "IA-32 cdecl requires at least 16 byte stack alignment");

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    X86ManagedRegister::FromCpuRegister(EBP),
    X86ManagedRegister::FromCpuRegister(ESI),
    X86ManagedRegister::FromCpuRegister(EDI),
    // No hard float callee saves.
};

static constexpr uint32_t CalculateCoreCalleeSpillMask() {
  // The spilled PC gets a special marker.
  uint32_t result = 1 << kNumberOfCpuRegisters;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsX86().IsCpuRegister()) {
      result |= (1 << r.AsX86().AsCpuRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = 0u;
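
// For reference, assuming the hardware register encoding (EAX = 0 .. EDI = 7,
// kNumberOfCpuRegisters = 8), the core mask works out to
//   (1u << 8) | (1u << EBP) | (1u << ESI) | (1u << EDI) = 0x1e0,
// i.e. the PC marker bit plus one bit per callee-save core register.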

// Calling convention

ManagedRegister X86ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  return X86ManagedRegister::FromCpuRegister(ECX);
}

ManagedRegister X86JniCallingConvention::InterproceduralScratchRegister() {
  return X86ManagedRegister::FromCpuRegister(ECX);
}

ManagedRegister X86JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop
}

static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
  if (shorty[0] == 'F' || shorty[0] == 'D') {
    if (jni) {
      return X86ManagedRegister::FromX87Register(ST0);
    } else {
      return X86ManagedRegister::FromXmmRegister(XMM0);
    }
  } else if (shorty[0] == 'J') {
    return X86ManagedRegister::FromRegisterPair(EAX_EDX);
  } else if (shorty[0] == 'V') {
    return ManagedRegister::NoRegister();
  } else {
    return X86ManagedRegister::FromCpuRegister(EAX);
  }
}
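
// Note on shorties: the first character encodes the return type; e.g. "DIF"
// describes a method returning double and taking an int and a float. JNI stubs
// return floating-point values in ST0 to match the native cdecl ABI, while
// managed code uses XMM0; 64-bit results use the EAX/EDX pair in both cases.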

ManagedRegister X86ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), false);
}

ManagedRegister X86JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), true);
}

ManagedRegister X86JniCallingConvention::IntReturnRegister() {
  return X86ManagedRegister::FromCpuRegister(EAX);
}

// Managed runtime calling convention

ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() {
  return X86ManagedRegister::FromCpuRegister(EAX);
}

bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything is passed on the stack.
}

bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  // We assume all parameters are on the stack; args coming in via registers are
  // spilled as entry_spills.
  return true;
}


ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() {
  ManagedRegister res = ManagedRegister::NoRegister();
  if (!IsCurrentParamAFloatOrDouble()) {
    switch (gpr_arg_count_) {
      case 0:
        res = X86ManagedRegister::FromCpuRegister(ECX);
        break;
      case 1:
        res = X86ManagedRegister::FromCpuRegister(EDX);
        break;
      case 2:
        // Don't split a long between the last register and the stack.
        if (IsCurrentParamALong()) {
          return ManagedRegister::NoRegister();
        }
        res = X86ManagedRegister::FromCpuRegister(EBX);
        break;
    }
  } else if (itr_float_and_doubles_ < 4) {
    // First four float parameters are passed via XMM0..XMM3.
    res = X86ManagedRegister::FromXmmRegister(
        static_cast<XmmRegister>(XMM0 + itr_float_and_doubles_));
  }
  return res;
}
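
// Illustration (a sketch, assuming a static method with shorty "VJI"): the
// long's low half is assigned ECX and its high half EDX (see
// CurrentParamHighLongRegister() below), after which the int takes EBX.
// Float/double args use XMM0..XMM3 independently of the GPR count.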

ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamHighLongRegister() {
  ManagedRegister res = ManagedRegister::NoRegister();
  DCHECK(IsCurrentParamALong());
  switch (gpr_arg_count_) {
    case 0: res = X86ManagedRegister::FromCpuRegister(EDX); break;
    case 1: res = X86ManagedRegister::FromCpuRegister(EBX); break;
  }
  return res;
}

FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  return FrameOffset(displacement_.Int32Value() +        // displacement
                     kFramePointerSize +                  // Method*
                     (itr_slots_ * kFramePointerSize));   // offset into in args
}
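
// E.g. with a zero displacement the first argument slot is at offset 4, just
// above the Method* slot at offset 0; each further slot adds kFramePointerSize
// (4) bytes, with longs and doubles occupying two slots.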

const ManagedRegisterEntrySpills& X86ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on X86 to free them up for scratch use; we then assume
  // all arguments are on the stack.
  if (entry_spills_.size() == 0) {
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      ManagedRegister in_reg = CurrentParamRegister();
      bool is_long = IsCurrentParamALong();
      if (!in_reg.IsNoRegister()) {
        int32_t size = IsParamADouble(itr_args_) ? 8 : 4;
        int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
        ManagedRegisterSpill spill(in_reg, size, spill_offset);
        entry_spills_.push_back(spill);
        if (is_long) {
          // Special case: we need a second register for the high half.
          in_reg = CurrentParamHighLongRegister();
          DCHECK(!in_reg.IsNoRegister());
          // We have to spill the second half of the long.
          ManagedRegisterSpill spill2(in_reg, size, spill_offset + 4);
          entry_spills_.push_back(spill2);
        }

        // Keep track of the number of GPRs allocated.
        if (!IsCurrentParamAFloatOrDouble()) {
          if (is_long) {
            // The long was allocated in two registers.
            gpr_arg_count_ += 2;
          } else {
            gpr_arg_count_++;
          }
        }
      } else if (is_long) {
        // Skip the last register(s), left unused because a long must not be split
        // between a register and the stack. If we are already out of registers,
        // this is harmless.
        gpr_arg_count_ += 2;
      }
      Next();
    }
  }
  return entry_spills_;
}
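
// Worked example (a sketch, assuming a static method with shorty "VJI"): the
// long is spilled as two 4-byte halves, ECX to offset 4 and EDX to offset 8;
// the int in EBX is then spilled to offset 12. All three registers are free
// for scratch use afterwards and every argument can be loaded from the stack.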

// JNI calling convention

X86JniCallingConvention::X86JniCallingConvention(bool is_static,
                                                 bool is_synchronized,
                                                 bool is_critical_native,
                                                 const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kX86PointerSize) {
}

uint32_t X86JniCallingConvention::CoreSpillMask() const {
  return kCoreCalleeSpillMask;
}

uint32_t X86JniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}

size_t X86JniCallingConvention::FrameSize() {
  // Method*, PC return address, callee-save area, and local reference segment state.
  const size_t method_ptr_size = static_cast<size_t>(kX86PointerSize);
  const size_t pc_return_addr_size = kFramePointerSize;
  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
  size_t frame_data_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;

  if (LIKELY(HasLocalReferenceSegmentState())) {
    // Local reference segment state is sometimes excluded.
    frame_data_size += kFramePointerSize;  // local ref. segment state
  }

  // References plus link_ (pointer) and number_of_references_ (uint32_t) for the
  // HandleScope header.
  const size_t handle_scope_size = HandleScope::SizeOf(kX86PointerSize, ReferenceCount());

  size_t total_size = frame_data_size;
  if (LIKELY(HasHandleScope())) {
    // The HandleScope is sometimes excluded.
    total_size += handle_scope_size;  // handle scope size
  }

  // Plus the return value spill area size.
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
  // TODO: Same thing as x64 except using a different pointer size. Refactor?
}
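
// Worked example (a sketch, assuming 16-byte stack alignment, 4-byte stack
// references and an int return): with a ReferenceCount() of 2, frame_data_size
// is 4 (Method*) + 4 (return PC) + 12 (EBP/ESI/EDI) + 4 (segment state) = 24
// bytes, the handle scope adds 8 + 2 * 4 = 16 bytes and the return value spill
// 4 bytes, giving 44 bytes, rounded up to 48.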

size_t X86JniCallingConvention::OutArgSize() {
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}

ArrayRef<const ManagedRegister> X86JniCallingConvention::CalleeSaveRegisters() const {
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}

bool X86JniCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything is passed on the stack.
}

bool X86JniCallingConvention::IsCurrentParamOnStack() {
  return true;  // Everything is passed on the stack.
}

ManagedRegister X86JniCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  return ManagedRegister::NoRegister();
}

FrameOffset X86JniCallingConvention::CurrentParamStackOffset() {
  return FrameOffset(displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kFramePointerSize));
}
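
// Note: the displacement is adjusted down by OutArgSize() because outgoing
// arguments are stored below the frame, starting at the lowered stack pointer.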

size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() {
  size_t static_args = HasSelfClass() ? 1 : 0;  // Count jclass.
  // Regular argument parameters and this.
  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
  // Count JNIEnv* and the return pc (pushed after Method*).
  size_t internal_args = 1 /* return pc */ + (HasJniEnv() ? 1 : 0 /* jni env */);
  // No register args.
  size_t total_args = static_args + param_args + internal_args;
  return total_args;
}
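
// E.g. (a sketch) for a normal static native method taking (long, int): one
// slot for jclass, three slots for the parameters (the long takes two) and two
// slots for JNIEnv* and the return pc, i.e. six outgoing stack slots in total.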

}  // namespace x86
}  // namespace art