/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_x86.h"

#include "base/logging.h"
#include "handle_scope-inl.h"
#include "utils/x86/managed_register_x86.h"

namespace art {
namespace x86 {

static_assert(kX86PointerSize == PointerSize::k32, "Unexpected x86 pointer size");
static_assert(kStackAlignment >= 16u, "IA-32 cdecl requires at least 16 byte stack alignment");

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    X86ManagedRegister::FromCpuRegister(EBP),
    X86ManagedRegister::FromCpuRegister(ESI),
    X86ManagedRegister::FromCpuRegister(EDI),
    // No hard float callee saves.
};

static constexpr uint32_t CalculateCoreCalleeSpillMask() {
  // The spilled PC gets a special marker.
  uint32_t result = 1 << kNumberOfCpuRegisters;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsX86().IsCpuRegister()) {
      result |= (1 << r.AsX86().AsCpuRegister());
    }
  }
  return result;
}

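// For illustration only (an assumption about the enum values, not asserted
// here): with the usual IA-32 numbering EAX=0 ... EBP=5, ESI=6, EDI=7 and
// kNumberOfCpuRegisters == 8, the mask evaluates to
// (1 << 8) | (1 << 7) | (1 << 6) | (1 << 5) = 0x1E0,
// bit 8 being the special marker for the spilled return PC.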
static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = 0u;

// Calling convention

ManagedRegister X86ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  return X86ManagedRegister::FromCpuRegister(ECX);
}

ManagedRegister X86JniCallingConvention::InterproceduralScratchRegister() {
  return X86ManagedRegister::FromCpuRegister(ECX);
}

ManagedRegister X86JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop.
}

static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
  if (shorty[0] == 'F' || shorty[0] == 'D') {
    if (jni) {
      return X86ManagedRegister::FromX87Register(ST0);
    } else {
      return X86ManagedRegister::FromXmmRegister(XMM0);
    }
  } else if (shorty[0] == 'J') {
    return X86ManagedRegister::FromRegisterPair(EAX_EDX);
  } else if (shorty[0] == 'V') {
    return ManagedRegister::NoRegister();
  } else {
    return X86ManagedRegister::FromCpuRegister(EAX);
  }
}
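
// For reference: shorty[0] encodes the return type ('F' float, 'D' double,
// 'J' long, 'V' void, anything else integral or a reference). The jni/managed
// split above exists because native code follows cdecl, which returns
// floating-point values in the x87 register ST0, while managed code returns
// them in XMM0; longs come back in the EAX/EDX pair in both conventions.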

ManagedRegister X86ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), false);
}

ManagedRegister X86JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), true);
}

ManagedRegister X86JniCallingConvention::IntReturnRegister() {
  return X86ManagedRegister::FromCpuRegister(EAX);
}

// Managed runtime calling convention

ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() {
  return X86ManagedRegister::FromCpuRegister(EAX);
}

bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything is passed by stack.
}

bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  // We assume all parameters are on the stack; args arriving in registers are
  // spilled to the stack as entry_spills.
  return true;
}

ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() {
  ManagedRegister res = ManagedRegister::NoRegister();
  if (!IsCurrentParamAFloatOrDouble()) {
    switch (gpr_arg_count_) {
      case 0:
        res = X86ManagedRegister::FromCpuRegister(ECX);
        break;
      case 1:
        res = X86ManagedRegister::FromCpuRegister(EDX);
        break;
      case 2:
        // Don't split a long between the last register and the stack.
        if (IsCurrentParamALong()) {
          return ManagedRegister::NoRegister();
        }
        res = X86ManagedRegister::FromCpuRegister(EBX);
        break;
    }
  } else if (itr_float_and_doubles_ < 4) {
    // The first four float parameters are passed in XMM0..XMM3.
    res = X86ManagedRegister::FromXmmRegister(
        static_cast<XmmRegister>(XMM0 + itr_float_and_doubles_));
  }
  return res;
}
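
// Illustrative mapping for a hypothetical signature (not part of this file):
// for an instance method void f(int a, long b, float c), the rules above give
// 'this' -> ECX, 'a' -> EDX, 'b' -> entirely on the stack (a long may not
// start in the last GPR), and 'c' -> XMM0, with the Method* itself in EAX.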

ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamHighLongRegister() {
  ManagedRegister res = ManagedRegister::NoRegister();
  DCHECK(IsCurrentParamALong());
  switch (gpr_arg_count_) {
    case 0: res = X86ManagedRegister::FromCpuRegister(EDX); break;
    case 1: res = X86ManagedRegister::FromCpuRegister(EBX); break;
  }
  return res;
}

FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  return FrameOffset(displacement_.Int32Value() +       // displacement
                     kFramePointerSize +                 // Method*
                     (itr_slots_ * kFramePointerSize));  // offset into in args
}
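
// Worked example (purely illustrative): with a zero displacement and
// itr_slots_ == 2, the current parameter's home slot is at frame offset
// 0 + 4 + 2 * 4 = 12, i.e. just past the Method* slot and the first two
// 4-byte argument slots.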

const ManagedRegisterEntrySpills& X86ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on x86 to free them up for scratch use;
  // we then assume all arguments are on the stack.
  if (entry_spills_.size() == 0) {
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      ManagedRegister in_reg = CurrentParamRegister();
      bool is_long = IsCurrentParamALong();
      if (!in_reg.IsNoRegister()) {
        int32_t size = IsParamADouble(itr_args_) ? 8 : 4;
        int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
        ManagedRegisterSpill spill(in_reg, size, spill_offset);
        entry_spills_.push_back(spill);
        if (is_long) {
          // Special case: a long needs a second register here.
          in_reg = CurrentParamHighLongRegister();
          DCHECK(!in_reg.IsNoRegister());
          // We have to spill the second half of the long.
          ManagedRegisterSpill spill2(in_reg, size, spill_offset + 4);
          entry_spills_.push_back(spill2);
        }

        // Keep track of the number of GPRs allocated.
        if (!IsCurrentParamAFloatOrDouble()) {
          if (is_long) {
            // The long was allocated in two registers.
            gpr_arg_count_ += 2;
          } else {
            gpr_arg_count_++;
          }
        }
      } else if (is_long) {
        // We need to skip the unused last register, which is left empty.
        // If we are already out of registers, this is harmless.
        gpr_arg_count_ += 2;
      }
      Next();
    }
  }
  return entry_spills_;
}
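
// Sketch of the resulting spills for a hypothetical instance method
// void f(int x, long y): the loop records ECX -> frame offset 4 ('this') and
// EDX -> frame offset 8 (x); y produces no spill because it was passed fully
// on the stack (a long may not start in the last GPR), so gpr_arg_count_
// simply advances past EBX.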

// JNI calling convention

X86JniCallingConvention::X86JniCallingConvention(bool is_static,
                                                 bool is_synchronized,
                                                 bool is_critical_native,
                                                 const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kX86PointerSize) {
}

uint32_t X86JniCallingConvention::CoreSpillMask() const {
  return kCoreCalleeSpillMask;
}

uint32_t X86JniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}

size_t X86JniCallingConvention::FrameSize() {
  // Method*, PC return address and callee save area size, local reference segment state.
  const size_t method_ptr_size = static_cast<size_t>(kX86PointerSize);
  const size_t pc_return_addr_size = kFramePointerSize;
  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
  size_t frame_data_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;

  if (LIKELY(HasLocalReferenceSegmentState())) {
    // Local reference segment state is sometimes excluded.
    frame_data_size += kFramePointerSize;
  }

  // References plus link_ (pointer) and number_of_references_ (uint32_t) for the
  // HandleScope header.
  const size_t handle_scope_size = HandleScope::SizeOf(kX86PointerSize, ReferenceCount());

  size_t total_size = frame_data_size;
  if (LIKELY(HasHandleScope())) {
    // The HandleScope is sometimes excluded.
    total_size += handle_scope_size;
  }

  // Plus the return value spill area size.
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
  // TODO: Same thing as x64 except using a different pointer size. Refactor?
}
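
// Worked example (illustrative, and assuming the HandleScope header is one
// pointer plus a uint32_t with 4 bytes per reference): for a normal static
// native void f(jobject), frame data is 4 (Method*) + 4 (return PC) +
// 12 (EBP/ESI/EDI) + 4 (segment state) = 24 bytes, the handle scope covers
// two references (jclass + the jobject) for 4 + 4 + 2 * 4 = 16 bytes, and a
// void return adds nothing, so the total rounds up from 40 to 48 bytes with
// 16-byte stack alignment.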

size_t X86JniCallingConvention::OutArgSize() {
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}

ArrayRef<const ManagedRegister> X86JniCallingConvention::CalleeSaveRegisters() const {
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}

bool X86JniCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything is passed by stack.
}

bool X86JniCallingConvention::IsCurrentParamOnStack() {
  return true;  // Everything is passed by stack.
}

ManagedRegister X86JniCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  return ManagedRegister::NoRegister();
}

FrameOffset X86JniCallingConvention::CurrentParamStackOffset() {
  return FrameOffset(displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kFramePointerSize));
}
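
// Reading of the formula above (an interpretation, not stated in this file):
// the outgoing-args area is carved out below the current displacement, so
// subtracting OutArgSize() reaches the base of that area, and each itr_slots_
// step indexes one 4-byte slot up from it.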

size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() {
  size_t static_args = HasSelfClass() ? 1 : 0;  // Count jclass.
  // Regular argument parameters and this.
  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
  // Count JNIEnv* and return pc (pushed after Method*).
  size_t internal_args = 1 /* return pc */ + (HasJniEnv() ? 1 : 0 /* jni env */);
  // No register args.
  size_t total_args = static_args + param_args + internal_args;
  return total_args;
}
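
// Example (illustrative): for a normal static native void f(int a, long b),
// static_args = 1 (jclass), param_args = 2 + 1 = 3 (the long takes two slots),
// and internal_args = 2 (return pc + JNIEnv*), giving 6 outgoing slots; with
// 4-byte slots and 16-byte stack alignment, OutArgSize() rounds 24 bytes up
// to 32.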

}  // namespace x86
}  // namespace art