/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_x86_64.h"

#include <android-base/logging.h>

#include "arch/instruction_set.h"
#include "arch/x86_64/jni_frame_x86_64.h"
#include "base/bit_utils.h"
#include "utils/x86_64/managed_register_x86_64.h"

namespace art HIDDEN {
namespace x86_64 {

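// Integer-like (non-FP) arguments are passed in RDI, RSI, RDX, RCX, R8 and R9,
// in that order, following the System V AMD64 ABI; the managed ABI uses the
// same assignment, with the method pointer occupying the first slot (RDI).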
static constexpr ManagedRegister kCoreArgumentRegisters[] = {
    X86_64ManagedRegister::FromCpuRegister(RDI),
    X86_64ManagedRegister::FromCpuRegister(RSI),
    X86_64ManagedRegister::FromCpuRegister(RDX),
    X86_64ManagedRegister::FromCpuRegister(RCX),
    X86_64ManagedRegister::FromCpuRegister(R8),
    X86_64ManagedRegister::FromCpuRegister(R9),
};
static_assert(kMaxIntLikeRegisterArguments == arraysize(kCoreArgumentRegisters));

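// Registers preserved across managed calls: the six core callee-saves of the
// System V AMD64 ABI plus XMM12-XMM15, which the managed ABI additionally
// treats as callee-saves even though the native ABI preserves no FP registers.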
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    X86_64ManagedRegister::FromCpuRegister(RBX),
    X86_64ManagedRegister::FromCpuRegister(RBP),
    X86_64ManagedRegister::FromCpuRegister(R12),
    X86_64ManagedRegister::FromCpuRegister(R13),
    X86_64ManagedRegister::FromCpuRegister(R14),
    X86_64ManagedRegister::FromCpuRegister(R15),
    // Hard float registers.
    X86_64ManagedRegister::FromXmmRegister(XMM12),
    X86_64ManagedRegister::FromXmmRegister(XMM13),
    X86_64ManagedRegister::FromXmmRegister(XMM14),
    X86_64ManagedRegister::FromXmmRegister(XMM15),
};

template <size_t size>
static constexpr uint32_t CalculateCoreCalleeSpillMask(
    const ManagedRegister (&callee_saves)[size]) {
  // The spilled PC gets a special marker.
  uint32_t result = 1u << kNumberOfCpuRegisters;
  for (auto&& r : callee_saves) {
    if (r.AsX86_64().IsCpuRegister()) {
      result |= (1u << r.AsX86_64().AsCpuRegister().AsRegister());
    }
  }
  return result;
}

template <size_t size>
static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) {
  uint32_t result = 0u;
  for (auto&& r : callee_saves) {
    if (r.AsX86_64().IsXmmRegister()) {
      result |= (1u << r.AsX86_64().AsXmmRegister().AsFloatRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters);
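// A worked example of the masks above, assuming the enumerators carry the
// hardware encodings (RAX=0 .. R15=15, XMM0=0 .. XMM15=15) and
// kNumberOfCpuRegisters is 16:
//   kCoreCalleeSpillMask = (1u << 16 /* PC marker */) | (1u << 3 /* RBX */) |
//                          (1u << 5 /* RBP */) | (0xFu << 12 /* R12-R15 */)
//                        = 0x1F028;
//   kFpCalleeSpillMask   = 0xFu << 12 /* XMM12-XMM15 */ = 0xF000.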

static constexpr ManagedRegister kNativeCalleeSaveRegisters[] = {
    // Core registers.
    X86_64ManagedRegister::FromCpuRegister(RBX),
    X86_64ManagedRegister::FromCpuRegister(RBP),
    X86_64ManagedRegister::FromCpuRegister(R12),
    X86_64ManagedRegister::FromCpuRegister(R13),
    X86_64ManagedRegister::FromCpuRegister(R14),
    X86_64ManagedRegister::FromCpuRegister(R15),
    // No callee-save float registers.
};

static constexpr uint32_t kNativeCoreCalleeSpillMask =
    CalculateCoreCalleeSpillMask(kNativeCalleeSaveRegisters);
static constexpr uint32_t kNativeFpCalleeSpillMask =
    CalculateFpCalleeSpillMask(kNativeCalleeSaveRegisters);

// Calling convention

ArrayRef<const ManagedRegister> X86_64JniCallingConvention::CalleeSaveScratchRegisters() const {
  DCHECK(!IsCriticalNative());
  // All native callee-save registers are available.
  static_assert((kNativeCoreCalleeSpillMask & ~kCoreCalleeSpillMask) == 0u);
  static_assert(kNativeFpCalleeSpillMask == 0u);
  return ArrayRef<const ManagedRegister>(kNativeCalleeSaveRegisters);
}

ArrayRef<const ManagedRegister> X86_64JniCallingConvention::ArgumentScratchRegisters() const {
  DCHECK(!IsCriticalNative());
  ArrayRef<const ManagedRegister> scratch_regs(kCoreArgumentRegisters);
  DCHECK(std::none_of(scratch_regs.begin(),
                      scratch_regs.end(),
                      [return_reg = ReturnRegister().AsX86_64()](ManagedRegister reg) {
                        return return_reg.Overlaps(reg.AsX86_64());
                      }));
  return scratch_regs;
}

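// Map the return type (the first shorty character) to the register holding
// the result: XMM0 for 'F'/'D', no register for 'V', and RAX for everything
// else (all integral and reference types).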
static ManagedRegister ReturnRegisterForShorty(std::string_view shorty, [[maybe_unused]] bool jni) {
  if (shorty[0] == 'F' || shorty[0] == 'D') {
    return X86_64ManagedRegister::FromXmmRegister(XMM0);
  } else if (shorty[0] == 'J') {
    return X86_64ManagedRegister::FromCpuRegister(RAX);
  } else if (shorty[0] == 'V') {
    return ManagedRegister::NoRegister();
  } else {
    return X86_64ManagedRegister::FromCpuRegister(RAX);
  }
}

ManagedRegister X86_64ManagedRuntimeCallingConvention::ReturnRegister() const {
  return ReturnRegisterForShorty(GetShorty(), false);
}

ManagedRegister X86_64JniCallingConvention::ReturnRegister() const {
  return ReturnRegisterForShorty(GetShorty(), true);
}

ManagedRegister X86_64JniCallingConvention::IntReturnRegister() const {
  return X86_64ManagedRegister::FromCpuRegister(RAX);
}

// Managed runtime calling convention

ManagedRegister X86_64ManagedRuntimeCallingConvention::MethodRegister() {
  return X86_64ManagedRegister::FromCpuRegister(RDI);
}

ManagedRegister X86_64ManagedRuntimeCallingConvention::ArgumentRegisterForMethodExitHook() {
  return X86_64ManagedRegister::FromCpuRegister(R8);
}

bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  if (IsCurrentParamAFloatOrDouble()) {
    return itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments;
  } else {
    size_t non_fp_arg_number = itr_args_ - itr_float_and_doubles_;
    return /* method */ 1u + non_fp_arg_number < kMaxIntLikeRegisterArguments;
  }
}

bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}

ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  DCHECK(IsCurrentParamInRegister());
  if (IsCurrentParamAFloatOrDouble()) {
    // First eight float parameters are passed via XMM0..XMM7
    FloatRegister fp_reg = static_cast<FloatRegister>(XMM0 + itr_float_and_doubles_);
    return X86_64ManagedRegister::FromXmmRegister(fp_reg);
  } else {
    size_t non_fp_arg_number = itr_args_ - itr_float_and_doubles_;
    return kCoreArgumentRegisters[/* method */ 1u + non_fp_arg_number];
  }
}

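// Note that managed arguments occupy 32-bit stack slots, so the offset below
// advances by sizeof(uint32_t) per slot rather than by the native word size.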
FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  return FrameOffset(displacement_.Int32Value() +  // displacement
                     static_cast<size_t>(kX86_64PointerSize) +  // Method ref
                     itr_slots_ * sizeof(uint32_t));  // offset into in args
}

// JNI calling convention

X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static,
                                                       bool is_synchronized,
                                                       bool is_fast_native,
                                                       bool is_critical_native,
                                                       std::string_view shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_fast_native,
                           is_critical_native,
                           shorty,
                           kX86_64PointerSize) {
}

uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
  return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
}

uint32_t X86_64JniCallingConvention::FpSpillMask() const {
  return is_critical_native_ ? 0u : kFpCalleeSpillMask;
}

size_t X86_64JniCallingConvention::FrameSize() const {
  if (is_critical_native_) {
    CHECK(!SpillsMethod());
    CHECK(!HasLocalReferenceSegmentState());
    return 0u;  // There is no managed frame for @CriticalNative.
  }

  // Method*, PC return address, callee-save area, and local reference segment state.
  DCHECK(SpillsMethod());
  const size_t method_ptr_size = static_cast<size_t>(kX86_64PointerSize);
  const size_t pc_return_addr_size = kFramePointerSize;
  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
  size_t total_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;
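  // For reference, with the ten managed callee-saves above (six core + four
  // XMM) this amounts to 8 (Method*) + 8 (return PC) + 10 * 8 (spills)
  // = 96 bytes, already a multiple of the 16-byte stack alignment (assuming
  // kStackAlignment is 16, as elsewhere in ART).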

  DCHECK(HasLocalReferenceSegmentState());
  // Cookie is saved in one of the spilled registers.

  return RoundUp(total_size, kStackAlignment);
}

size_t X86_64JniCallingConvention::OutFrameSize() const {
  // Count param args, including JNIEnv* and jclass*.
  size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs();
  size_t num_fp_args = NumFloatOrDoubleArgs();
  DCHECK_GE(all_args, num_fp_args);
  size_t num_non_fp_args = all_args - num_fp_args;
  // The size of outgoing arguments.
  size_t size = GetNativeOutArgsSize(num_fp_args, num_non_fp_args);

  if (UNLIKELY(IsCriticalNative())) {
    // We always need to spill xmm12-xmm15 as they are managed callee-saves
    // but not native callee-saves.
    static_assert((kCoreCalleeSpillMask & ~kNativeCoreCalleeSpillMask) == 0u);
    static_assert((kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask) != 0u);
    static_assert(
        kAlwaysSpilledMmxRegisters == POPCOUNT(kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask));
    size += kAlwaysSpilledMmxRegisters * kMmxSpillSize;
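    // (Assuming kMmxSpillSize is 8 bytes, this adds 4 * 8 = 32 bytes of spill
    // space before the return address below.)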
    // Add the return address size for @CriticalNative; for normal native
    // methods, the return PC is part of the managed stack frame instead of
    // the out args.
    size += kFramePointerSize;
  }

  size_t out_args_size = RoundUp(size, kNativeStackAlignment);
  if (UNLIKELY(IsCriticalNative())) {
    DCHECK_EQ(out_args_size, GetCriticalNativeStubFrameSize(GetShorty()));
  }
  return out_args_size;
}

ArrayRef<const ManagedRegister> X86_64JniCallingConvention::CalleeSaveRegisters() const {
  if (UNLIKELY(IsCriticalNative())) {
    DCHECK(!UseTailCall());
    static_assert(std::size(kCalleeSaveRegisters) > std::size(kNativeCalleeSaveRegisters));
    // TODO: Change to static_assert; std::equal should be constexpr since C++20.
    DCHECK(std::equal(kCalleeSaveRegisters,
                      kCalleeSaveRegisters + std::size(kNativeCalleeSaveRegisters),
                      kNativeCalleeSaveRegisters,
                      [](ManagedRegister lhs, ManagedRegister rhs) { return lhs.Equals(rhs); }));
    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
        /*pos=*/ std::size(kNativeCalleeSaveRegisters));
  } else {
    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
  }
}

bool X86_64JniCallingConvention::IsCurrentParamInRegister() {
  return !IsCurrentParamOnStack();
}

bool X86_64JniCallingConvention::IsCurrentParamOnStack() {
  return CurrentParamRegister().IsNoRegister();
}

ManagedRegister X86_64JniCallingConvention::CurrentParamRegister() {
  ManagedRegister res = ManagedRegister::NoRegister();
  if (!IsCurrentParamAFloatOrDouble()) {
    switch (itr_args_ - itr_float_and_doubles_) {
    case 0: res = X86_64ManagedRegister::FromCpuRegister(RDI); break;
    case 1: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
    case 2: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
    case 3: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
    case 4: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
    case 5: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
    static_assert(5u == kMaxIntLikeRegisterArguments - 1, "Missing case statement(s)");
    }
  } else if (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments) {
    // First eight float parameters are passed via XMM0..XMM7
    res = X86_64ManagedRegister::FromXmmRegister(
                                 static_cast<FloatRegister>(XMM0 + itr_float_and_doubles_));
  }
  return res;
}

FrameOffset X86_64JniCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  size_t args_on_stack = itr_args_
      - std::min(kMaxFloatOrDoubleRegisterArguments,
                 static_cast<size_t>(itr_float_and_doubles_))
          // Float arguments passed through Xmm0..Xmm7
      - std::min(kMaxIntLikeRegisterArguments,
                 static_cast<size_t>(itr_args_ - itr_float_and_doubles_));
          // Integer arguments passed through GPR
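          // E.g. having iterated past 10 FP and 8 integer-like args, the
          // first 8 FP and 6 integer-like args were passed in registers,
          // leaving (10 - 8) + (8 - 6) = 4 args on the stack.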
  size_t offset = displacement_.Int32Value() - OutFrameSize() + (args_on_stack * kFramePointerSize);
  CHECK_LT(offset, OutFrameSize());
  return FrameOffset(offset);
}

ManagedRegister X86_64JniCallingConvention::LockingArgumentRegister() const {
  DCHECK(!IsFastNative());
  DCHECK(!IsCriticalNative());
  DCHECK(IsSynchronized());
  // The callee-save register RBX is suitable as a locking argument.
  static_assert(kCalleeSaveRegisters[0].Equals(X86_64ManagedRegister::FromCpuRegister(RBX)));
  return X86_64ManagedRegister::FromCpuRegister(RBX);
}

ManagedRegister X86_64JniCallingConvention::HiddenArgumentRegister() const {
  CHECK(IsCriticalNative());
  // RAX is neither a managed callee-save, nor an argument register, nor a scratch register.
  DCHECK(std::none_of(kCalleeSaveRegisters,
                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
                      [](ManagedRegister callee_save) constexpr {
                        return callee_save.Equals(X86_64ManagedRegister::FromCpuRegister(RAX));
                      }));
  return X86_64ManagedRegister::FromCpuRegister(RAX);
}

// Whether to use a tail call (used only for @CriticalNative).
bool X86_64JniCallingConvention::UseTailCall() const {
  CHECK(IsCriticalNative());
  // We always need to spill xmm12-xmm15 as they are managed callee-saves
  // but not native callee-saves, so we can never use a tail call.
  return false;
}

}  // namespace x86_64
}  // namespace art