/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_mips64.h"

#include <android-base/logging.h>

#include "arch/instruction_set.h"
#include "handle_scope-inl.h"
#include "utils/mips64/managed_register_mips64.h"

namespace art {
namespace mips64 {

// Up to how many args can be enregistered. The rest of the args must go on the stack.
constexpr size_t kMaxRegisterArguments = 8u;

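// Note: in the MIPS64 N64 ABI, the integer and FP argument registers are
// allocated in parallel slots, i.e. argument i may use either A<i> or F(12+i).
// This is why a single index can walk both register arrays below.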
static const GpuRegister kGpuArgumentRegisters[] = {
  A0, A1, A2, A3, A4, A5, A6, A7
};

static const FpuRegister kFpuArgumentRegisters[] = {
  F12, F13, F14, F15, F16, F17, F18, F19
};

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    Mips64ManagedRegister::FromGpuRegister(S2),
    Mips64ManagedRegister::FromGpuRegister(S3),
    Mips64ManagedRegister::FromGpuRegister(S4),
    Mips64ManagedRegister::FromGpuRegister(S5),
    Mips64ManagedRegister::FromGpuRegister(S6),
    Mips64ManagedRegister::FromGpuRegister(S7),
    Mips64ManagedRegister::FromGpuRegister(GP),
    Mips64ManagedRegister::FromGpuRegister(S8),
    // No hard float callee saves.
};

static constexpr uint32_t CalculateCoreCalleeSpillMask() {
  // RA is a special callee save which is not reported by CalleeSaveRegisters().
  uint32_t result = 1 << RA;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsMips64().IsGpuRegister()) {
      result |= (1 << r.AsMips64().AsGpuRegister());
    }
  }
  return result;
}

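// With the standard MIPS GPR numbering (S2..S7 = $18..$23, GP = $28,
// S8 = $30, RA = $31), kCoreCalleeSpillMask below evaluates to 0xD0FC0000.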
static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = 0u;

// Calling convention
ManagedRegister Mips64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  return Mips64ManagedRegister::FromGpuRegister(T9);
}

ManagedRegister Mips64JniCallingConvention::InterproceduralScratchRegister() {
  return Mips64ManagedRegister::FromGpuRegister(T9);
}

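// The shorty's first character encodes the return type ('V' void, 'F' float,
// 'D' double, 'J' long, 'L' reference, etc.); everything that is neither void
// nor floating point comes back in V0.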
static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
  if (shorty[0] == 'F' || shorty[0] == 'D') {
    return Mips64ManagedRegister::FromFpuRegister(F0);
  } else if (shorty[0] == 'V') {
    return Mips64ManagedRegister::NoRegister();
  } else {
    return Mips64ManagedRegister::FromGpuRegister(V0);
  }
}

ManagedRegister Mips64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Mips64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Mips64JniCallingConvention::IntReturnRegister() {
  return Mips64ManagedRegister::FromGpuRegister(V0);
}

// Managed runtime calling convention

ManagedRegister Mips64ManagedRuntimeCallingConvention::MethodRegister() {
  return Mips64ManagedRegister::FromGpuRegister(A0);
}

bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything moved to stack on entry.
}

bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return true;
}

ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  UNREACHABLE();
}

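// Managed-code arguments sit above the ArtMethod* reference in 32-bit vreg
// slots, so (assuming the 8-byte mips64 frame pointer size) the first argument
// slot is at displacement + 8, the next 32-bit slot at displacement + 12, and
// so on.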
FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  FrameOffset result =
      FrameOffset(displacement_.Int32Value() +  // displacement
                  kFramePointerSize +  // Method ref
                  (itr_slots_ * sizeof(uint32_t)));  // offset into in args
  return result;
}

const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on MIPS64 to free them up for scratch use;
  // we then assume all arguments are on the stack.
  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
    int reg_index = 1;  // We start from A1; A0 holds the ArtMethod*.

    // We need to choose the correct register size since the managed
    // stack uses 32-bit stack slots.
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      if (reg_index < 8) {
        if (IsCurrentParamAFloatOrDouble()) {  // FP regs.
          FpuRegister arg = kFpuArgumentRegisters[reg_index];
          Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(arg);
          entry_spills_.push_back(reg, IsCurrentParamADouble() ? 8 : 4);
        } else {  // GP regs.
          GpuRegister arg = kGpuArgumentRegisters[reg_index];
          Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(arg);
          entry_spills_.push_back(reg,
                                  (IsCurrentParamALong() && (!IsCurrentParamAReference())) ? 8 : 4);
        }
        // Each argument consumes the next slot's GPR or FPR, e.g. for
        // FP args in positions 3, 5 and 6: A1, A2, F15, A4, F17, F18, A7.
        reg_index++;
      }

      Next();
    }
  }
  return entry_spills_;
}


// JNI calling convention

Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static,
                                                       bool is_synchronized,
                                                       bool is_critical_native,
                                                       const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kMips64PointerSize) {
}

uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
  return kCoreCalleeSpillMask;
}

uint32_t Mips64JniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}

ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
  return Mips64ManagedRegister::FromGpuRegister(AT);
}

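// A sketch of what FrameSize() computes, assuming the 8-byte mips64 frame
// pointer size and ART's 16-byte stack alignment:
//   ArtMethod* (8) + RA plus the 8 callee saves (9 * 8 = 72)
//   + local reference segment state cookie (4, when present)
//   + handle scope (header + references, when present)
//   + return value spill area, all rounded up to 16.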
size_t Mips64JniCallingConvention::FrameSize() {
  // ArtMethod*, RA and callee save area size, local reference segment state.
  size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
  size_t ra_and_callee_save_area_size = (CalleeSaveRegisters().size() + 1) * kFramePointerSize;

  size_t frame_data_size = method_ptr_size + ra_and_callee_save_area_size;
  if (LIKELY(HasLocalReferenceSegmentState())) {
    // The local reference segment state is excluded for some calls (e.g. @CriticalNative).
    frame_data_size += sizeof(uint32_t);
  }
  // References plus 2 words for the HandleScope header.
  size_t handle_scope_size = HandleScope::SizeOf(kMips64PointerSize, ReferenceCount());

  size_t total_size = frame_data_size;
  if (LIKELY(HasHandleScope())) {
    // The handle scope is likewise excluded for some calls.
    total_size += handle_scope_size;
  }

  // Plus return value spill area size.
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
}


size_t Mips64JniCallingConvention::OutArgSize() {
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}

ArrayRef<const ManagedRegister> Mips64JniCallingConvention::CalleeSaveRegisters() const {
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}

bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
  return itr_args_ < kMaxRegisterArguments;
}

bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}

ManagedRegister Mips64JniCallingConvention::CurrentParamRegister() {
  CHECK(IsCurrentParamInRegister());
  if (IsCurrentParamAFloatOrDouble()) {
    return Mips64ManagedRegister::FromFpuRegister(kFpuArgumentRegisters[itr_args_]);
  } else {
    return Mips64ManagedRegister::FromGpuRegister(kGpuArgumentRegisters[itr_args_]);
  }
}

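// Stack arguments are stored in the out-args area at the bottom of the frame;
// its base is displacement_ - OutArgSize(), and each argument beyond the
// eighth occupies one 8-byte slot above that base.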
FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  size_t args_on_stack = itr_args_ - kMaxRegisterArguments;
  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
}

size_t Mips64JniCallingConvention::NumberOfOutgoingStackArgs() {
  // All arguments, including the extra JNI args (JNIEnv* and, for static
  // methods, the jclass).
  size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();

  // Nothing goes on the stack unless there are more than kMaxRegisterArguments (8) arguments.
  return (all_args > kMaxRegisterArguments) ? all_args - kMaxRegisterArguments : 0;
}

}  // namespace mips64
}  // namespace art