1 /*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "calling_convention_mips64.h"
18
19 #include <android-base/logging.h>
20
21 #include "handle_scope-inl.h"
22 #include "utils/mips64/managed_register_mips64.h"
23
24 namespace art {
25 namespace mips64 {
26
// Up to how many args can be enregistered. The rest of the args must go on the stack.
constexpr size_t kMaxRegisterArguments = 8u;

// Integer/pointer argument registers per the MIPS64 N64 ABI (A0-A7).
static const GpuRegister kGpuArgumentRegisters[] = {
  A0, A1, A2, A3, A4, A5, A6, A7
};

// Floating-point argument registers (F12-F19); FP and GP args share slot indices.
static const FpuRegister kFpuArgumentRegisters[] = {
  F12, F13, F14, F15, F16, F17, F18, F19
};
37
// Callee-save registers the JNI stub must preserve across the native call.
// RA is also saved but handled separately (see CalculateCoreCalleeSpillMask).
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    Mips64ManagedRegister::FromGpuRegister(S2),
    Mips64ManagedRegister::FromGpuRegister(S3),
    Mips64ManagedRegister::FromGpuRegister(S4),
    Mips64ManagedRegister::FromGpuRegister(S5),
    Mips64ManagedRegister::FromGpuRegister(S6),
    Mips64ManagedRegister::FromGpuRegister(S7),
    Mips64ManagedRegister::FromGpuRegister(GP),
    Mips64ManagedRegister::FromGpuRegister(S8),
    // No hard float callee saves.
};
50
CalculateCoreCalleeSpillMask()51 static constexpr uint32_t CalculateCoreCalleeSpillMask() {
52 // RA is a special callee save which is not reported by CalleeSaveRegisters().
53 uint32_t result = 1 << RA;
54 for (auto&& r : kCalleeSaveRegisters) {
55 if (r.AsMips64().IsGpuRegister()) {
56 result |= (1 << r.AsMips64().AsGpuRegister());
57 }
58 }
59 return result;
60 }
61
// Spill masks computed once at compile time; no FP callee saves on MIPS64.
static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = 0u;
64
65 // Calling convention
// T9 is the standard MIPS call-target/scratch register; safe to clobber across calls.
ManagedRegister Mips64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  return Mips64ManagedRegister::FromGpuRegister(T9);
}
69
// Same scratch register as the managed convention; T9 is caller-clobbered.
ManagedRegister Mips64JniCallingConvention::InterproceduralScratchRegister() {
  return Mips64ManagedRegister::FromGpuRegister(T9);
}
73
ReturnRegisterForShorty(const char * shorty)74 static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
75 if (shorty[0] == 'F' || shorty[0] == 'D') {
76 return Mips64ManagedRegister::FromFpuRegister(F0);
77 } else if (shorty[0] == 'V') {
78 return Mips64ManagedRegister::NoRegister();
79 } else {
80 return Mips64ManagedRegister::FromGpuRegister(V0);
81 }
82 }
83
// Return register for the managed ABI, derived from the method shorty.
ManagedRegister Mips64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}
87
// Return register for the JNI (native) ABI; same mapping as the managed ABI.
ManagedRegister Mips64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}
91
// Integer return register regardless of shorty (used e.g. for jboolean widening).
ManagedRegister Mips64JniCallingConvention::IntReturnRegister() {
  return Mips64ManagedRegister::FromGpuRegister(V0);
}
95
96 // Managed runtime calling convention
97
// A0 carries the ArtMethod* on entry to managed code.
ManagedRegister Mips64ManagedRuntimeCallingConvention::MethodRegister() {
  return Mips64ManagedRegister::FromGpuRegister(A0);
}
101
// The managed convention treats all params as stack-resident (see EntrySpills).
bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything moved to stack on entry.
}
105
// Complement of IsCurrentParamInRegister(): always stack in the managed view.
bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return true;
}
109
// Unreachable: IsCurrentParamInRegister() is always false for this convention.
ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  return ManagedRegister::NoRegister();
}
114
CurrentParamStackOffset()115 FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
116 CHECK(IsCurrentParamOnStack());
117 FrameOffset result =
118 FrameOffset(displacement_.Int32Value() + // displacement
119 kFramePointerSize + // Method ref
120 (itr_slots_ * sizeof(uint32_t))); // offset into in args
121 return result;
122 }
123
EntrySpills()124 const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySpills() {
125 // We spill the argument registers on MIPS64 to free them up for scratch use,
126 // we then assume all arguments are on the stack.
127 if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
128 int reg_index = 1; // we start from A1, A0 holds ArtMethod*.
129
130 // We need to choose the correct register size since the managed
131 // stack uses 32bit stack slots.
132 ResetIterator(FrameOffset(0));
133 while (HasNext()) {
134 if (reg_index < 8) {
135 if (IsCurrentParamAFloatOrDouble()) { // FP regs.
136 FpuRegister arg = kFpuArgumentRegisters[reg_index];
137 Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(arg);
138 entry_spills_.push_back(reg, IsCurrentParamADouble() ? 8 : 4);
139 } else { // GP regs.
140 GpuRegister arg = kGpuArgumentRegisters[reg_index];
141 Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(arg);
142 entry_spills_.push_back(reg,
143 (IsCurrentParamALong() && (!IsCurrentParamAReference())) ? 8 : 4);
144 }
145 // e.g. A1, A2, F3, A4, F5, F6, A7
146 reg_index++;
147 }
148
149 Next();
150 }
151 }
152 return entry_spills_;
153 }
154
155 // JNI calling convention
156
// Forwards to the generic JniCallingConvention with the MIPS64 pointer size;
// all MIPS64-specific behavior lives in the overridden virtuals below.
Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static,
                                                       bool is_synchronized,
                                                       bool is_critical_native,
                                                       const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kMips64PointerSize) {
}
167
// Bitmask of core registers saved in the stub prologue (S2-S8, GP, RA).
uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
  return kCoreCalleeSpillMask;
}
171
// No floating-point callee saves on MIPS64, so the FP spill mask is empty.
uint32_t Mips64JniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}
175
// AT (the assembler temporary) is used as scratch when moving the return value.
ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
  return Mips64ManagedRegister::FromGpuRegister(AT);
}
179
FrameSize()180 size_t Mips64JniCallingConvention::FrameSize() {
181 // ArtMethod*, RA and callee save area size, local reference segment state.
182 size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
183 size_t ra_and_callee_save_area_size = (CalleeSaveRegisters().size() + 1) * kFramePointerSize;
184
185 size_t frame_data_size = method_ptr_size + ra_and_callee_save_area_size;
186 if (LIKELY(HasLocalReferenceSegmentState())) { // Local ref. segment state.
187 // Local reference segment state is sometimes excluded.
188 frame_data_size += sizeof(uint32_t);
189 }
190 // References plus 2 words for HandleScope header.
191 size_t handle_scope_size = HandleScope::SizeOf(kMips64PointerSize, ReferenceCount());
192
193 size_t total_size = frame_data_size;
194 if (LIKELY(HasHandleScope())) {
195 // HandleScope is sometimes excluded.
196 total_size += handle_scope_size; // Handle scope size.
197 }
198
199 // Plus return value spill area size.
200 total_size += SizeOfReturnValue();
201
202 return RoundUp(total_size, kStackAlignment);
203 }
204
// Stack space needed for outgoing args that overflow the 8 argument registers,
// one pointer-sized slot each, rounded up to the stack alignment.
size_t Mips64JniCallingConvention::OutArgSize() {
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}
208
// View over the static callee-save list (RA excluded; see CoreSpillMask).
ArrayRef<const ManagedRegister> Mips64JniCallingConvention::CalleeSaveRegisters() const {
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}
212
// The first kMaxRegisterArguments (8) args travel in registers.
bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
  return itr_args_ < kMaxRegisterArguments;
}
216
// Any argument not in a register is on the stack.
bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}
220
CurrentParamRegister()221 ManagedRegister Mips64JniCallingConvention::CurrentParamRegister() {
222 CHECK(IsCurrentParamInRegister());
223 if (IsCurrentParamAFloatOrDouble()) {
224 return Mips64ManagedRegister::FromFpuRegister(kFpuArgumentRegisters[itr_args_]);
225 } else {
226 return Mips64ManagedRegister::FromGpuRegister(kGpuArgumentRegisters[itr_args_]);
227 }
228 }
229
// Frame offset of a stack-passed argument: its index among stack args (after
// the 8 register args) times the slot size, within the out-args area.
FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  size_t args_on_stack = itr_args_ - kMaxRegisterArguments;
  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
}
237
NumberOfOutgoingStackArgs()238 size_t Mips64JniCallingConvention::NumberOfOutgoingStackArgs() {
239 // all arguments including JNI args
240 size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
241
242 // Nothing on the stack unless there are more than 8 arguments
243 return (all_args > kMaxRegisterArguments) ? all_args - kMaxRegisterArguments : 0;
244 }
245 } // namespace mips64
246 } // namespace art
247