1 /*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "calling_convention_mips64.h"
18
19 #include "base/logging.h"
20 #include "handle_scope-inl.h"
21 #include "utils/mips64/managed_register_mips64.h"
22
23 namespace art {
24 namespace mips64 {
25
// Integer (GPU) argument registers, indexed by argument position.
static const GpuRegister kGpuArgumentRegisters[] = {
  A0, A1, A2, A3, A4, A5, A6, A7
};

// Floating-point (FPU) argument registers, indexed by argument position.
static const FpuRegister kFpuArgumentRegisters[] = {
  F12, F13, F14, F15, F16, F17, F18, F19
};
33
34 // Calling convention
InterproceduralScratchRegister()35 ManagedRegister Mips64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
36 return Mips64ManagedRegister::FromGpuRegister(T9);
37 }
38
InterproceduralScratchRegister()39 ManagedRegister Mips64JniCallingConvention::InterproceduralScratchRegister() {
40 return Mips64ManagedRegister::FromGpuRegister(T9);
41 }
42
ReturnRegisterForShorty(const char * shorty)43 static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
44 if (shorty[0] == 'F' || shorty[0] == 'D') {
45 return Mips64ManagedRegister::FromFpuRegister(F0);
46 } else if (shorty[0] == 'V') {
47 return Mips64ManagedRegister::NoRegister();
48 } else {
49 return Mips64ManagedRegister::FromGpuRegister(V0);
50 }
51 }
52
// Managed-code return register, chosen from the method shorty's return type.
ManagedRegister Mips64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}
56
// JNI return register; same shorty-based selection as the managed convention.
ManagedRegister Mips64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}
60
// Register holding integer return values from native calls: V0.
ManagedRegister Mips64JniCallingConvention::IntReturnRegister() {
  return Mips64ManagedRegister::FromGpuRegister(V0);
}
64
65 // Managed runtime calling convention
66
// A0 holds the ArtMethod* on entry to managed code (see also EntrySpills(),
// which starts spilling arguments at A1 for this reason).
ManagedRegister Mips64ManagedRuntimeCallingConvention::MethodRegister() {
  return Mips64ManagedRegister::FromGpuRegister(A0);
}
70
// The managed runtime convention never reads parameters from registers:
// EntrySpills() moves everything to the stack on entry.
bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything moved to stack on entry.
}
74
// Counterpart of IsCurrentParamInRegister(): every parameter is stack-resident.
bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return true;
}
78
// Unreachable: IsCurrentParamInRegister() always returns false for the managed
// runtime convention, so no caller should ask for a parameter register.
ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  return ManagedRegister::NoRegister();
}
83
CurrentParamStackOffset()84 FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
85 CHECK(IsCurrentParamOnStack());
86 FrameOffset result =
87 FrameOffset(displacement_.Int32Value() + // displacement
88 kFramePointerSize + // Method ref
89 (itr_slots_ * sizeof(uint32_t))); // offset into in args
90 return result;
91 }
92
// Builds (lazily, on first call) the list of argument registers to spill on
// entry, each paired with its spill size: 8 bytes for doubles and non-reference
// longs, 4 bytes otherwise. Arguments beyond the eighth register position get
// no spill entry — the iterator still advances past them via Next().
const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on MIPS64 to free them up for scratch use,
  // we then assume all arguments are on the stack.
  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
    int reg_index = 1;  // we start from A1, A0 holds ArtMethod*.

    // We need to choose the correct register size since the managed
    // stack uses 32bit stack slots.
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      if (reg_index < 8) {  // Only 8 argument positions exist per register bank.
        if (IsCurrentParamAFloatOrDouble()) {  // FP regs.
          // Note: FP and GP banks share the same position index, so a float
          // arg at position 3 lands in F15, not the next free FP register.
          FpuRegister arg = kFpuArgumentRegisters[reg_index];
          Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(arg);
          entry_spills_.push_back(reg, IsCurrentParamADouble() ? 8 : 4);
        } else {  // GP regs.
          GpuRegister arg = kGpuArgumentRegisters[reg_index];
          Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(arg);
          entry_spills_.push_back(reg,
                                  (IsCurrentParamALong() && (!IsCurrentParamAReference())) ? 8 : 4);
        }
        // e.g. A1, A2, F3, A4, F5, F6, A7
        reg_index++;
      }

      Next();
    }
  }
  return entry_spills_;
}
123
124 // JNI calling convention
125
// Constructs the MIPS64 JNI calling convention and registers the callee-save
// registers preserved by JNI stubs. The set (S2-S7, GP, S8) must stay in
// agreement with CoreSpillMask() below.
Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
                                                       const char* shorty)
    : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S2));
  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S3));
  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S4));
  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S5));
  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S6));
  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S7));
  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(GP));
  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S8));
}
138
CoreSpillMask() const139 uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
140 // Compute spill mask to agree with callee saves initialized in the constructor
141 uint32_t result = 0;
142 result = 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 | 1 << S7 | 1 << GP | 1 << S8 | 1 << RA;
143 return result;
144 }
145
// Scratch register available while handling the return value: AT.
ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
  return Mips64ManagedRegister::FromGpuRegister(AT);
}
149
// Total JNI stub frame size: method pointer, callee saves, local reference
// segment state, handle scope, and return-value spill area, rounded up to the
// stack alignment.
size_t Mips64JniCallingConvention::FrameSize() {
  // Method* and callee save area size, local reference segment state
  size_t frame_data_size = kFramePointerSize +
      CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
  // References plus 2 words for HandleScope header
  size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
  // Plus return value spill area size
  return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
159
// Bytes of outgoing stack argument space, rounded up to the stack alignment.
size_t Mips64JniCallingConvention::OutArgSize() {
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}
163
IsCurrentParamInRegister()164 bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
165 return itr_args_ < 8;
166 }
167
// A parameter lives on the stack exactly when it is not passed in a register.
bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}
171
// Register carrying the current parameter. Both register banks are indexed by
// the same argument position (itr_args_): a float in position N uses the Nth
// FPU argument register, skipping the Nth GPU one.
ManagedRegister Mips64JniCallingConvention::CurrentParamRegister() {
  CHECK(IsCurrentParamInRegister());
  if (IsCurrentParamAFloatOrDouble()) {
    return Mips64ManagedRegister::FromFpuRegister(kFpuArgumentRegisters[itr_args_]);
  } else {
    return Mips64ManagedRegister::FromGpuRegister(kGpuArgumentRegisters[itr_args_]);
  }
}
180
// Stack offset of the current (stack-resident) parameter. Positions 0-7 are in
// registers, so the stack index is itr_args_ - 8, with one kFramePointerSize
// slot per argument.
FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_args_ - 8) * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
}
187
NumberOfOutgoingStackArgs()188 size_t Mips64JniCallingConvention::NumberOfOutgoingStackArgs() {
189 // all arguments including JNI args
190 size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
191
192 // Nothing on the stack unless there are more than 8 arguments
193 return (all_args > 8) ? all_args - 8 : 0;
194 }
195 } // namespace mips64
196 } // namespace art
197