1 /*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "codegen_mips.h"
18
19 #include <inttypes.h>
20
21 #include <string>
22
23 #include "arch/mips/instruction_set_features_mips.h"
24 #include "backend_mips.h"
25 #include "base/logging.h"
26 #include "dex/compiler_ir.h"
27 #include "dex/quick/mir_to_lir-inl.h"
28 #include "driver/compiler_driver.h"
29 #include "mips_lir.h"
30
31 namespace art {
32
33 static constexpr RegStorage core_regs_arr_32[] =
34 {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32,
35 rs_rT2_32, rs_rT3_32, rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rS0, rs_rS1, rs_rS2,
36 rs_rS3, rs_rS4, rs_rS5, rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP,
37 rs_rRA};
38 static constexpr RegStorage sp_regs_arr_32[] =
39 {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
40 rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
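// Note: the _fr0/_fr1 array variants below correspond to the two MIPS32 FPU modes selected via
// fpuIs32Bit_ during register-pool setup: with a 32-bit FPU (FR=0) each double overlays an
// even/odd pair of singles, while with a 64-bit FPU (FR=1) each double is a full 64-bit register.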
41 static constexpr RegStorage dp_fr0_regs_arr_32[] =
42 {rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
43 rs_rD7_fr0};
44 static constexpr RegStorage dp_fr1_regs_arr_32[] =
45 {rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
46 rs_rD7_fr1};
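// rS0 and rS1 are reserved because they back the kSuspend and kSelf special registers (see
// TargetReg() below); rZERO, rAT, rK0/rK1, rGP, rSP and rRA have fixed hardware/ABI roles.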
47 static constexpr RegStorage reserved_regs_arr_32[] =
48 {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
49 static constexpr RegStorage core_temps_arr_32[] =
50 {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32, rs_rT2_32, rs_rT3_32,
51 rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rT8};
52 static constexpr RegStorage sp_fr0_temps_arr_32[] =
53 {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
54 rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
55 static constexpr RegStorage sp_fr1_temps_arr_32[] =
56 {rs_rF0, rs_rF2, rs_rF4, rs_rF6, rs_rF8, rs_rF10, rs_rF12, rs_rF14};
57 static constexpr RegStorage dp_fr0_temps_arr_32[] =
58 {rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
59 rs_rD7_fr0};
60 static constexpr RegStorage dp_fr1_temps_arr_32[] =
61 {rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
62 rs_rD7_fr1};
63
64 static constexpr RegStorage core_regs_arr_64[] =
65 {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6,
66 rs_rA7, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5, rs_rS6,
67 rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
68 static constexpr RegStorage core_regs_arr_64d[] =
69 {rs_rZEROd, rs_rATd, rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d,
70 rs_rA6d, rs_rA7d, rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rS0d, rs_rS1d, rs_rS2d, rs_rS3d,
71 rs_rS4d, rs_rS5d, rs_rS6d, rs_rS7d, rs_rT8d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd,
72 rs_rFPd, rs_rRAd};
73 #if 0
74 // TODO: f24-f31 must be saved before calls and restored after.
75 static constexpr RegStorage sp_regs_arr_64[] =
76 {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
77 rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
78 rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
79 rs_rF31};
80 static constexpr RegStorage dp_regs_arr_64[] =
81 {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
82 rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
83 rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
84 rs_rD31};
85 #else
86 static constexpr RegStorage sp_regs_arr_64[] =
87 {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
88 rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
89 rs_rF21, rs_rF22, rs_rF23};
90 static constexpr RegStorage dp_regs_arr_64[] =
91 {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
92 rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
93 rs_rD21, rs_rD22, rs_rD23};
94 #endif
95 static constexpr RegStorage reserved_regs_arr_64[] =
96 {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
97 static constexpr RegStorage reserved_regs_arr_64d[] =
98 {rs_rZEROd, rs_rATd, rs_rS0d, rs_rS1d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd, rs_rRAd};
99 static constexpr RegStorage core_temps_arr_64[] =
100 {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6, rs_rA7, rs_rT0, rs_rT1,
101 rs_rT2, rs_rT3, rs_rT8};
102 static constexpr RegStorage core_temps_arr_64d[] =
103 {rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d, rs_rA6d, rs_rA7d,
104 rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rT8d};
105 #if 0
106 // TODO: f24-f31 must be saved before calls and restored after.
107 static constexpr RegStorage sp_temps_arr_64[] =
108 {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
109 rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
110 rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
111 rs_rF31};
112 static constexpr RegStorage dp_temps_arr_64[] =
113 {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
114 rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
115 rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
116 rs_rD31};
117 #else
118 static constexpr RegStorage sp_temps_arr_64[] =
119 {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
120 rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
121 rs_rF21, rs_rF22, rs_rF23};
122 static constexpr RegStorage dp_temps_arr_64[] =
123 {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
124 rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
125 rs_rD21, rs_rD22, rs_rD23};
126 #endif
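// Until the TODO above is addressed, f24-f31 are simply left out of the active pools so they
// never need to be preserved around calls.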
127
128 static constexpr ArrayRef<const RegStorage> empty_pool;
129 static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
130 static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
131 static constexpr ArrayRef<const RegStorage> dp_fr0_regs_32(dp_fr0_regs_arr_32);
132 static constexpr ArrayRef<const RegStorage> dp_fr1_regs_32(dp_fr1_regs_arr_32);
133 static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
134 static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
135 static constexpr ArrayRef<const RegStorage> sp_fr0_temps_32(sp_fr0_temps_arr_32);
136 static constexpr ArrayRef<const RegStorage> sp_fr1_temps_32(sp_fr1_temps_arr_32);
137 static constexpr ArrayRef<const RegStorage> dp_fr0_temps_32(dp_fr0_temps_arr_32);
138 static constexpr ArrayRef<const RegStorage> dp_fr1_temps_32(dp_fr1_temps_arr_32);
139
140 static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
141 static constexpr ArrayRef<const RegStorage> core_regs_64d(core_regs_arr_64d);
142 static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
143 static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
144 static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
145 static constexpr ArrayRef<const RegStorage> reserved_regs_64d(reserved_regs_arr_64d);
146 static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
147 static constexpr ArrayRef<const RegStorage> core_temps_64d(core_temps_arr_64d);
148 static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
149 static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);
150
151 RegLocation MipsMir2Lir::LocCReturn() {
152 return mips_loc_c_return;
153 }
154
155 RegLocation MipsMir2Lir::LocCReturnRef() {
156 return cu_->target64 ? mips64_loc_c_return_ref : mips_loc_c_return;
157 }
158
159 RegLocation MipsMir2Lir::LocCReturnWide() {
160 return cu_->target64 ? mips64_loc_c_return_wide : mips_loc_c_return_wide;
161 }
162
163 RegLocation MipsMir2Lir::LocCReturnFloat() {
164 return mips_loc_c_return_float;
165 }
166
167 RegLocation MipsMir2Lir::LocCReturnDouble() {
168 if (cu_->target64) {
169 return mips64_loc_c_return_double;
170 } else if (fpuIs32Bit_) {
171 return mips_loc_c_return_double_fr0;
172 } else {
173 return mips_loc_c_return_double_fr1;
174 }
175 }
176
177 // Convert k64BitSolo into k64BitPair.
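// E.g., a k64BitSolo double numbered 2 becomes the k64BitPair {2, 3}, i.e. singles rF2/rF3.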
178 RegStorage MipsMir2Lir::Solo64ToPair64(RegStorage reg) {
179 DCHECK(reg.IsDouble());
180 DCHECK_EQ(reg.GetRegNum() & 1, 0);
181 int reg_num = (reg.GetRegNum() & ~1) | RegStorage::kFloatingPoint;
182 return RegStorage(RegStorage::k64BitPair, reg_num, reg_num + 1);
183 }
184
185 // Convert 64bit FP (k64BitSolo or k64BitPair) into k32BitSolo.
186 // This routine is only used to allow a 64bit FPU to access FP registers 32bits at a time.
187 RegStorage MipsMir2Lir::Fp64ToSolo32(RegStorage reg) {
188 DCHECK(!fpuIs32Bit_);
189 DCHECK(reg.IsDouble());
190 DCHECK(!reg.IsPair());
191 int reg_num = reg.GetRegNum() | RegStorage::kFloatingPoint;
192 return RegStorage(RegStorage::k32BitSolo, reg_num);
193 }
194
195 // Return a target-dependent special register.
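// On 32-bit targets a kWide request pairs the register with its successor (e.g. kArg0 yields
// {rA0, rA1}); for a 64-bit FPU the FP pair is then collapsed back into a 64-bit solo.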
196 RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
197 if (!cu_->target64 && wide_kind == kWide) {
198 DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
199 RegStorage ret_reg = RegStorage::MakeRegPair(TargetReg(reg),
200 TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
201 if (!fpuIs32Bit_ && ret_reg.IsFloat()) {
202 // convert 64BitPair to 64BitSolo for 64bit FPUs.
203 RegStorage low = ret_reg.GetLow();
204 ret_reg = RegStorage::FloatSolo64(low.GetRegNum());
205 }
206 return ret_reg;
207 } else if (cu_->target64 && (wide_kind == kWide || wide_kind == kRef)) {
208 return As64BitReg(TargetReg(reg));
209 } else {
210 return TargetReg(reg);
211 }
212 }
213
214 // Return a target-dependent special register.
215 RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg) {
216 RegStorage res_reg;
217 switch (reg) {
218 case kSelf: res_reg = rs_rS1; break;
219 case kSuspend: res_reg = rs_rS0; break;
220 case kLr: res_reg = rs_rRA; break;
221 case kPc: res_reg = RegStorage::InvalidReg(); break;
222 case kSp: res_reg = rs_rSP; break;
223 case kArg0: res_reg = rs_rA0; break;
224 case kArg1: res_reg = rs_rA1; break;
225 case kArg2: res_reg = rs_rA2; break;
226 case kArg3: res_reg = rs_rA3; break;
227 case kArg4: res_reg = cu_->target64 ? rs_rA4 : RegStorage::InvalidReg(); break;
228 case kArg5: res_reg = cu_->target64 ? rs_rA5 : RegStorage::InvalidReg(); break;
229 case kArg6: res_reg = cu_->target64 ? rs_rA6 : RegStorage::InvalidReg(); break;
230 case kArg7: res_reg = cu_->target64 ? rs_rA7 : RegStorage::InvalidReg(); break;
231 case kFArg0: res_reg = rs_rF12; break;
232 case kFArg1: res_reg = rs_rF13; break;
233 case kFArg2: res_reg = rs_rF14; break;
234 case kFArg3: res_reg = rs_rF15; break;
235 case kFArg4: res_reg = cu_->target64 ? rs_rF16 : RegStorage::InvalidReg(); break;
236 case kFArg5: res_reg = cu_->target64 ? rs_rF17 : RegStorage::InvalidReg(); break;
237 case kFArg6: res_reg = cu_->target64 ? rs_rF18 : RegStorage::InvalidReg(); break;
238 case kFArg7: res_reg = cu_->target64 ? rs_rF19 : RegStorage::InvalidReg(); break;
239 case kRet0: res_reg = rs_rV0; break;
240 case kRet1: res_reg = rs_rV1; break;
241 case kInvokeTgt: res_reg = rs_rT9; break;
242 case kHiddenArg: res_reg = cu_->target64 ? rs_rT0 : rs_rT0_32; break;
243 case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
244 case kCount: res_reg = RegStorage::InvalidReg(); break;
245 default: res_reg = RegStorage::InvalidReg();
246 }
247 return res_reg;
248 }
249
250 RegStorage MipsMir2Lir::InToRegStorageMipsMapper::GetNextReg(ShortyArg arg) {
251 const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3};
252 const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
253
254 RegStorage result = RegStorage::InvalidReg();
255 if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
256 result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
257 arg.IsRef() ? kRef : kNotWide);
258 if (arg.IsWide() && cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
259 result = RegStorage::MakeRegPair(
260 result, m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], kNotWide));
261 }
262 }
263 return result;
264 }
265
266 RegStorage MipsMir2Lir::InToRegStorageMips64Mapper::GetNextReg(ShortyArg arg) {
267 const SpecialTargetRegister coreArgMappingToPhysicalReg[] =
268 {kArg1, kArg2, kArg3, kArg4, kArg5, kArg6, kArg7};
269 const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
270 const SpecialTargetRegister fpArgMappingToPhysicalReg[] =
271 {kFArg1, kFArg2, kFArg3, kFArg4, kFArg5, kFArg6, kFArg7};
272 const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
273
274 RegStorage result = RegStorage::InvalidReg();
275 if (arg.IsFP()) {
276 if (cur_arg_reg_ < fpArgMappingToPhysicalRegSize) {
277 DCHECK(!arg.IsRef());
278 result = m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_arg_reg_++],
279 arg.IsWide() ? kWide : kNotWide);
280 }
281 } else {
282 if (cur_arg_reg_ < coreArgMappingToPhysicalRegSize) {
283 DCHECK(!(arg.IsWide() && arg.IsRef()));
284 result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_arg_reg_++],
285 arg.IsRef() ? kRef : (arg.IsWide() ? kWide : kNotWide));
286 }
287 }
288 return result;
289 }
290
291 /*
292 * Decode the register id.
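 * Core registers map directly to mask bits; FP registers are offset by kMipsFPReg0, and on
 * 32-bit targets a double claims two adjacent mask bits.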
293 */
294 ResourceMask MipsMir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
295 if (cu_->target64) {
296 return ResourceMask::Bit((reg.IsFloat() ? kMipsFPReg0 : 0) + reg.GetRegNum());
297 } else {
298 if (reg.IsDouble()) {
299 return ResourceMask::TwoBits((reg.GetRegNum() & ~1) + kMipsFPReg0);
300 } else if (reg.IsSingle()) {
301 return ResourceMask::Bit(reg.GetRegNum() + kMipsFPReg0);
302 } else {
303 return ResourceMask::Bit(reg.GetRegNum());
304 }
305 }
306 }
307
308 ResourceMask MipsMir2Lir::GetPCUseDefEncoding() const {
309 return cu_->target64 ? ResourceMask::Bit(kMips64RegPC) : ResourceMask::Bit(kMipsRegPC);
310 }
311
312 void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
313 ResourceMask* def_mask) {
314 DCHECK(!lir->flags.use_def_invalid);
315
316 // Mips-specific resource map setup here.
317 if (flags & REG_DEF_SP) {
318 def_mask->SetBit(kMipsRegSP);
319 }
320
321 if (flags & REG_USE_SP) {
322 use_mask->SetBit(kMipsRegSP);
323 }
324
325 if (flags & REG_DEF_LR) {
326 def_mask->SetBit(kMipsRegLR);
327 }
328
329 if (!cu_->target64) {
330 if (flags & REG_DEF_HI) {
331 def_mask->SetBit(kMipsRegHI);
332 }
333
334 if (flags & REG_DEF_LO) {
335 def_mask->SetBit(kMipsRegLO);
336 }
337
338 if (flags & REG_USE_HI) {
339 use_mask->SetBit(kMipsRegHI);
340 }
341
342 if (flags & REG_USE_LO) {
343 use_mask->SetBit(kMipsRegLO);
344 }
345 }
346 }
347
348 /* For dumping instructions */
349 #define MIPS_REG_COUNT 32
350 static const char *mips_reg_name[MIPS_REG_COUNT] = {
351 "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
352 "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
353 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
354 "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
355 };
356
357 static const char *mips64_reg_name[MIPS_REG_COUNT] = {
358 "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
359 "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
360 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
361 "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
362 };
363
364 /*
365 * Interpret a format string and build a human-readable string for the instruction.
366 * See format key in assemble_mips.cc.
367 */
368 std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
369 std::string buf;
370 int i;
371 const char *fmt_end = &fmt[strlen(fmt)];
372 char tbuf[256];
373 char nc;
374 while (fmt < fmt_end) {
375 int operand;
376 if (*fmt == '!') {
377 fmt++;
378 DCHECK_LT(fmt, fmt_end);
379 nc = *fmt++;
380 if (nc == '!') {
381 strcpy(tbuf, "!");
382 } else {
383 DCHECK_LT(fmt, fmt_end);
384 DCHECK_LT(static_cast<unsigned>(nc-'0'), 4u);
385 operand = lir->operands[nc-'0'];
386 switch (*fmt++) {
387 case 'b':
388 strcpy(tbuf, "0000");
389 for (i = 3; i >= 0; i--) {
390 tbuf[i] += operand & 1;
391 operand >>= 1;
392 }
393 break;
394 case 's':
395 snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
396 break;
397 case 'S':
398 DCHECK_EQ(RegStorage::RegNum(operand) & 1, 0);
399 snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
400 break;
401 case 'h':
402 snprintf(tbuf, arraysize(tbuf), "%04x", operand);
403 break;
404 case 'M':
405 case 'd':
406 snprintf(tbuf, arraysize(tbuf), "%d", operand);
407 break;
408 case 'D':
409 snprintf(tbuf, arraysize(tbuf), "%d", operand+1);
410 break;
411 case 'E':
412 snprintf(tbuf, arraysize(tbuf), "%d", operand*4);
413 break;
414 case 'F':
415 snprintf(tbuf, arraysize(tbuf), "%d", operand*2);
416 break;
417 case 't':
418 snprintf(tbuf, arraysize(tbuf), "0x%08" PRIxPTR " (L%p)",
419 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 + (operand << 1),
420 lir->target);
421 break;
422 case 'T':
423 snprintf(tbuf, arraysize(tbuf), "0x%08x", operand << 2);
424 break;
425 case 'u': {
426 int offset_1 = lir->operands[0];
427 int offset_2 = NEXT_LIR(lir)->operands[0];
428 uintptr_t target =
429 (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) & ~3) +
430 (offset_1 << 21 >> 9) + (offset_2 << 1)) & 0xfffffffc;
431 snprintf(tbuf, arraysize(tbuf), "%p", reinterpret_cast<void*>(target));
432 break;
433 }
434
435 /* Nothing extra to print for the second word of a branch/call pair (see 'u' above). */
436 case 'v':
437 strcpy(tbuf, "see above");
438 break;
439 case 'r':
440 DCHECK(operand >= 0 && operand < MIPS_REG_COUNT);
441 if (cu_->target64) {
442 strcpy(tbuf, mips64_reg_name[operand]);
443 } else {
444 strcpy(tbuf, mips_reg_name[operand]);
445 }
446 break;
447 case 'N':
448 // Placeholder for delay slot handling
449 strcpy(tbuf, "; nop");
450 break;
451 default:
452 strcpy(tbuf, "DecodeError");
453 break;
454 }
455 buf += tbuf;
456 }
457 } else {
458 buf += *fmt++;
459 }
460 }
461 return buf;
462 }
463
464 // FIXME: need to redo resource maps for MIPS - fix this at that time.
465 void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, const ResourceMask& mask, const char *prefix) {
466 char buf[256];
467 buf[0] = 0;
468
469 if (mask.Equals(kEncodeAll)) {
470 strcpy(buf, "all");
471 } else {
472 char num[8];
473 int i;
474
475 for (i = 0; i < (cu_->target64 ? kMips64RegEnd : kMipsRegEnd); i++) {
476 if (mask.HasBit(i)) {
477 snprintf(num, arraysize(num), "%d ", i);
478 strcat(buf, num);
479 }
480 }
481
482 if (mask.HasBit(ResourceMask::kCCode)) {
483 strcat(buf, "cc ");
484 }
485 if (mask.HasBit(ResourceMask::kFPStatus)) {
486 strcat(buf, "fpcc ");
487 }
488 // Memory bits.
489 if (mips_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
490 snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
491 DECODE_ALIAS_INFO_REG(mips_lir->flags.alias_info),
492 DECODE_ALIAS_INFO_WIDE(mips_lir->flags.alias_info) ? "(+1)" : "");
493 }
494 if (mask.HasBit(ResourceMask::kLiteral)) {
495 strcat(buf, "lit ");
496 }
497
498 if (mask.HasBit(ResourceMask::kHeapRef)) {
499 strcat(buf, "heap ");
500 }
501 if (mask.HasBit(ResourceMask::kMustNotAlias)) {
502 strcat(buf, "noalias ");
503 }
504 }
505 if (buf[0]) {
506 LOG(INFO) << prefix << ": " << buf;
507 }
508 }
509
510 /*
511  * TUNING: Is this a true leaf?  We can't just use METHOD_IS_LEAF to decide, since some
512  * instructions might call out to C/assembly helper functions.  Until that
513 * machinery is in place, always spill lr.
514 */
515
516 void MipsMir2Lir::AdjustSpillMask() {
517 core_spill_mask_ |= (1 << rs_rRA.GetRegNum());
518 num_core_spills_++;
519 }
520
521 /* Clobber all regs that might be used by an external C call */
522 void MipsMir2Lir::ClobberCallerSave() {
523 if (cu_->target64) {
524 Clobber(rs_rZEROd);
525 Clobber(rs_rATd);
526 Clobber(rs_rV0d);
527 Clobber(rs_rV1d);
528 Clobber(rs_rA0d);
529 Clobber(rs_rA1d);
530 Clobber(rs_rA2d);
531 Clobber(rs_rA3d);
532 Clobber(rs_rA4d);
533 Clobber(rs_rA5d);
534 Clobber(rs_rA6d);
535 Clobber(rs_rA7d);
536 Clobber(rs_rT0d);
537 Clobber(rs_rT1d);
538 Clobber(rs_rT2d);
539 Clobber(rs_rT3d);
540 Clobber(rs_rT8d);
541 Clobber(rs_rT9d);
542 Clobber(rs_rK0d);
543 Clobber(rs_rK1d);
544 Clobber(rs_rGPd);
545 Clobber(rs_rFPd);
546 Clobber(rs_rRAd);
547
548 Clobber(rs_rF0);
549 Clobber(rs_rF1);
550 Clobber(rs_rF2);
551 Clobber(rs_rF3);
552 Clobber(rs_rF4);
553 Clobber(rs_rF5);
554 Clobber(rs_rF6);
555 Clobber(rs_rF7);
556 Clobber(rs_rF8);
557 Clobber(rs_rF9);
558 Clobber(rs_rF10);
559 Clobber(rs_rF11);
560 Clobber(rs_rF12);
561 Clobber(rs_rF13);
562 Clobber(rs_rF14);
563 Clobber(rs_rF15);
564 Clobber(rs_rD0);
565 Clobber(rs_rD1);
566 Clobber(rs_rD2);
567 Clobber(rs_rD3);
568 Clobber(rs_rD4);
569 Clobber(rs_rD5);
570 Clobber(rs_rD6);
571 Clobber(rs_rD7);
572 } else {
573 Clobber(rs_rZERO);
574 Clobber(rs_rAT);
575 Clobber(rs_rV0);
576 Clobber(rs_rV1);
577 Clobber(rs_rA0);
578 Clobber(rs_rA1);
579 Clobber(rs_rA2);
580 Clobber(rs_rA3);
581 Clobber(rs_rT0_32);
582 Clobber(rs_rT1_32);
583 Clobber(rs_rT2_32);
584 Clobber(rs_rT3_32);
585 Clobber(rs_rT4_32);
586 Clobber(rs_rT5_32);
587 Clobber(rs_rT6_32);
588 Clobber(rs_rT7_32);
589 Clobber(rs_rT8);
590 Clobber(rs_rT9);
591 Clobber(rs_rK0);
592 Clobber(rs_rK1);
593 Clobber(rs_rGP);
594 Clobber(rs_rFP);
595 Clobber(rs_rRA);
596 Clobber(rs_rF0);
597 Clobber(rs_rF2);
598 Clobber(rs_rF4);
599 Clobber(rs_rF6);
600 Clobber(rs_rF8);
601 Clobber(rs_rF10);
602 Clobber(rs_rF12);
603 Clobber(rs_rF14);
604 if (fpuIs32Bit_) {
605 Clobber(rs_rF1);
606 Clobber(rs_rF3);
607 Clobber(rs_rF5);
608 Clobber(rs_rF7);
609 Clobber(rs_rF9);
610 Clobber(rs_rF11);
611 Clobber(rs_rF13);
612 Clobber(rs_rF15);
613 Clobber(rs_rD0_fr0);
614 Clobber(rs_rD1_fr0);
615 Clobber(rs_rD2_fr0);
616 Clobber(rs_rD3_fr0);
617 Clobber(rs_rD4_fr0);
618 Clobber(rs_rD5_fr0);
619 Clobber(rs_rD6_fr0);
620 Clobber(rs_rD7_fr0);
621 } else {
622 Clobber(rs_rD0_fr1);
623 Clobber(rs_rD1_fr1);
624 Clobber(rs_rD2_fr1);
625 Clobber(rs_rD3_fr1);
626 Clobber(rs_rD4_fr1);
627 Clobber(rs_rD5_fr1);
628 Clobber(rs_rD6_fr1);
629 Clobber(rs_rD7_fr1);
630 }
631 }
632 }
633
634 RegLocation MipsMir2Lir::GetReturnWideAlt() {
635 UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS";
636 RegLocation res = LocCReturnWide();
637 return res;
638 }
639
640 RegLocation MipsMir2Lir::GetReturnAlt() {
641 UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS";
642 RegLocation res = LocCReturn();
643 return res;
644 }
645
646 /* To be used when explicitly managing register use */
647 void MipsMir2Lir::LockCallTemps() {
648 LockTemp(TargetReg(kArg0));
649 LockTemp(TargetReg(kArg1));
650 LockTemp(TargetReg(kArg2));
651 LockTemp(TargetReg(kArg3));
652 if (cu_->target64) {
653 LockTemp(TargetReg(kArg4));
654 LockTemp(TargetReg(kArg5));
655 LockTemp(TargetReg(kArg6));
656 LockTemp(TargetReg(kArg7));
657 }
658 }
659
660 /* To be used when explicitly managing register use */
661 void MipsMir2Lir::FreeCallTemps() {
662 FreeTemp(TargetReg(kArg0));
663 FreeTemp(TargetReg(kArg1));
664 FreeTemp(TargetReg(kArg2));
665 FreeTemp(TargetReg(kArg3));
666 if (cu_->target64) {
667 FreeTemp(TargetReg(kArg4));
668 FreeTemp(TargetReg(kArg5));
669 FreeTemp(TargetReg(kArg6));
670 FreeTemp(TargetReg(kArg7));
671 }
672 FreeTemp(TargetReg(kHiddenArg));
673 }
674
675 bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind ATTRIBUTE_UNUSED) {
676 if (cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
677 NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
678 return true;
679 } else {
680 return false;
681 }
682 }
683
684 void MipsMir2Lir::CompilerInitializeRegAlloc() {
685 if (cu_->target64) {
686 reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64d, sp_regs_64,
687 dp_regs_64, reserved_regs_64, reserved_regs_64d,
688 core_temps_64, core_temps_64d, sp_temps_64,
689 dp_temps_64));
690
691 // Alias single precision floats to appropriate half of overlapping double.
692 for (RegisterInfo* info : reg_pool_->sp_regs_) {
693 int sp_reg_num = info->GetReg().GetRegNum();
694 int dp_reg_num = sp_reg_num;
695 RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
696 RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
697 // Double precision register's master storage should refer to itself.
698 DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
699 // Redirect the single-precision register's master storage to the overlapping double.
700 info->SetMaster(dp_reg_info);
701 // Singles should show a single 32-bit mask bit, at first referring to the low half.
702 DCHECK_EQ(info->StorageMask(), 0x1U);
703 }
704
705 // Alias 32-bit core registers to their corresponding 64-bit registers.
706 for (RegisterInfo* info : reg_pool_->core_regs_) {
707 int d_reg_num = info->GetReg().GetRegNum();
708 RegStorage d_reg = RegStorage::Solo64(d_reg_num);
709 RegisterInfo* d_reg_info = GetRegInfo(d_reg);
710 // The 64-bit register's master storage should refer to itself.
711 DCHECK_EQ(d_reg_info, d_reg_info->Master());
712 // Redirect the 32-bit register's master storage to its 64-bit counterpart.
713 info->SetMaster(d_reg_info);
714 // 32bit should show a single 32-bit mask bit, at first referring to the low half.
715 DCHECK_EQ(info->StorageMask(), 0x1U);
716 }
717 } else {
718 reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, // core64
719 sp_regs_32,
720 fpuIs32Bit_ ? dp_fr0_regs_32 : dp_fr1_regs_32,
721 reserved_regs_32, empty_pool, // reserved64
722 core_temps_32, empty_pool, // core64_temps
723 fpuIs32Bit_ ? sp_fr0_temps_32 : sp_fr1_temps_32,
724 fpuIs32Bit_ ? dp_fr0_temps_32 : dp_fr1_temps_32));
725
726 // Alias single precision floats to appropriate half of overlapping double.
727 for (RegisterInfo* info : reg_pool_->sp_regs_) {
728 int sp_reg_num = info->GetReg().GetRegNum();
729 int dp_reg_num = sp_reg_num & ~1;
730 if (fpuIs32Bit_ || (sp_reg_num == dp_reg_num)) {
731 RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
732 RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
733 // Double precision register's master storage should refer to itself.
734 DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
735 // Redirect the single-precision register's master storage to the overlapping double.
736 info->SetMaster(dp_reg_info);
737 // Singles should show a single 32-bit mask bit, at first referring to the low half.
738 DCHECK_EQ(info->StorageMask(), 0x1U);
739 if (sp_reg_num & 1) {
740 // For odd singles, change to use the high word of the backing double.
741 info->SetStorageMask(0x2);
742 }
743 }
744 }
745 }
746
747 // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
748 // TODO: adjust when we roll to hard float calling convention.
749 reg_pool_->next_core_reg_ = 2;
750 reg_pool_->next_sp_reg_ = 2;
751 if (cu_->target64) {
752 reg_pool_->next_dp_reg_ = 1;
753 } else {
754 reg_pool_->next_dp_reg_ = 2;
755 }
756 }
757
758 /*
759 * In the Arm code it is typical to use the link register
760 * to hold the target address. However, for Mips we must
761 * ensure that all branch instructions can be restarted if
762 * there is a trap in the shadow. Allocate a temp register.
763 */
764 RegStorage MipsMir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
765 // NOTE: native pointer.
766 if (cu_->target64) {
767 LoadWordDisp(TargetPtrReg(kSelf), GetThreadOffset<8>(trampoline).Int32Value(),
768 TargetPtrReg(kInvokeTgt));
769 } else {
770 LoadWordDisp(TargetPtrReg(kSelf), GetThreadOffset<4>(trampoline).Int32Value(),
771 TargetPtrReg(kInvokeTgt));
772 }
773 return TargetPtrReg(kInvokeTgt);
774 }
775
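// Implicit suspend check: load through the thread's suspend-trigger pointer; the load is
// expected to fault when a suspend has been requested.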
776 LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
777 RegStorage tmp = AllocTemp();
778 // NOTE: native pointer.
779 if (cu_->target64) {
780 LoadWordDisp(TargetPtrReg(kSelf), Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
781 } else {
782 LoadWordDisp(TargetPtrReg(kSelf), Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
783 }
784 LIR *inst = LoadWordDisp(tmp, 0, tmp);
785 FreeTemp(tmp);
786 return inst;
787 }
788
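// 64-bit atomic accesses are routed through the quick entrypoints (kQuickA64Load / kQuickA64Store)
// using the fixed call-temp registers locked below.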
789 LIR* MipsMir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
790 DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadStore().
791 if (!cu_->target64) {
792 DCHECK(r_dest.IsPair());
793 }
794 ClobberCallerSave();
795 LockCallTemps(); // Using fixed registers.
796 RegStorage reg_ptr = TargetReg(kArg0);
797 OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
798 RegStorage r_tgt = LoadHelper(kQuickA64Load);
799 ForceImplicitNullCheck(reg_ptr, 0, true); // is_wide = true
800 LIR *ret = OpReg(kOpBlx, r_tgt);
801 RegStorage reg_ret;
802 if (cu_->target64) {
803 OpRegCopy(r_dest, TargetReg(kRet0));
804 } else {
805 reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
806 OpRegCopyWide(r_dest, reg_ret);
807 }
808 return ret;
809 }
810
811 LIR* MipsMir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
812 DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadStore().
813 if (cu_->target64) {
814 DCHECK(!r_src.IsPair());
815 } else {
816 DCHECK(r_src.IsPair());
817 }
818 ClobberCallerSave();
819 LockCallTemps(); // Using fixed registers.
820 RegStorage temp_ptr = AllocTemp();
821 OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
822 ForceImplicitNullCheck(temp_ptr, 0, true); // is_wide = true
823 RegStorage temp_value = AllocTempWide();
824 OpRegCopyWide(temp_value, r_src);
825 if (cu_->target64) {
826 OpRegCopyWide(TargetReg(kArg0, kWide), temp_ptr);
827 OpRegCopyWide(TargetReg(kArg1, kWide), temp_value);
828 } else {
829 RegStorage reg_ptr = TargetReg(kArg0);
830 OpRegCopy(reg_ptr, temp_ptr);
831 RegStorage reg_value = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
832 OpRegCopyWide(reg_value, temp_value);
833 }
834 FreeTemp(temp_ptr);
835 FreeTemp(temp_value);
836 RegStorage r_tgt = LoadHelper(kQuickA64Store);
837 return OpReg(kOpBlx, r_tgt);
838 }
839
840 static dwarf::Reg DwarfCoreReg(int num) {
841 return dwarf::Reg::MipsCore(num);
842 }
843
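// Spill/unspill the core registers named in core_spill_mask_, emitting matching CFI records so
// the unwinder can locate the saved values.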
844 void MipsMir2Lir::SpillCoreRegs() {
845 if (num_core_spills_ == 0) {
846 return;
847 }
848 uint32_t mask = core_spill_mask_;
849 int ptr_size = cu_->target64 ? 8 : 4;
850 int offset = num_core_spills_ * ptr_size;
851 const RegStorage rs_sp = TargetPtrReg(kSp);
852 OpRegImm(kOpSub, rs_sp, offset);
853 cfi_.AdjustCFAOffset(offset);
854 for (int reg = 0; mask; mask >>= 1, reg++) {
855 if (mask & 0x1) {
856 offset -= ptr_size;
857 StoreWordDisp(rs_sp, offset,
858 cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg));
859 cfi_.RelOffset(DwarfCoreReg(reg), offset);
860 }
861 }
862 }
863
864 void MipsMir2Lir::UnSpillCoreRegs() {
865 if (num_core_spills_ == 0) {
866 return;
867 }
868 uint32_t mask = core_spill_mask_;
869 int offset = frame_size_;
870 int ptr_size = cu_->target64 ? 8 : 4;
871 const RegStorage rs_sp = TargetPtrReg(kSp);
872 for (int reg = 0; mask; mask >>= 1, reg++) {
873 if (mask & 0x1) {
874 offset -= ptr_size;
875 LoadWordDisp(rs_sp, offset,
876 cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg));
877 cfi_.Restore(DwarfCoreReg(reg));
878 }
879 }
880 OpRegImm(kOpAdd, rs_sp, frame_size_);
881 cfi_.AdjustCFAOffset(-frame_size_);
882 }
883
884 bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir) {
885 return (lir->opcode == kMipsB);
886 }
887
888 RegisterClass MipsMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
889 if (UNLIKELY(is_volatile)) {
890 // On Mips, atomic 64-bit load/store requires a core register.
891 // Smaller aligned load/store is atomic for both core and fp registers.
892 if (size == k64 || size == kDouble) {
893 return kCoreReg;
894 }
895 }
896 // TODO: Verify that both core and fp registers are suitable for smaller sizes.
897 return RegClassBySize(size);
898 }
899
900 MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
901 : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips64_mapper_(this),
902 in_to_reg_storage_mips_mapper_(this),
903 isaIsR6_(cu_->target64 ? true : cu->compiler_driver->GetInstructionSetFeatures()
904 ->AsMipsInstructionSetFeatures()->IsR6()),
905 fpuIs32Bit_(cu_->target64 ? false : cu->compiler_driver->GetInstructionSetFeatures()
906 ->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint()) {
907 for (int i = 0; i < kMipsLast; i++) {
908 DCHECK_EQ(MipsMir2Lir::EncodingMap[i].opcode, i)
909 << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
910 << " is wrong: expecting " << i << ", seeing "
911 << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
912 }
913 }
914
915 Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
916 ArenaAllocator* const arena) {
917 return new MipsMir2Lir(cu, mir_graph, arena);
918 }
919
920 uint64_t MipsMir2Lir::GetTargetInstFlags(int opcode) {
921 DCHECK(!IsPseudoLirOp(opcode));
922 return MipsMir2Lir::EncodingMap[opcode].flags;
923 }
924
925 const char* MipsMir2Lir::GetTargetInstName(int opcode) {
926 DCHECK(!IsPseudoLirOp(opcode));
927 return MipsMir2Lir::EncodingMap[opcode].name;
928 }
929
930 const char* MipsMir2Lir::GetTargetInstFmt(int opcode) {
931 DCHECK(!IsPseudoLirOp(opcode));
932 return MipsMir2Lir::EncodingMap[opcode].fmt;
933 }
934
935 } // namespace art
936