1 /*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "codegen_mips.h"
18
19 #include "arch/mips/instruction_set_features_mips.h"
20 #include "arch/mips/entrypoints_direct_mips.h"
21 #include "base/logging.h"
22 #include "dex/quick/mir_to_lir-inl.h"
23 #include "dex/reg_storage_eq.h"
24 #include "dex/mir_graph.h"
25 #include "driver/compiler_driver.h"
26 #include "driver/compiler_options.h"
27 #include "mips_lir.h"
28
29 namespace art {
30
31 /* This file contains codegen for the Mips ISA */
// Copies a value when at least one side is a floating-point register.
// Selects between FPR<->FPR moves (fmov.s/fmov.d), GPR->FPR transfers
// (mtc1/dmtc1) and FPR->GPR transfers (mfc1/dmfc1). For the "to coprocessor 1"
// forms the operands are swapped because the hardware instruction names the
// GPR first.
LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  int opcode;
  if (cu_->target64) {
    DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
    if (r_dest.Is64Bit()) {
      if (r_dest.IsDouble()) {
        if (r_src.IsDouble()) {
          opcode = kMipsFmovd;
        } else {
          // Note the operands are swapped for the dmtc1 instr.
          RegStorage t_opnd = r_src;
          r_src = r_dest;
          r_dest = t_opnd;
          opcode = kMips64Dmtc1;
        }
      } else {
        DCHECK(r_src.IsDouble());
        opcode = kMips64Dmfc1;
      }
    } else {
      if (r_dest.IsSingle()) {
        if (r_src.IsSingle()) {
          opcode = kMipsFmovs;
        } else {
          // Note the operands are swapped for the mtc1 instr.
          RegStorage t_opnd = r_src;
          r_src = r_dest;
          r_dest = t_opnd;
          opcode = kMipsMtc1;
        }
      } else {
        DCHECK(r_src.IsSingle());
        opcode = kMipsMfc1;
      }
    }
  } else {
    // Must be both DOUBLE or both not DOUBLE.
    DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
    if (r_dest.IsDouble()) {
      opcode = kMipsFmovd;
    } else {
      if (r_dest.IsSingle()) {
        if (r_src.IsSingle()) {
          opcode = kMipsFmovs;
        } else {
          // Note the operands are swapped for the mtc1 instr.
          RegStorage t_opnd = r_src;
          r_src = r_dest;
          r_dest = t_opnd;
          opcode = kMipsMtc1;
        }
      } else {
        DCHECK(r_src.IsSingle());
        opcode = kMipsMfc1;
      }
    }
  }
  LIR* res;
  // NOTE(review): the 64-bit path emits operands as (dest, src) while the
  // 32-bit path emits (src, dest); presumably this mirrors differing operand
  // orders in the two LIR encoding tables -- confirm against mips_lir.h.
  if (cu_->target64) {
    res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  } else {
    res = RawLIR(current_dalvik_offset_, opcode, r_src.GetReg(), r_dest.GetReg());
  }
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    // Self-copy: keep the LIR for bookkeeping but mark it as a no-op.
    res->flags.is_nop = true;
  }
  return res;
}
100
InexpensiveConstantInt(int32_t value)101 bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) {
102 // For encodings, see LoadConstantNoClobber below.
103 return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
104 }
105
// Float constants are never considered cheap; they come from the literal pool.
bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
  UNUSED(value);
  return false;  // TUNING
}
110
// 64-bit integer constants are never considered cheap to materialize inline.
bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) {
  UNUSED(value);
  return false;  // TUNING
}
115
// Double constants are never considered cheap; they come from the literal pool.
bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) {
  UNUSED(value);
  return false;  // TUNING
}
120
121 /*
122 * Load a immediate using a shortcut if possible; otherwise
123 * grab from the per-translation literal pool. If target is
124 * a high register, build constant into a low register and copy.
125 *
126 * No additional register clobbering operation performed. Use this version when
127 * 1) r_dest is freshly returned from AllocTemp or
128 * 2) The codegen is under fixed register usage
129 */
LIR* MipsMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  LIR *res;

  RegStorage r_dest_save = r_dest;
  int is_fp_reg = r_dest.IsFloat();
  if (is_fp_reg) {
    // Build the constant in a GPR temp; it is moved into the FPR at the end.
    DCHECK(r_dest.IsSingle());
    r_dest = AllocTemp();
  }

  // See if the value can be constructed cheaply.
  if (value == 0) {
    res = NewLIR2(kMipsMove, r_dest.GetReg(), rZERO);
  } else if (IsUint<16>(value)) {
    // Use OR with (unsigned) immediate to encode 16b unsigned int.
    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZERO, value);
  } else if (IsInt<16>(value)) {
    // Use ADD with (signed) immediate to encode 16b signed int.
    res = NewLIR3(kMipsAddiu, r_dest.GetReg(), rZERO, value);
  } else {
    // General case: lui for the high half, plus ori if the low half is nonzero.
    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
    if (value & 0xffff)
      NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
  }

  if (is_fp_reg) {
    // Transfer the assembled value from the GPR temp into the FP register.
    NewLIR2(kMipsMtc1, r_dest.GetReg(), r_dest_save.GetReg());
    FreeTemp(r_dest);
  }

  return res;
}
162
// Builds a 64-bit constant without clobbering any other register, trying
// progressively longer instruction sequences (1, 2, then 3-4 instructions),
// including shifted materialization for values with trailing zero bits.
LIR* MipsMir2Lir::LoadConstantWideNoClobber(RegStorage r_dest, int64_t value) {
  LIR* res = nullptr;
  DCHECK(r_dest.Is64Bit());
  RegStorage r_dest_save = r_dest;
  int is_fp_reg = r_dest.IsFloat();
  if (is_fp_reg) {
    // Build the value in a GPR temp; it is dmtc1'd into the FPR at the end.
    DCHECK(r_dest.IsDouble());
    r_dest = AllocTemp();
  }

  // Whether bit 31 is set. lui/ori produce a sign-extended 32-bit value, so
  // dahi/dati immediates must be compensated by this carry.
  int bit31 = (value & UINT64_C(0x80000000)) != 0;

  // Loads with 1 instruction.
  if (IsUint<16>(value)) {
    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
  } else if (IsInt<16>(value)) {
    res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, value);
  } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
  } else if (IsInt<32>(value)) {
    // Loads with 2 instructions.
    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
    NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
  } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
    NewLIR2(kMips64Dahi, r_dest.GetReg(), value >> 32);
  } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
    NewLIR2(kMips64Dati, r_dest.GetReg(), value >> 48);
  } else if ((value & 0xFFFF) == 0 && (value >> 32) >= (-32768 - bit31) &&
             (value >> 32) <= (32767 - bit31)) {
    // lui + dahi, adjusting the dahi immediate for the sign-extension carry.
    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
    NewLIR2(kMips64Dahi, r_dest.GetReg(), (value >> 32) + bit31);
  } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
    // lui + dati, adjusting the dati immediate for the sign-extension carry.
    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
    NewLIR2(kMips64Dati, r_dest.GetReg(), (value >> 48) + bit31);
  } else {
    // Strip trailing zero bits; if the remainder fits a short form, build it
    // and shift back into place.
    int64_t tmp = value;
    int shift_cnt = 0;
    while ((tmp & 1) == 0) {
      tmp >>= 1;
      shift_cnt++;
    }

    if (IsUint<16>(tmp)) {
      res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp);
      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
              shift_cnt & 0x1F);
    } else if (IsInt<16>(tmp)) {
      res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
              shift_cnt & 0x1F);
    } else if (IsInt<32>(tmp)) {
      // Loads with 3 instructions.
      res = NewLIR2(kMipsLui, r_dest.GetReg(), tmp >> 16);
      NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), tmp);
      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
              shift_cnt & 0x1F);
    } else {
      // Retry ignoring the low 16 bits; they are OR-ed back in after shifting.
      tmp = value >> 16;
      shift_cnt = 16;
      while ((tmp & 1) == 0) {
        tmp >>= 1;
        shift_cnt++;
      }

      if (IsUint<16>(tmp)) {
        res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp);
        NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
                shift_cnt & 0x1F);
        NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
      } else if (IsInt<16>(tmp)) {
        res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
        NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
                shift_cnt & 0x1F);
        NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
      } else {
        // Loads with 3-4 instructions.
        uint64_t tmp2 = value;
        if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
          res = NewLIR2(kMipsLui, r_dest.GetReg(), tmp2 >> 16);
        }
        if ((tmp2 & 0xFFFF) != 0) {
          if (res)
            NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), tmp2);
          else
            res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp2);
        }
        // Compensate for sign-extension of the low 32 bits before dahi.
        if (bit31) {
          tmp2 += UINT64_C(0x100000000);
        }
        if (((tmp2 >> 32) & 0xFFFF) != 0) {
          NewLIR2(kMips64Dahi, r_dest.GetReg(), tmp2 >> 32);
        }
        // dahi's result is sign-extended too; compensate before dati.
        if (tmp2 & UINT64_C(0x800000000000)) {
          tmp2 += UINT64_C(0x1000000000000);
        }
        if ((tmp2 >> 48) != 0) {
          NewLIR2(kMips64Dati, r_dest.GetReg(), tmp2 >> 48);
        }
      }
    }
  }

  if (is_fp_reg) {
    // Move the assembled 64-bit value into the FP register.
    NewLIR2(kMips64Dmtc1, r_dest.GetReg(), r_dest_save.GetReg());
    FreeTemp(r_dest);
  }
  return res;
}
273
OpUnconditionalBranch(LIR * target)274 LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) {
275 LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/);
276 res->target = target;
277 return res;
278 }
279
OpReg(OpKind op,RegStorage r_dest_src)280 LIR* MipsMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
281 MipsOpCode opcode = kMipsNop;
282 switch (op) {
283 case kOpBlx:
284 opcode = kMipsJalr;
285 break;
286 case kOpBx:
287 return NewLIR2(kMipsJalr, rZERO, r_dest_src.GetReg());
288 default:
289 LOG(FATAL) << "Bad case in OpReg";
290 UNREACHABLE();
291 }
292 return NewLIR2(opcode, cu_->target64 ? rRAd : rRA, r_dest_src.GetReg());
293 }
294
OpRegImm(OpKind op,RegStorage r_dest_src1,int value)295 LIR* MipsMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
296 if ((op == kOpAdd) || (op == kOpSub)) {
297 return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
298 } else {
299 LOG(FATAL) << "Bad case in OpRegImm";
300 UNREACHABLE();
301 }
302 }
303
// Emits a three-register ALU operation, selecting the 64-bit (d-prefixed)
// opcode when targeting MIPS64 and any operand is a 64-bit register.
// Carry-based ops (adc/sbc) do not exist on MIPS and are rejected.
LIR* MipsMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
  MipsOpCode opcode = kMipsNop;
  bool is64bit = cu_->target64 && (r_dest.Is64Bit() || r_src1.Is64Bit() || r_src2.Is64Bit());
  switch (op) {
    case kOpAdd:
      opcode = is64bit ? kMips64Daddu : kMipsAddu;
      break;
    case kOpSub:
      opcode = is64bit ? kMips64Dsubu : kMipsSubu;
      break;
    case kOpAnd:
      opcode = kMipsAnd;
      break;
    case kOpMul:
      // MIPS R6 re-encodes three-register multiply.
      opcode = isaIsR6_ ? kMipsR6Mul : kMipsR2Mul;
      break;
    case kOpOr:
      opcode = kMipsOr;
      break;
    case kOpXor:
      opcode = kMipsXor;
      break;
    case kOpLsl:
      opcode = is64bit ? kMips64Dsllv : kMipsSllv;
      break;
    case kOpLsr:
      opcode = is64bit ? kMips64Dsrlv : kMipsSrlv;
      break;
    case kOpAsr:
      opcode = is64bit ? kMips64Dsrav : kMipsSrav;
      break;
    case kOpAdc:
    case kOpSbc:
      LOG(FATAL) << "No carry bit on MIPS";
      break;
    default:
      LOG(FATAL) << "Bad case in OpRegRegReg";
      break;
  }
  return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
}
345
// Emits r_dest = r_src1 OP value. Uses the immediate instruction form when the
// constant fits the encoding (short_form); otherwise materializes the constant
// into a register and falls back to the three-register form.
LIR* MipsMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
  LIR *res;
  MipsOpCode opcode = kMipsNop;
  bool short_form = true;
  bool is64bit = cu_->target64 && (r_dest.Is64Bit() || r_src1.Is64Bit());

  switch (op) {
    case kOpAdd:
      if (IS_SIMM16(value)) {
        opcode = is64bit ? kMips64Daddiu : kMipsAddiu;
      } else {
        short_form = false;
        opcode = is64bit ? kMips64Daddu : kMipsAddu;
      }
      break;
    case kOpSub:
      // Subtract-immediate is encoded as addiu of the negated constant.
      // NOTE(review): -value overflows for value == INT_MIN; presumably the
      // callers never pass it -- confirm.
      if (IS_SIMM16((-value))) {
        value = -value;
        opcode = is64bit ? kMips64Daddiu : kMipsAddiu;
      } else {
        short_form = false;
        opcode = is64bit ? kMips64Dsubu : kMipsSubu;
      }
      break;
    case kOpLsl:
      if (is64bit) {
        DCHECK(value >= 0 && value <= 63);
        if (value >= 0 && value <= 31) {
          opcode = kMips64Dsll;
        } else {
          // Shift amounts 32-63 use the "+32" encoding with (value - 32).
          opcode = kMips64Dsll32;
          value = value - 32;
        }
      } else {
        DCHECK(value >= 0 && value <= 31);
        opcode = kMipsSll;
      }
      break;
    case kOpLsr:
      if (is64bit) {
        DCHECK(value >= 0 && value <= 63);
        if (value >= 0 && value <= 31) {
          opcode = kMips64Dsrl;
        } else {
          // Shift amounts 32-63 use the "+32" encoding with (value - 32).
          opcode = kMips64Dsrl32;
          value = value - 32;
        }
      } else {
        DCHECK(value >= 0 && value <= 31);
        opcode = kMipsSrl;
      }
      break;
    case kOpAsr:
      if (is64bit) {
        DCHECK(value >= 0 && value <= 63);
        if (value >= 0 && value <= 31) {
          opcode = kMips64Dsra;
        } else {
          // Shift amounts 32-63 use the "+32" encoding with (value - 32).
          opcode = kMips64Dsra32;
          value = value - 32;
        }
      } else {
        DCHECK(value >= 0 && value <= 31);
        opcode = kMipsSra;
      }
      break;
    case kOpAnd:
      // Logical immediates are zero-extended, so require unsigned 16-bit.
      if (IS_UIMM16((value))) {
        opcode = kMipsAndi;
      } else {
        short_form = false;
        opcode = kMipsAnd;
      }
      break;
    case kOpOr:
      if (IS_UIMM16((value))) {
        opcode = kMipsOri;
      } else {
        short_form = false;
        opcode = kMipsOr;
      }
      break;
    case kOpXor:
      if (IS_UIMM16((value))) {
        opcode = kMipsXori;
      } else {
        short_form = false;
        opcode = kMipsXor;
      }
      break;
    case kOpMul:
      // No multiply-immediate on MIPS; always go through a register.
      short_form = false;
      opcode = isaIsR6_ ? kMipsR6Mul : kMipsR2Mul;
      break;
    default:
      LOG(FATAL) << "Bad case in OpRegRegImm";
      break;
  }

  if (short_form) {
    res = NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), value);
  } else {
    if (r_dest != r_src1) {
      // Destination differs from source: build the constant directly in the
      // destination, avoiding an extra temp.
      res = LoadConstant(r_dest, value);
      NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
    } else {
      RegStorage r_scratch;
      if (is64bit) {
        r_scratch = AllocTempWide();
        res = LoadConstantWide(r_scratch, value);
      } else {
        r_scratch = AllocTemp();
        res = LoadConstant(r_scratch, value);
      }
      NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
    }
  }
  return res;
}
465
// Emits a two-register operation. Unary/extension ops are handled here;
// binary ALU ops are forwarded to OpRegRegReg with r_dest_src1 doubling as
// the first source.
LIR* MipsMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  MipsOpCode opcode = kMipsNop;
  LIR *res;
  switch (op) {
    case kOpMov:
      opcode = kMipsMove;
      break;
    case kOpMvn:
      // Bitwise NOT via nor with $zero.
      return NewLIR3(kMipsNor, r_dest_src1.GetReg(), r_src2.GetReg(), rZERO);
    case kOpNeg:
      // Negation via subtraction from $zero.
      if (cu_->target64 && r_dest_src1.Is64Bit()) {
        return NewLIR3(kMips64Dsubu, r_dest_src1.GetReg(), rZEROd, r_src2.GetReg());
      } else {
        return NewLIR3(kMipsSubu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
      }
    case kOpAdd:
    case kOpAnd:
    case kOpMul:
    case kOpOr:
    case kOpSub:
    case kOpXor:
      return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
    case kOp2Byte:
      // Sign-extend byte: seb where available (MIPS64 or ISA rev >= 2),
      // otherwise a shift-left/shift-right-arithmetic pair.
      if (cu_->target64) {
        res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
      } else {
        if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
            ->IsMipsIsaRevGreaterThanEqual2()) {
          res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
        } else {
          res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
          OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
        }
      }
      return res;
    case kOp2Short:
      // Sign-extend halfword: seh where available, otherwise a shift pair.
      if (cu_->target64) {
        res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
      } else {
        if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
            ->IsMipsIsaRevGreaterThanEqual2()) {
          res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
        } else {
          res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
          OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
        }
      }
      return res;
    case kOp2Char:
      // Zero-extend halfword (Java char) via andi.
      return NewLIR3(kMipsAndi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
    default:
      LOG(FATAL) << "Bad case in OpRegReg";
      UNREACHABLE();
  }
  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}
522
// Not used by the MIPS backend; aborts if reached.
LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
                              MoveType move_type) {
  UNUSED(r_dest, r_base, offset, move_type);
  UNIMPLEMENTED(FATAL);
  UNREACHABLE();
}
529
// Not used by the MIPS backend; aborts if reached.
LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  UNUSED(r_base, offset, r_src, move_type);
  UNIMPLEMENTED(FATAL);
  UNREACHABLE();
}
535
// Conditional register moves are not generated for MIPS; aborts if reached.
LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  UNUSED(op, cc, r_dest, r_src);
  LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
  UNREACHABLE();
}
541
// Loads a 64-bit constant. MIPS64 defers to LoadConstantWideNoClobber; MIPS32
// splits the value into two 32-bit loads -- either into a register pair, or
// into a 64-bit FPR via mthc1 for the high half.
LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  LIR *res;
  if (cu_->target64) {
    res = LoadConstantWideNoClobber(r_dest, value);
    return res;
  }
  if (fpuIs32Bit_ || !r_dest.IsFloat()) {
    // 32bit FPU (pairs) or loading into GPR.
    if (!r_dest.IsPair()) {
      // Form 64-bit pair.
      r_dest = Solo64ToPair64(r_dest);
    }
    res = LoadConstantNoClobber(r_dest.GetLow(), Low32Bits(value));
    LoadConstantNoClobber(r_dest.GetHigh(), High32Bits(value));
  } else {
    // Here if we have a 64bit FPU and loading into FPR.
    RegStorage r_temp = AllocTemp();
    r_dest = Fp64ToSolo32(r_dest);
    res = LoadConstantNoClobber(r_dest, Low32Bits(value));
    LoadConstantNoClobber(r_temp, High32Bits(value));
    // mthc1 writes the upper 32 bits of the 64-bit FPR.
    NewLIR2(kMipsMthc1, r_temp.GetReg(), r_dest.GetReg());
    FreeTemp(r_temp);
  }
  return res;
}
567
568 /* Load value from base + scaled index. */
// MIPS has no reg+reg addressing mode, so the effective address
// base + (index << scale) is computed into a temp first, then loaded from.
LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                  int scale, OpSize size) {
  LIR *first = nullptr;
  LIR *res;
  MipsOpCode opcode = kMipsNop;
  bool is64bit = cu_->target64 && r_dest.Is64Bit();
  RegStorage t_reg = is64bit ? AllocTempWide() : AllocTemp();

  if (r_dest.IsFloat()) {
    // Only single-precision FP loads are supported through this path.
    DCHECK(r_dest.IsSingle());
    DCHECK((size == k32) || (size == kSingle) || (size == kReference));
    size = kSingle;
  } else {
    if (size == kSingle)
      size = k32;
  }

  // Compute the effective address into t_reg.
  if (cu_->target64) {
    if (!scale) {
      if (is64bit) {
        first = NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
      } else {
        first = NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
      }
    } else {
      first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
      NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
    }
  } else {
    if (!scale) {
      first = NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
    } else {
      first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
      NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
    }
  }

  // Pick the load opcode for the access size.
  switch (size) {
    case k64:
      if (cu_->target64) {
        opcode = kMips64Ld;
      } else {
        // 64-bit indexed loads are not supported on MIPS32 through this path.
        LOG(FATAL) << "Bad case in LoadBaseIndexed";
      }
      break;
    case kSingle:
      opcode = kMipsFlwc1;
      break;
    case k32:
    case kReference:
      opcode = kMipsLw;
      break;
    case kUnsignedHalf:
      opcode = kMipsLhu;
      break;
    case kSignedHalf:
      opcode = kMipsLh;
      break;
    case kUnsignedByte:
      opcode = kMipsLbu;
      break;
    case kSignedByte:
      opcode = kMipsLb;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexed";
  }

  res = NewLIR3(opcode, r_dest.GetReg(), 0, t_reg.GetReg());
  FreeTemp(t_reg);
  return (first) ? first : res;
}
641
642 // Store value base base + scaled index.
StoreBaseIndexed(RegStorage r_base,RegStorage r_index,RegStorage r_src,int scale,OpSize size)643 LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
644 int scale, OpSize size) {
645 LIR *first = nullptr;
646 MipsOpCode opcode = kMipsNop;
647 RegStorage t_reg = AllocTemp();
648
649 if (r_src.IsFloat()) {
650 DCHECK(r_src.IsSingle());
651 DCHECK((size == k32) || (size == kSingle) || (size == kReference));
652 size = kSingle;
653 } else {
654 if (size == kSingle)
655 size = k32;
656 }
657
658 MipsOpCode add_opcode = cu_->target64 ? kMips64Daddu : kMipsAddu;
659 if (!scale) {
660 first = NewLIR3(add_opcode, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
661 } else {
662 first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
663 NewLIR3(add_opcode, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
664 }
665
666 switch (size) {
667 case kSingle:
668 opcode = kMipsFswc1;
669 break;
670 case k32:
671 case kReference:
672 opcode = kMipsSw;
673 break;
674 case kUnsignedHalf:
675 case kSignedHalf:
676 opcode = kMipsSh;
677 break;
678 case kUnsignedByte:
679 case kSignedByte:
680 opcode = kMipsSb;
681 break;
682 default:
683 LOG(FATAL) << "Bad case in StoreBaseIndexed";
684 }
685 NewLIR3(opcode, r_src.GetReg(), 0, t_reg.GetReg());
686 return first;
687 }
688
689 // FIXME: don't split r_dest into 2 containers.
LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                   OpSize size) {
  /*
   * Load value from base + displacement.  Optionally perform null check
   * on base (which must have an associated s_reg and MIR).  If not
   * performing null check, incoming MIR can be null.  IMPORTANT: this
   * code must not allocate any new temps.  If a new register is needed
   * and base and dest are the same, spill some other register to
   * rlp and then restore.
   */
  LIR *res;
  LIR *load = nullptr;
  LIR *load2 = nullptr;  // Second load of a 32-bit pair (MIPS32 wide loads).
  MipsOpCode opcode = kMipsNop;
  bool short_form = IS_SIMM16(displacement);
  bool is64bit = false;  // True only for MIPS32 wide (pair/double) loads.

  // Select the load opcode (and normalize r_dest) for the access size.
  switch (size) {
    case k64:
    case kDouble:
      if (cu_->target64) {
        r_dest = Check64BitReg(r_dest);
        if (!r_dest.IsFloat()) {
          opcode = kMips64Ld;
        } else {
          opcode = kMipsFldc1;
        }
        DCHECK_EQ((displacement & 0x3), 0);
        break;
      }
      is64bit = true;
      if (fpuIs32Bit_ && !r_dest.IsPair()) {
        // Form 64-bit pair.
        r_dest = Solo64ToPair64(r_dest);
      }
      // Both halves must be addressable with a 16-bit displacement.
      short_form = IS_SIMM16_2WORD(displacement);
      FALLTHROUGH_INTENDED;
    case k32:
    case kSingle:
    case kReference:
      opcode = kMipsLw;
      if (r_dest.IsFloat()) {
        opcode = kMipsFlwc1;
        if (!is64bit) {
          DCHECK(r_dest.IsSingle());
        } else {
          DCHECK(r_dest.IsDouble());
        }
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = kMipsLhu;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = kMipsLh;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = kMipsLbu;
      break;
    case kSignedByte:
      opcode = kMipsLb;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedBody";
  }

  // MIPS64: always a single load; compute the address first if the
  // displacement does not fit the 16-bit immediate.
  if (cu_->target64) {
    if (short_form) {
      load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
    } else {
      RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
      res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
      load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
      if (r_tmp != r_dest)
        FreeTemp(r_tmp);
    }

    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK_EQ(r_base, TargetPtrReg(kSp));
      AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
    }
    return res;
  }

  // MIPS32 paths.
  if (short_form) {
    if (!is64bit) {
      load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
    } else {
      if (fpuIs32Bit_ || !r_dest.IsFloat()) {
        // GPR pair (or 32-bit FPU pair): two word loads.
        DCHECK(r_dest.IsPair());
        load = res = NewLIR3(opcode, r_dest.GetLowReg(), displacement + LOWORD_OFFSET,
                             r_base.GetReg());
        load2 = NewLIR3(opcode, r_dest.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
      } else {
        // Here if 64bit fpu and r_dest is a 64bit fp register.
        // Low word via lwc1, high word via GPR temp + mthc1.
        RegStorage r_tmp = AllocTemp();
        // FIXME: why is r_dest a 64BitPair here???
        r_dest = Fp64ToSolo32(r_dest);
        load = res = NewLIR3(kMipsFlwc1, r_dest.GetReg(), displacement + LOWORD_OFFSET,
                             r_base.GetReg());
        load2 = NewLIR3(kMipsLw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
        NewLIR2(kMipsMthc1, r_tmp.GetReg(), r_dest.GetReg());
        FreeTemp(r_tmp);
      }
    }
  } else {
    // Displacement out of range: compute base + displacement into a temp.
    if (!is64bit) {
      RegStorage r_tmp = (r_base == r_dest || r_dest.IsFloat()) ? AllocTemp() : r_dest;
      res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
      load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
      if (r_tmp != r_dest)
        FreeTemp(r_tmp);
    } else {
      RegStorage r_tmp = AllocTemp();
      res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
      if (fpuIs32Bit_ || !r_dest.IsFloat()) {
        DCHECK(r_dest.IsPair());
        load = NewLIR3(opcode, r_dest.GetLowReg(), LOWORD_OFFSET, r_tmp.GetReg());
        load2 = NewLIR3(opcode, r_dest.GetHighReg(), HIWORD_OFFSET, r_tmp.GetReg());
      } else {
        // Here if 64bit fpu and r_dest is a 64bit fp register
        // r_tmp is reused as the high-word holder after the address load.
        r_dest = Fp64ToSolo32(r_dest);
        load = res = NewLIR3(kMipsFlwc1, r_dest.GetReg(), LOWORD_OFFSET, r_tmp.GetReg());
        load2 = NewLIR3(kMipsLw, r_tmp.GetReg(), HIWORD_OFFSET, r_tmp.GetReg());
        NewLIR2(kMipsMthc1, r_tmp.GetReg(), r_dest.GetReg());
      }
      FreeTemp(r_tmp);
    }
  }

  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK_EQ(r_base, TargetPtrReg(kSp));
    AnnotateDalvikRegAccess(load, (displacement + (is64bit ? LOWORD_OFFSET : 0)) >> 2,
                            true /* is_load */, is64bit /* is64bit */);
    if (is64bit) {
      AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                              true /* is_load */, is64bit /* is64bit */);
    }
  }
  return res;
}
834
ForceImplicitNullCheck(RegStorage reg,int opt_flags,bool is_wide)835 void MipsMir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags, bool is_wide) {
836 if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
837 if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
838 return;
839 }
840 // Force an implicit null check by performing a memory operation (load) from the given
841 // register with offset 0. This will cause a signal if the register contains 0 (null).
842 LIR* load = Load32Disp(reg, LOWORD_OFFSET, rs_rZERO);
843 MarkSafepointPC(load);
844 if (is_wide) {
845 load = Load32Disp(reg, HIWORD_OFFSET, rs_rZERO);
846 MarkSafepointPC(load);
847 }
848 }
849 }
850
LoadBaseDisp(RegStorage r_base,int displacement,RegStorage r_dest,OpSize size,VolatileKind is_volatile)851 LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
852 VolatileKind is_volatile) {
853 if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))
854 && (!cu_->target64 || displacement & 0x7)) {
855 // TODO: use lld/scd instructions for Mips64.
856 // Do atomic 64-bit load.
857 return GenAtomic64Load(r_base, displacement, r_dest);
858 }
859
860 // TODO: base this on target.
861 if (size == kWord) {
862 size = cu_->target64 ? k64 : k32;
863 }
864 LIR* load;
865 load = LoadBaseDispBody(r_base, displacement, r_dest, size);
866
867 if (UNLIKELY(is_volatile == kVolatile)) {
868 GenMemBarrier(kLoadAny);
869 }
870
871 return load;
872 }
873
874 // FIXME: don't split r_dest into 2 containers.
LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                    OpSize size) {
  LIR *res;
  LIR *store = nullptr;
  LIR *store2 = nullptr;  // Second store of a 32-bit pair (MIPS32 wide stores).
  MipsOpCode opcode = kMipsNop;
  bool short_form = IS_SIMM16(displacement);
  bool is64bit = false;  // True only for MIPS32 wide (pair/double) stores.

  // Select the store opcode (and normalize r_src) for the access size.
  switch (size) {
    case k64:
    case kDouble:
      if (cu_->target64) {
        r_src = Check64BitReg(r_src);
        if (!r_src.IsFloat()) {
          opcode = kMips64Sd;
        } else {
          opcode = kMipsFsdc1;
        }
        DCHECK_EQ((displacement & 0x3), 0);
        break;
      }
      is64bit = true;
      if (fpuIs32Bit_ && !r_src.IsPair()) {
        // Form 64-bit pair.
        r_src = Solo64ToPair64(r_src);
      }
      // Both halves must be addressable with a 16-bit displacement.
      short_form = IS_SIMM16_2WORD(displacement);
      FALLTHROUGH_INTENDED;
    case k32:
    case kSingle:
    case kReference:
      opcode = kMipsSw;
      if (r_src.IsFloat()) {
        opcode = kMipsFswc1;
        if (!is64bit) {
          DCHECK(r_src.IsSingle());
        } else {
          DCHECK(r_src.IsDouble());
        }
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = kMipsSh;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kMipsSb;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseDispBody";
  }

  // MIPS64: always a single store; compute the address first if the
  // displacement does not fit the 16-bit immediate.
  if (cu_->target64) {
    if (short_form) {
      store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
    } else {
      RegStorage r_scratch = AllocTemp();
      res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
      store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
      FreeTemp(r_scratch);
    }

    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK_EQ(r_base, TargetPtrReg(kSp));
      AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
    }
    return res;
  }

  // MIPS32 paths.
  if (short_form) {
    if (!is64bit) {
      store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
    } else {
      if (fpuIs32Bit_ || !r_src.IsFloat()) {
        // GPR pair (or 32-bit FPU pair): two word stores.
        DCHECK(r_src.IsPair());
        store = res = NewLIR3(opcode, r_src.GetLowReg(), displacement + LOWORD_OFFSET,
                              r_base.GetReg());
        store2 = NewLIR3(opcode, r_src.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
      } else {
        // Here if 64bit fpu and r_src is a 64bit fp register
        // Low word via swc1; high word extracted with mfhc1 into a GPR temp.
        RegStorage r_tmp = AllocTemp();
        r_src = Fp64ToSolo32(r_src);
        store = res = NewLIR3(kMipsFswc1, r_src.GetReg(), displacement + LOWORD_OFFSET,
                              r_base.GetReg());
        NewLIR2(kMipsMfhc1, r_tmp.GetReg(), r_src.GetReg());
        store2 = NewLIR3(kMipsSw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
        FreeTemp(r_tmp);
      }
    }
  } else {
    // Displacement out of range: compute base + displacement into a temp.
    RegStorage r_scratch = AllocTemp();
    res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
    if (!is64bit) {
      store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
    } else {
      if (fpuIs32Bit_ || !r_src.IsFloat()) {
        DCHECK(r_src.IsPair());
        store = NewLIR3(opcode, r_src.GetLowReg(), LOWORD_OFFSET, r_scratch.GetReg());
        store2 = NewLIR3(opcode, r_src.GetHighReg(), HIWORD_OFFSET, r_scratch.GetReg());
      } else {
        // Here if 64bit fpu and r_src is a 64bit fp register
        RegStorage r_tmp = AllocTemp();
        r_src = Fp64ToSolo32(r_src);
        store = NewLIR3(kMipsFswc1, r_src.GetReg(), LOWORD_OFFSET, r_scratch.GetReg());
        NewLIR2(kMipsMfhc1, r_tmp.GetReg(), r_src.GetReg());
        store2 = NewLIR3(kMipsSw, r_tmp.GetReg(), HIWORD_OFFSET, r_scratch.GetReg());
        FreeTemp(r_tmp);
      }
    }
    FreeTemp(r_scratch);
  }

  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK_EQ(r_base, TargetPtrReg(kSp));
    AnnotateDalvikRegAccess(store, (displacement + (is64bit ? LOWORD_OFFSET : 0)) >> 2,
                            false /* is_load */, is64bit /* is64bit */);
    if (is64bit) {
      AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                              false /* is_load */, is64bit /* is64bit */);
    }
  }

  return res;
}
1003
StoreBaseDisp(RegStorage r_base,int displacement,RegStorage r_src,OpSize size,VolatileKind is_volatile)1004 LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
1005 VolatileKind is_volatile) {
1006 if (is_volatile == kVolatile) {
1007 // Ensure that prior accesses become visible to other threads first.
1008 GenMemBarrier(kAnyStore);
1009 }
1010
1011 LIR* store;
1012 if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
1013 (!cu_->target64 || displacement & 0x7))) {
1014 // TODO: use lld/scd instructions for Mips64.
1015 // Do atomic 64-bit load.
1016 store = GenAtomic64Store(r_base, displacement, r_src);
1017 } else {
1018 // TODO: base this on target.
1019 if (size == kWord) {
1020 size = cu_->target64 ? k64 : k32;
1021 }
1022 store = StoreBaseDispBody(r_base, displacement, r_src, size);
1023 }
1024
1025 if (UNLIKELY(is_volatile == kVolatile)) {
1026 // Preserve order with respect to any subsequent volatile loads.
1027 // We need StoreLoad, but that generally requires the most expensive barrier.
1028 GenMemBarrier(kAnyAny);
1029 }
1030
1031 return store;
1032 }
1033
// Direct memory-operand ALU ops do not exist on MIPS; aborts if reached.
LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  UNUSED(op, r_base, disp);
  LOG(FATAL) << "Unexpected use of OpMem for MIPS";
  UNREACHABLE();
}
1039
// Condition-code branches are not used on MIPS (compare-and-branch is used
// instead); aborts if reached.
LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  UNUSED(cc, target);
  LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
  UNREACHABLE();
}
1045
InvokeTrampoline(OpKind op,RegStorage r_tgt,QuickEntrypointEnum trampoline)1046 LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
1047 if (!cu_->target64 && IsDirectEntrypoint(trampoline)) {
1048 // Reserve argument space on stack (for $a0-$a3) for
1049 // entrypoints that directly reference native implementations.
1050 // This is not safe in general, as it violates the frame size
1051 // of the Quick method, but it is used here only for calling
1052 // native functions, outside of the runtime.
1053 OpRegImm(kOpSub, rs_rSP, 16);
1054 LIR* retVal = OpReg(op, r_tgt);
1055 OpRegImm(kOpAdd, rs_rSP, 16);
1056 return retVal;
1057 }
1058
1059 return OpReg(op, r_tgt);
1060 }
1061
AllocPtrSizeTemp(bool required)1062 RegStorage MipsMir2Lir::AllocPtrSizeTemp(bool required) {
1063 return cu_->target64 ? AllocTempWide(required) : AllocTemp(required);
1064 }
1065
1066 } // namespace art
1067