1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "arm_lir.h"
18 #include "codegen_arm.h"
19 #include "dex/quick/mir_to_lir-inl.h"
20 #include "dex/reg_storage_eq.h"
21
22 namespace art {
23
24 /* This file contains codegen for the Thumb ISA. */
25
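/*
 * Determine whether the raw bits of a float in 'value' can be encoded as a
 * Thumb2/VFP 8-bit floating point immediate. If not, return -1; if so,
 * return the 8-bit encoding. For example, 2.0f (bit pattern 0x40000000)
 * encodes to 0, which LoadFPConstantValue() below uses to synthesize
 * +0.0 as 2.0 - 2.0.
 */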
26 static int32_t EncodeImmSingle(int32_t value) {
27 int32_t res;
28 int32_t bit_a = (value & 0x80000000) >> 31;
29 int32_t not_bit_b = (value & 0x40000000) >> 30;
30 int32_t bit_b = (value & 0x20000000) >> 29;
31 int32_t b_smear = (value & 0x3e000000) >> 25;
32 int32_t slice = (value & 0x01f80000) >> 19;
33 int32_t zeroes = (value & 0x0007ffff);
34 if (zeroes != 0)
35 return -1;
36 if (bit_b) {
37 if ((not_bit_b != 0) || (b_smear != 0x1f))
38 return -1;
39 } else {
40 if ((not_bit_b != 1) || (b_smear != 0x0))
41 return -1;
42 }
43 res = (bit_a << 7) | (bit_b << 6) | slice;
44 return res;
45 }
46
47 /*
48 * Determine whether value can be encoded as a Thumb2 floating point
49 * immediate. If not, return -1. If so, return the encoded 8-bit value.
50 */
51 static int32_t EncodeImmDouble(int64_t value) {
52 int32_t res;
53 int32_t bit_a = (value & INT64_C(0x8000000000000000)) >> 63;
54 int32_t not_bit_b = (value & INT64_C(0x4000000000000000)) >> 62;
55 int32_t bit_b = (value & INT64_C(0x2000000000000000)) >> 61;
56 int32_t b_smear = (value & INT64_C(0x3fc0000000000000)) >> 54;
57 int32_t slice = (value & INT64_C(0x003f000000000000)) >> 48;
58 uint64_t zeroes = (value & INT64_C(0x0000ffffffffffff));
59 if (zeroes != 0ull)
60 return -1;
61 if (bit_b) {
62 if ((not_bit_b != 0) || (b_smear != 0xff))
63 return -1;
64 } else {
65 if ((not_bit_b != 1) || (b_smear != 0x0))
66 return -1;
67 }
68 res = (bit_a << 7) | (bit_b << 6) | slice;
69 return res;
70 }
71
72 LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
73 DCHECK(RegStorage::IsSingle(r_dest));
74 if (value == 0) {
75 // TODO: we need better info about the target CPU. A vector exclusive-or
76 // would probably be better here if we could rely on its existence.
77 // Load an immediate +2.0 (which encodes to 0)
78 NewLIR2(kThumb2Vmovs_IMM8, r_dest, 0);
79 // +0.0 = +2.0 - +2.0
80 return NewLIR3(kThumb2Vsubs, r_dest, r_dest, r_dest);
81 } else {
82 int encoded_imm = EncodeImmSingle(value);
83 if (encoded_imm >= 0) {
84 return NewLIR2(kThumb2Vmovs_IMM8, r_dest, encoded_imm);
85 }
86 }
87 LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
88 if (data_target == NULL) {
89 data_target = AddWordData(&literal_list_, value);
90 }
91 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
92 LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
93 r_dest, rs_r15pc.GetReg(), 0, 0, 0, data_target);
94 AppendLIR(load_pc_rel);
95 return load_pc_rel;
96 }
97
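// Count the leading zero bits of a 32-bit value with a branchy binary search
// (no CLZ instruction assumed); returns 32 for val == 0.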
98 static int LeadingZeros(uint32_t val) {
99 uint32_t alt;
100 int32_t n;
101 int32_t count;
102
103 count = 16;
104 n = 32;
105 do {
106 alt = val >> count;
107 if (alt != 0) {
108 n = n - count;
109 val = alt;
110 }
111 count >>= 1;
112 } while (count);
113 return n - val;
114 }
115
116 /*
117 * Determine whether value can be encoded as a Thumb2 modified
118 * immediate. If not, return -1. If so, return i:imm3:a:bcdefgh form.
119 */
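/*
 * Examples: ModifiedImmediate(0x00ab00ab) returns (0x1 << 8) | 0xab (the low
 * byte repeated in both halfwords), and ModifiedImmediate(0x0003fc00) returns
 * (22 << 7) | 0x7f, i.e. 0xff rotated right by 22 bits.
 */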
120 int ArmMir2Lir::ModifiedImmediate(uint32_t value) {
121 int32_t z_leading;
122 int32_t z_trailing;
123 uint32_t b0 = value & 0xff;
124
125 /* Note: case of value==0 must use 0:000:0:0000000 encoding */
126 if (value <= 0xFF)
127 return b0; // 0:000:a:bcdefgh
128 if (value == ((b0 << 16) | b0))
129 return (0x1 << 8) | b0; /* 0:001:a:bcdefgh */
130 if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
131 return (0x3 << 8) | b0; /* 0:011:a:bcdefgh */
132 b0 = (value >> 8) & 0xff;
133 if (value == ((b0 << 24) | (b0 << 8)))
134 return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
135 /* Can we do it with rotation? */
136 z_leading = LeadingZeros(value);
137 z_trailing = 32 - LeadingZeros(~value & (value - 1));
138 /* A run of eight or fewer active bits? */
139 if ((z_leading + z_trailing) < 24)
140 return -1; /* No - bail */
141 /* left-justify the constant, discarding msb (known to be 1) */
142 value <<= z_leading + 1;
143 /* Create bcdefgh */
144 value >>= 25;
145 /* Put it all together */
146 return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
147 }
148
149 bool ArmMir2Lir::InexpensiveConstantInt(int32_t value) {
150 return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
151 }
152
153 bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value) {
154 return EncodeImmSingle(value) >= 0;
155 }
156
157 bool ArmMir2Lir::InexpensiveConstantLong(int64_t value) {
158 return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
159 }
160
161 bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value) {
162 return EncodeImmDouble(value) >= 0;
163 }
164
165 /*
166 * Load an immediate using a shortcut if possible; otherwise
167 * grab from the per-translation literal pool.
168 *
169 * No additional register clobbering operation is performed. Use this version when
170 * 1) r_dest is freshly returned from AllocTemp or
171 * 2) The codegen is under fixed register usage
172 */
173 LIR* ArmMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
174 LIR* res;
175 int mod_imm;
176
177 if (r_dest.IsFloat()) {
178 return LoadFPConstantValue(r_dest.GetReg(), value);
179 }
180
181 /* See if the value can be constructed cheaply */
182 if (r_dest.Low8() && (value >= 0) && (value <= 255)) {
183 return NewLIR2(kThumbMovImm, r_dest.GetReg(), value);
184 }
185 /* Check Modified immediate special cases */
186 mod_imm = ModifiedImmediate(value);
187 if (mod_imm >= 0) {
188 res = NewLIR2(kThumb2MovI8M, r_dest.GetReg(), mod_imm);
189 return res;
190 }
191 mod_imm = ModifiedImmediate(~value);
192 if (mod_imm >= 0) {
193 res = NewLIR2(kThumb2MvnI8M, r_dest.GetReg(), mod_imm);
194 return res;
195 }
196 /* 16-bit immediate? */
197 if ((value & 0xffff) == value) {
198 res = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), value);
199 return res;
200 }
201 /* Do a low/high pair */
202 res = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), Low16Bits(value));
203 NewLIR2(kThumb2MovImm16H, r_dest.GetReg(), High16Bits(value));
204 return res;
205 }
206
207 LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) {
208 LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched during assembly */);
209 res->target = target;
210 return res;
211 }
212
213 LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
214 // This is kThumb2BCond instead of kThumbBCond for performance reasons. The assembly
215 // time required for a new pass after kThumbBCond is fixed up to kThumb2BCond is
216 // substantial.
217 LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */,
218 ArmConditionEncoding(cc));
219 branch->target = target;
220 return branch;
221 }
222
223 LIR* ArmMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
224 ArmOpcode opcode = kThumbBkpt;
225 switch (op) {
226 case kOpBlx:
227 opcode = kThumbBlxR;
228 break;
229 case kOpBx:
230 opcode = kThumbBx;
231 break;
232 default:
233 LOG(FATAL) << "Bad opcode " << op;
234 }
235 return NewLIR1(opcode, r_dest_src.GetReg());
236 }
237
238 LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
239 int shift) {
240 bool thumb_form =
241 ((shift == 0) && r_dest_src1.Low8() && r_src2.Low8());
242 ArmOpcode opcode = kThumbBkpt;
243 switch (op) {
244 case kOpAdc:
245 opcode = (thumb_form) ? kThumbAdcRR : kThumb2AdcRRR;
246 break;
247 case kOpAnd:
248 opcode = (thumb_form) ? kThumbAndRR : kThumb2AndRRR;
249 break;
250 case kOpBic:
251 opcode = (thumb_form) ? kThumbBicRR : kThumb2BicRRR;
252 break;
253 case kOpCmn:
254 DCHECK_EQ(shift, 0);
255 opcode = (thumb_form) ? kThumbCmnRR : kThumb2CmnRR;
256 break;
257 case kOpCmp:
258 if (thumb_form)
259 opcode = kThumbCmpRR;
260 else if ((shift == 0) && !r_dest_src1.Low8() && !r_src2.Low8())
261 opcode = kThumbCmpHH;
262 else if ((shift == 0) && r_dest_src1.Low8())
263 opcode = kThumbCmpLH;
264 else if (shift == 0)
265 opcode = kThumbCmpHL;
266 else
267 opcode = kThumb2CmpRR;
268 break;
269 case kOpXor:
270 opcode = (thumb_form) ? kThumbEorRR : kThumb2EorRRR;
271 break;
272 case kOpMov:
273 DCHECK_EQ(shift, 0);
274 if (r_dest_src1.Low8() && r_src2.Low8())
275 opcode = kThumbMovRR;
276 else if (!r_dest_src1.Low8() && !r_src2.Low8())
277 opcode = kThumbMovRR_H2H;
278 else if (r_dest_src1.Low8())
279 opcode = kThumbMovRR_H2L;
280 else
281 opcode = kThumbMovRR_L2H;
282 break;
283 case kOpMul:
284 DCHECK_EQ(shift, 0);
285 opcode = (thumb_form) ? kThumbMul : kThumb2MulRRR;
286 break;
287 case kOpMvn:
288 opcode = (thumb_form) ? kThumbMvn : kThumb2MnvRR;
289 break;
290 case kOpNeg:
291 DCHECK_EQ(shift, 0);
292 opcode = (thumb_form) ? kThumbNeg : kThumb2NegRR;
293 break;
294 case kOpOr:
295 opcode = (thumb_form) ? kThumbOrr : kThumb2OrrRRR;
296 break;
297 case kOpSbc:
298 opcode = (thumb_form) ? kThumbSbc : kThumb2SbcRRR;
299 break;
300 case kOpTst:
301 opcode = (thumb_form) ? kThumbTst : kThumb2TstRR;
302 break;
303 case kOpLsl:
304 DCHECK_EQ(shift, 0);
305 opcode = (thumb_form) ? kThumbLslRR : kThumb2LslRRR;
306 break;
307 case kOpLsr:
308 DCHECK_EQ(shift, 0);
309 opcode = (thumb_form) ? kThumbLsrRR : kThumb2LsrRRR;
310 break;
311 case kOpAsr:
312 DCHECK_EQ(shift, 0);
313 opcode = (thumb_form) ? kThumbAsrRR : kThumb2AsrRRR;
314 break;
315 case kOpRor:
316 DCHECK_EQ(shift, 0);
317 opcode = (thumb_form) ? kThumbRorRR : kThumb2RorRRR;
318 break;
319 case kOpAdd:
320 opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
321 break;
322 case kOpSub:
323 opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
324 break;
325 case kOpRev:
326 DCHECK_EQ(shift, 0);
327 if (!thumb_form) {
328 // Binary, but rm is encoded twice.
329 return NewLIR3(kThumb2RevRR, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
330 }
331 opcode = kThumbRev;
332 break;
333 case kOpRevsh:
334 DCHECK_EQ(shift, 0);
335 if (!thumb_form) {
336 // Binary, but rm is encoded twice.
337 return NewLIR3(kThumb2RevshRR, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
338 }
339 opcode = kThumbRevsh;
340 break;
341 case kOp2Byte:
342 DCHECK_EQ(shift, 0);
343 return NewLIR4(kThumb2Sbfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 8);
344 case kOp2Short:
345 DCHECK_EQ(shift, 0);
346 return NewLIR4(kThumb2Sbfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 16);
347 case kOp2Char:
348 DCHECK_EQ(shift, 0);
349 return NewLIR4(kThumb2Ubfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 16);
350 default:
351 LOG(FATAL) << "Bad opcode: " << op;
352 break;
353 }
354 DCHECK(!IsPseudoLirOp(opcode));
355 if (EncodingMap[opcode].flags & IS_BINARY_OP) {
356 return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
357 } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
358 if (EncodingMap[opcode].field_loc[2].kind == kFmtShift) {
359 return NewLIR3(opcode, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
360 } else {
361 return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg());
362 }
363 } else if (EncodingMap[opcode].flags & IS_QUAD_OP) {
364 return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
365 } else {
366 LOG(FATAL) << "Unexpected encoding operand count";
367 return NULL;
368 }
369 }
370
371 LIR* ArmMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
372 return OpRegRegShift(op, r_dest_src1, r_src2, 0);
373 }
374
375 LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
376 UNIMPLEMENTED(FATAL);
377 return nullptr;
378 }
379
380 LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
381 UNIMPLEMENTED(FATAL);
382 return nullptr;
383 }
384
385 LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
386 LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
387 return NULL;
388 }
389
390 LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
391 RegStorage r_src2, int shift) {
392 ArmOpcode opcode = kThumbBkpt;
393 bool thumb_form = (shift == 0) && r_dest.Low8() && r_src1.Low8() && r_src2.Low8();
394 switch (op) {
395 case kOpAdd:
396 opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
397 break;
398 case kOpSub:
399 opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
400 break;
401 case kOpRsub:
402 opcode = kThumb2RsubRRR;
403 break;
404 case kOpAdc:
405 opcode = kThumb2AdcRRR;
406 break;
407 case kOpAnd:
408 opcode = kThumb2AndRRR;
409 break;
410 case kOpBic:
411 opcode = kThumb2BicRRR;
412 break;
413 case kOpXor:
414 opcode = kThumb2EorRRR;
415 break;
416 case kOpMul:
417 DCHECK_EQ(shift, 0);
418 opcode = kThumb2MulRRR;
419 break;
420 case kOpDiv:
421 DCHECK_EQ(shift, 0);
422 opcode = kThumb2SdivRRR;
423 break;
424 case kOpOr:
425 opcode = kThumb2OrrRRR;
426 break;
427 case kOpSbc:
428 opcode = kThumb2SbcRRR;
429 break;
430 case kOpLsl:
431 DCHECK_EQ(shift, 0);
432 opcode = kThumb2LslRRR;
433 break;
434 case kOpLsr:
435 DCHECK_EQ(shift, 0);
436 opcode = kThumb2LsrRRR;
437 break;
438 case kOpAsr:
439 DCHECK_EQ(shift, 0);
440 opcode = kThumb2AsrRRR;
441 break;
442 case kOpRor:
443 DCHECK_EQ(shift, 0);
444 opcode = kThumb2RorRRR;
445 break;
446 default:
447 LOG(FATAL) << "Bad opcode: " << op;
448 break;
449 }
450 DCHECK(!IsPseudoLirOp(opcode));
451 if (EncodingMap[opcode].flags & IS_QUAD_OP) {
452 return NewLIR4(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
453 } else {
454 DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
455 return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
456 }
457 }
458
459 LIR* ArmMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
460 return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
461 }
462
463 LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
464 LIR* res;
465 bool neg = (value < 0);
466 int32_t abs_value = (neg) ? -value : value;
467 ArmOpcode opcode = kThumbBkpt;
468 ArmOpcode alt_opcode = kThumbBkpt;
469 bool all_low_regs = r_dest.Low8() && r_src1.Low8();
470 int32_t mod_imm = ModifiedImmediate(value);
471
472 switch (op) {
473 case kOpLsl:
474 if (all_low_regs)
475 return NewLIR3(kThumbLslRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
476 else
477 return NewLIR3(kThumb2LslRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
478 case kOpLsr:
479 if (all_low_regs)
480 return NewLIR3(kThumbLsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
481 else
482 return NewLIR3(kThumb2LsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
483 case kOpAsr:
484 if (all_low_regs)
485 return NewLIR3(kThumbAsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
486 else
487 return NewLIR3(kThumb2AsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
488 case kOpRor:
489 return NewLIR3(kThumb2RorRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
490 case kOpAdd:
491 if (r_dest.Low8() && (r_src1 == rs_r13sp) && (value <= 1020) && ((value & 0x3) == 0)) {
492 return NewLIR3(kThumbAddSpRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
493 } else if (r_dest.Low8() && (r_src1 == rs_r15pc) &&
494 (value <= 1020) && ((value & 0x3) == 0)) {
495 return NewLIR3(kThumbAddPcRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
496 }
497 // Note: intentional fallthrough
498 case kOpSub:
499 if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
500 if (op == kOpAdd)
501 opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
502 else
503 opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
504 return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), abs_value);
505 }
506 if (mod_imm < 0) {
507 mod_imm = ModifiedImmediate(-value);
508 if (mod_imm >= 0) {
509 op = (op == kOpAdd) ? kOpSub : kOpAdd;
510 }
511 }
512 if (mod_imm < 0 && (abs_value & 0x3ff) == abs_value) {
513 // This is deliberately used only if modified immediate encoding is inadequate since
514 // we sometimes actually use the flags for small values but not necessarily low regs.
515 if (op == kOpAdd)
516 opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
517 else
518 opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
519 return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), abs_value);
520 }
521 if (op == kOpSub) {
522 opcode = kThumb2SubRRI8M;
523 alt_opcode = kThumb2SubRRR;
524 } else {
525 opcode = kThumb2AddRRI8M;
526 alt_opcode = kThumb2AddRRR;
527 }
528 break;
529 case kOpRsub:
530 opcode = kThumb2RsubRRI8M;
531 alt_opcode = kThumb2RsubRRR;
532 break;
533 case kOpAdc:
534 opcode = kThumb2AdcRRI8M;
535 alt_opcode = kThumb2AdcRRR;
536 break;
537 case kOpSbc:
538 opcode = kThumb2SbcRRI8M;
539 alt_opcode = kThumb2SbcRRR;
540 break;
541 case kOpOr:
542 opcode = kThumb2OrrRRI8M;
543 alt_opcode = kThumb2OrrRRR;
544 break;
545 case kOpAnd:
546 if (mod_imm < 0) {
547 mod_imm = ModifiedImmediate(~value);
548 if (mod_imm >= 0) {
549 return NewLIR3(kThumb2BicRRI8M, r_dest.GetReg(), r_src1.GetReg(), mod_imm);
550 }
551 }
552 opcode = kThumb2AndRRI8M;
553 alt_opcode = kThumb2AndRRR;
554 break;
555 case kOpXor:
556 opcode = kThumb2EorRRI8M;
557 alt_opcode = kThumb2EorRRR;
558 break;
559 case kOpMul:
560 // TUNING: power of 2, shift & add
561 mod_imm = -1;
562 alt_opcode = kThumb2MulRRR;
563 break;
564 case kOpCmp: {
565 LIR* res;
566 if (mod_imm >= 0) {
567 res = NewLIR2(kThumb2CmpRI8M, r_src1.GetReg(), mod_imm);
568 } else {
569 mod_imm = ModifiedImmediate(-value);
570 if (mod_imm >= 0) {
571 res = NewLIR2(kThumb2CmnRI8M, r_src1.GetReg(), mod_imm);
572 } else {
573 RegStorage r_tmp = AllocTemp();
574 res = LoadConstant(r_tmp, value);
575 OpRegReg(kOpCmp, r_src1, r_tmp);
576 FreeTemp(r_tmp);
577 }
578 }
579 return res;
580 }
581 default:
582 LOG(FATAL) << "Bad opcode: " << op;
583 }
584
585 if (mod_imm >= 0) {
586 return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), mod_imm);
587 } else {
588 RegStorage r_scratch = AllocTemp();
589 LoadConstant(r_scratch, value);
590 if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
591 res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
592 else
593 res = NewLIR3(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
594 FreeTemp(r_scratch);
595 return res;
596 }
597 }
598
599 /* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
600 LIR* ArmMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
601 bool neg = (value < 0);
602 int32_t abs_value = (neg) ? -value : value;
603 bool short_form = (((abs_value & 0xff) == abs_value) && r_dest_src1.Low8());
604 ArmOpcode opcode = kThumbBkpt;
605 switch (op) {
606 case kOpAdd:
607 if (!neg && (r_dest_src1 == rs_r13sp) && (value <= 508)) { /* sp */
608 DCHECK_EQ((value & 0x3), 0);
609 return NewLIR1(kThumbAddSpI7, value >> 2);
610 } else if (short_form) {
611 opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
612 }
613 break;
614 case kOpSub:
615 if (!neg && (r_dest_src1 == rs_r13sp) && (value <= 508)) { /* sp */
616 DCHECK_EQ((value & 0x3), 0);
617 return NewLIR1(kThumbSubSpI7, value >> 2);
618 } else if (short_form) {
619 opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
620 }
621 break;
622 case kOpCmp:
623 if (!neg && short_form) {
624 opcode = kThumbCmpRI8;
625 } else {
626 short_form = false;
627 }
628 break;
629 default:
630 /* Punt to OpRegRegImm - if bad case catch it there */
631 short_form = false;
632 break;
633 }
634 if (short_form) {
635 return NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
636 } else {
637 return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
638 }
639 }
640
641 LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
642 LIR* res = NULL;
643 int32_t val_lo = Low32Bits(value);
644 int32_t val_hi = High32Bits(value);
645 if (r_dest.IsFloat()) {
646 DCHECK(!r_dest.IsPair());
647 if ((val_lo == 0) && (val_hi == 0)) {
648 // TODO: we need better info about the target CPU. A vector exclusive-or
649 // would probably be better here if we could rely on its existence.
650 // Load an immediate +2.0 (which encodes to 0)
651 NewLIR2(kThumb2Vmovd_IMM8, r_dest.GetReg(), 0);
652 // +0.0 = +2.0 - +2.0
653 res = NewLIR3(kThumb2Vsubd, r_dest.GetReg(), r_dest.GetReg(), r_dest.GetReg());
654 } else {
655 int encoded_imm = EncodeImmDouble(value);
656 if (encoded_imm >= 0) {
657 res = NewLIR2(kThumb2Vmovd_IMM8, r_dest.GetReg(), encoded_imm);
658 }
659 }
660 } else {
661 // NOTE: Arm32 assumption here.
662 DCHECK(r_dest.IsPair());
663 if ((InexpensiveConstantInt(val_lo) && (InexpensiveConstantInt(val_hi)))) {
664 res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
665 LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
666 }
667 }
668 if (res == NULL) {
669 // No short form - load from the literal pool.
670 LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
671 if (data_target == NULL) {
672 data_target = AddWideData(&literal_list_, val_lo, val_hi);
673 }
674 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
675 if (r_dest.IsFloat()) {
676 res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
677 r_dest.GetReg(), rs_r15pc.GetReg(), 0, 0, 0, data_target);
678 } else {
679 DCHECK(r_dest.IsPair());
680 res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
681 r_dest.GetLowReg(), r_dest.GetHighReg(), rs_r15pc.GetReg(), 0, 0, data_target);
682 }
683 AppendLIR(res);
684 }
685 return res;
686 }
687
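// Pack a shift type (e.g. kArmLsl) and a 5-bit shift amount into the single
// operand used by the register-plus-shifted-register instruction forms (see
// the kThumb2AddRRR uses below).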
688 int ArmMir2Lir::EncodeShift(int code, int amount) {
689 return ((amount & 0x1f) << 2) | code;
690 }
691
692 LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
693 int scale, OpSize size) {
694 bool all_low_regs = r_base.Low8() && r_index.Low8() && r_dest.Low8();
695 LIR* load;
696 ArmOpcode opcode = kThumbBkpt;
697 bool thumb_form = (all_low_regs && (scale == 0));
698 RegStorage reg_ptr;
699
700 if (r_dest.IsFloat()) {
701 if (r_dest.IsSingle()) {
702 DCHECK((size == k32) || (size == kSingle) || (size == kReference));
703 opcode = kThumb2Vldrs;
704 size = kSingle;
705 } else {
706 DCHECK(r_dest.IsDouble());
707 DCHECK((size == k64) || (size == kDouble));
708 opcode = kThumb2Vldrd;
709 size = kDouble;
710 }
711 } else {
712 if (size == kSingle)
713 size = k32;
714 }
715
716 switch (size) {
717 case kDouble: // fall-through
718 // Intentional fall-through.
719 case kSingle:
720 reg_ptr = AllocTemp();
721 if (scale) {
722 NewLIR4(kThumb2AddRRR, reg_ptr.GetReg(), r_base.GetReg(), r_index.GetReg(),
723 EncodeShift(kArmLsl, scale));
724 } else {
725 OpRegRegReg(kOpAdd, reg_ptr, r_base, r_index);
726 }
727 load = NewLIR3(opcode, r_dest.GetReg(), reg_ptr.GetReg(), 0);
728 FreeTemp(reg_ptr);
729 return load;
730 case k32:
731 // Intentional fall-through.
732 case kReference:
733 opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
734 break;
735 case kUnsignedHalf:
736 opcode = (thumb_form) ? kThumbLdrhRRR : kThumb2LdrhRRR;
737 break;
738 case kSignedHalf:
739 opcode = (thumb_form) ? kThumbLdrshRRR : kThumb2LdrshRRR;
740 break;
741 case kUnsignedByte:
742 opcode = (thumb_form) ? kThumbLdrbRRR : kThumb2LdrbRRR;
743 break;
744 case kSignedByte:
745 opcode = (thumb_form) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
746 break;
747 default:
748 LOG(FATAL) << "Bad size: " << size;
749 }
750 if (thumb_form)
751 load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
752 else
753 load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
754
755 return load;
756 }
757
758 LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
759 int scale, OpSize size) {
760 bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
761 LIR* store = NULL;
762 ArmOpcode opcode = kThumbBkpt;
763 bool thumb_form = (all_low_regs && (scale == 0));
764 RegStorage reg_ptr;
765
766 if (r_src.IsFloat()) {
767 if (r_src.IsSingle()) {
768 DCHECK((size == k32) || (size == kSingle) || (size == kReference));
769 opcode = kThumb2Vstrs;
770 size = kSingle;
771 } else {
772 DCHECK(r_src.IsDouble());
773 DCHECK((size == k64) || (size == kDouble));
774 DCHECK_EQ((r_src.GetReg() & 0x1), 0);
775 opcode = kThumb2Vstrd;
776 size = kDouble;
777 }
778 } else {
779 if (size == kSingle)
780 size = k32;
781 }
782
783 switch (size) {
784 case kDouble: // fall-through
785 // Intentional fall-through.
786 case kSingle:
787 reg_ptr = AllocTemp();
788 if (scale) {
789 NewLIR4(kThumb2AddRRR, reg_ptr.GetReg(), r_base.GetReg(), r_index.GetReg(),
790 EncodeShift(kArmLsl, scale));
791 } else {
792 OpRegRegReg(kOpAdd, reg_ptr, r_base, r_index);
793 }
794 store = NewLIR3(opcode, r_src.GetReg(), reg_ptr.GetReg(), 0);
795 FreeTemp(reg_ptr);
796 return store;
797 case k32:
798 // Intentional fall-through.
799 case kReference:
800 opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
801 break;
802 case kUnsignedHalf:
803 // Intentional fall-through.
804 case kSignedHalf:
805 opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
806 break;
807 case kUnsignedByte:
808 // Intentional fall-through.
809 case kSignedByte:
810 opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
811 break;
812 default:
813 LOG(FATAL) << "Bad size: " << size;
814 }
815 if (thumb_form)
816 store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
817 else
818 store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
819
820 return store;
821 }
822
823 // Helper function for LoadBaseDispBody()/StoreBaseDispBody().
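// For example, displacement 0x404 is handled as an add of 0x400 to the base
// plus an encoded in-instruction offset of 1 (0x4 >> 2).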
824 LIR* ArmMir2Lir::LoadStoreUsingInsnWithOffsetImm8Shl2(ArmOpcode opcode, RegStorage r_base,
825 int displacement, RegStorage r_src_dest,
826 RegStorage r_work) {
827 DCHECK_EQ(displacement & 3, 0);
828 constexpr int kOffsetMask = 0xff << 2;
829 int encoded_disp = (displacement & kOffsetMask) >> 2; // Within range of the instruction.
830 RegStorage r_ptr = r_base;
831 if ((displacement & ~kOffsetMask) != 0) {
832 r_ptr = r_work.Valid() ? r_work : AllocTemp();
833 // Add displacement & ~kOffsetMask to the base; this needs only a single instruction for offsets up to +-256KiB.
834 OpRegRegImm(kOpAdd, r_ptr, r_base, displacement & ~kOffsetMask);
835 }
836 LIR* lir = nullptr;
837 if (!r_src_dest.IsPair()) {
838 lir = NewLIR3(opcode, r_src_dest.GetReg(), r_ptr.GetReg(), encoded_disp);
839 } else {
840 lir = NewLIR4(opcode, r_src_dest.GetLowReg(), r_src_dest.GetHighReg(), r_ptr.GetReg(),
841 encoded_disp);
842 }
843 if ((displacement & ~kOffsetMask) != 0 && !r_work.Valid()) {
844 FreeTemp(r_ptr);
845 }
846 return lir;
847 }
848
849 /*
850 * Load value from base + displacement. Optionally perform null check
851 * on base (which must have an associated s_reg and MIR). If not
852 * performing null check, incoming MIR can be null.
853 */
854 LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
855 OpSize size) {
856 LIR* load = NULL;
857 ArmOpcode opcode = kThumbBkpt;
858 bool short_form = false;
859 bool thumb2Form = (displacement < 4092 && displacement >= 0);
860 bool all_low = r_dest.Is32Bit() && r_base.Low8() && r_dest.Low8();
861 int encoded_disp = displacement;
862 bool already_generated = false;
863 switch (size) {
864 case kDouble:
865 // Intentional fall-through.
866 case k64:
867 if (r_dest.IsFloat()) {
868 DCHECK(!r_dest.IsPair());
869 load = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2Vldrd, r_base, displacement, r_dest);
870 } else {
871 DCHECK(r_dest.IsPair());
872 // Use the r_dest.GetLow() for the temporary pointer if needed.
873 load = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2LdrdI8, r_base, displacement, r_dest,
874 r_dest.GetLow());
875 }
876 already_generated = true;
877 break;
878 case kSingle:
879 // Intentional fall-through.
880 case k32:
881 // Intentional fall-through.
882 case kReference:
883 if (r_dest.IsFloat()) {
884 DCHECK(r_dest.IsSingle());
885 load = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2Vldrs, r_base, displacement, r_dest);
886 already_generated = true;
887 break;
888 }
889 if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) &&
890 (displacement >= 0)) {
891 short_form = true;
892 encoded_disp >>= 2;
893 opcode = kThumbLdrPcRel;
894 } else if (r_dest.Low8() && (r_base == rs_rARM_SP) && (displacement <= 1020) &&
895 (displacement >= 0)) {
896 short_form = true;
897 encoded_disp >>= 2;
898 opcode = kThumbLdrSpRel;
899 } else if (all_low && displacement < 128 && displacement >= 0) {
900 DCHECK_EQ((displacement & 0x3), 0);
901 short_form = true;
902 encoded_disp >>= 2;
903 opcode = kThumbLdrRRI5;
904 } else if (thumb2Form) {
905 short_form = true;
906 opcode = kThumb2LdrRRI12;
907 }
908 break;
909 case kUnsignedHalf:
910 if (all_low && displacement < 64 && displacement >= 0) {
911 DCHECK_EQ((displacement & 0x1), 0);
912 short_form = true;
913 encoded_disp >>= 1;
914 opcode = kThumbLdrhRRI5;
915 } else if (displacement < 4092 && displacement >= 0) {
916 short_form = true;
917 opcode = kThumb2LdrhRRI12;
918 }
919 break;
920 case kSignedHalf:
921 if (thumb2Form) {
922 short_form = true;
923 opcode = kThumb2LdrshRRI12;
924 }
925 break;
926 case kUnsignedByte:
927 if (all_low && displacement < 32 && displacement >= 0) {
928 short_form = true;
929 opcode = kThumbLdrbRRI5;
930 } else if (thumb2Form) {
931 short_form = true;
932 opcode = kThumb2LdrbRRI12;
933 }
934 break;
935 case kSignedByte:
936 if (thumb2Form) {
937 short_form = true;
938 opcode = kThumb2LdrsbRRI12;
939 }
940 break;
941 default:
942 LOG(FATAL) << "Bad size: " << size;
943 }
944
945 if (!already_generated) {
946 if (short_form) {
947 load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), encoded_disp);
948 } else {
949 RegStorage reg_offset = AllocTemp();
950 LoadConstant(reg_offset, encoded_disp);
951 DCHECK(!r_dest.IsFloat());
952 load = LoadBaseIndexed(r_base, reg_offset, r_dest, 0, size);
953 FreeTemp(reg_offset);
954 }
955 }
956
957 // TODO: in future may need to differentiate Dalvik accesses w/ spills
958 if (mem_ref_type_ == ResourceMask::kDalvikReg) {
959 DCHECK(r_base == rs_rARM_SP);
960 AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
961 }
962 return load;
963 }
964
965 LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
966 OpSize size, VolatileKind is_volatile) {
967 // TODO: base this on target.
968 if (size == kWord) {
969 size = k32;
970 }
971 LIR* load;
972 if (UNLIKELY(is_volatile == kVolatile &&
973 (size == k64 || size == kDouble) &&
974 !cu_->compiler_driver->GetInstructionSetFeatures().HasLpae())) {
975 // Only 64-bit load needs special handling.
976 // If the cpu supports LPAE, aligned LDRD is atomic - fall through to LoadBaseDisp().
977 DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadSave().
978 // Use LDREXD for the atomic load. (Expect displacement > 0, don't optimize for == 0.)
979 RegStorage r_ptr = AllocTemp();
980 OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
981 load = NewLIR3(kThumb2Ldrexd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_ptr.GetReg());
982 FreeTemp(r_ptr);
983 } else {
984 load = LoadBaseDispBody(r_base, displacement, r_dest, size);
985 }
986
987 if (UNLIKELY(is_volatile == kVolatile)) {
988 GenMemBarrier(kLoadAny);
989 }
990
991 return load;
992 }
993
994
995 LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
996 OpSize size) {
997 LIR* store = NULL;
998 ArmOpcode opcode = kThumbBkpt;
999 bool short_form = false;
1000 bool thumb2Form = (displacement < 4092 && displacement >= 0);
1001 bool all_low = r_src.Is32Bit() && r_base.Low8() && r_src.Low8();
1002 int encoded_disp = displacement;
1003 bool already_generated = false;
1004 switch (size) {
1005 case kDouble:
1006 // Intentional fall-through.
1007 case k64:
1008 if (r_src.IsFloat()) {
1009 DCHECK(!r_src.IsPair());
1010 store = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2Vstrd, r_base, displacement, r_src);
1011 } else {
1012 DCHECK(r_src.IsPair());
1013 store = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2StrdI8, r_base, displacement, r_src);
1014 }
1015 already_generated = true;
1016 break;
1017 case kSingle:
1018 // Intentional fall-through.
1019 case k32:
1020 // Intentional fall-through.
1021 case kReference:
1022 if (r_src.IsFloat()) {
1023 DCHECK(r_src.IsSingle());
1024 store = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2Vstrs, r_base, displacement, r_src);
1025 already_generated = true;
1026 break;
1027 }
1028 if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) {
1029 short_form = true;
1030 encoded_disp >>= 2;
1031 opcode = kThumbStrSpRel;
1032 } else if (all_low && displacement < 128 && displacement >= 0) {
1033 DCHECK_EQ((displacement & 0x3), 0);
1034 short_form = true;
1035 encoded_disp >>= 2;
1036 opcode = kThumbStrRRI5;
1037 } else if (thumb2Form) {
1038 short_form = true;
1039 opcode = kThumb2StrRRI12;
1040 }
1041 break;
1042 case kUnsignedHalf:
1043 case kSignedHalf:
1044 if (all_low && displacement < 64 && displacement >= 0) {
1045 DCHECK_EQ((displacement & 0x1), 0);
1046 short_form = true;
1047 encoded_disp >>= 1;
1048 opcode = kThumbStrhRRI5;
1049 } else if (thumb2Form) {
1050 short_form = true;
1051 opcode = kThumb2StrhRRI12;
1052 }
1053 break;
1054 case kUnsignedByte:
1055 case kSignedByte:
1056 if (all_low && displacement < 32 && displacement >= 0) {
1057 short_form = true;
1058 opcode = kThumbStrbRRI5;
1059 } else if (thumb2Form) {
1060 short_form = true;
1061 opcode = kThumb2StrbRRI12;
1062 }
1063 break;
1064 default:
1065 LOG(FATAL) << "Bad size: " << size;
1066 }
1067 if (!already_generated) {
1068 if (short_form) {
1069 store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), encoded_disp);
1070 } else {
1071 RegStorage r_scratch = AllocTemp();
1072 LoadConstant(r_scratch, encoded_disp);
1073 DCHECK(!r_src.IsFloat());
1074 store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
1075 FreeTemp(r_scratch);
1076 }
1077 }
1078
1079 // TODO: In future, may need to differentiate Dalvik & spill accesses
1080 if (mem_ref_type_ == ResourceMask::kDalvikReg) {
1081 DCHECK(r_base == rs_rARM_SP);
1082 AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
1083 }
1084 return store;
1085 }
1086
1087 LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
1088 OpSize size, VolatileKind is_volatile) {
1089 if (UNLIKELY(is_volatile == kVolatile)) {
1090 // Ensure that prior accesses become visible to other threads first.
1091 GenMemBarrier(kAnyStore);
1092 }
1093
1094 LIR* null_ck_insn;
1095 if (UNLIKELY(is_volatile == kVolatile &&
1096 (size == k64 || size == kDouble) &&
1097 !cu_->compiler_driver->GetInstructionSetFeatures().HasLpae())) {
1098 // Only 64-bit store needs special handling.
1099 // If the cpu supports LPAE, aligned STRD is atomic - fall through to StoreBaseDisp().
1100 // Use STREXD for the atomic store. (Expect displacement > 0, don't optimize for == 0.)
1101 DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadSave().
1102 RegStorage r_ptr = AllocTemp();
1103 OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
1104 LIR* fail_target = NewLIR0(kPseudoTargetLabel);
1105 // We have only 5 temporary registers available and if r_base, r_src and r_ptr already
1106 // take 4, we can't directly allocate 2 more for LDREXD temps. In that case clobber r_ptr
1107 // in LDREXD and recalculate it from r_base.
1108 RegStorage r_temp = AllocTemp();
1109 RegStorage r_temp_high = AllocTemp(false); // We may not have another temp.
1110 if (r_temp_high.Valid()) {
1111 null_ck_insn = NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_temp_high.GetReg(), r_ptr.GetReg());
1112 FreeTemp(r_temp_high);
1113 FreeTemp(r_temp);
1114 } else {
1115 // If we don't have another temp, clobber r_ptr in LDREXD and reload it.
1116 null_ck_insn = NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_ptr.GetReg(), r_ptr.GetReg());
1117 FreeTemp(r_temp); // May need the temp for kOpAdd.
1118 OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
1119 }
1120 NewLIR4(kThumb2Strexd, r_temp.GetReg(), r_src.GetLowReg(), r_src.GetHighReg(), r_ptr.GetReg());
1121 OpCmpImmBranch(kCondNe, r_temp, 0, fail_target);
1122 FreeTemp(r_ptr);
1123 } else {
1124 // TODO: base this on target.
1125 if (size == kWord) {
1126 size = k32;
1127 }
1128
1129 null_ck_insn = StoreBaseDispBody(r_base, displacement, r_src, size);
1130 }
1131
1132 if (UNLIKELY(is_volatile == kVolatile)) {
1133 // Preserve order with respect to any subsequent volatile loads.
1134 // We need StoreLoad, but that generally requires the most expensive barrier.
1135 GenMemBarrier(kAnyAny);
1136 }
1137
1138 return null_ck_insn;
1139 }
1140
1141 LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
1142 int opcode;
1143 DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
1144 if (r_dest.IsDouble()) {
1145 opcode = kThumb2Vmovd;
1146 } else {
1147 if (r_dest.IsSingle()) {
1148 opcode = r_src.IsSingle() ? kThumb2Vmovs : kThumb2Fmsr;
1149 } else {
1150 DCHECK(r_src.IsSingle());
1151 opcode = kThumb2Fmrs;
1152 }
1153 }
1154 LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
1155 if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
1156 res->flags.is_nop = true;
1157 }
1158 return res;
1159 }
1160
1161 LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
1162 LOG(FATAL) << "Unexpected use of OpMem for Arm";
1163 return NULL;
1164 }
1165
1166 LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
1167 return OpReg(op, r_tgt);
1168 }
1169
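// Return the byte offset encoded in a load/store LIR, rescaling operands that
// the instruction encodes in halfword (SCALED_OFFSET_X2) or word
// (SCALED_OFFSET_X4) units.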
1170 size_t ArmMir2Lir::GetInstructionOffset(LIR* lir) {
1171 uint64_t check_flags = GetTargetInstFlags(lir->opcode);
1172 DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
1173 size_t offset = (check_flags & IS_TERTIARY_OP) ? lir->operands[2] : 0;
1174
1175 if (check_flags & SCALED_OFFSET_X2) {
1176 offset = offset * 2;
1177 } else if (check_flags & SCALED_OFFSET_X4) {
1178 offset = offset * 4;
1179 }
1180 return offset;
1181 }
1182
1183 } // namespace art
1184