/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/dataflow_iterator-inl.h"
#include "x86_lir.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/reg_storage_eq.h"

namespace art {

/* This file contains codegen for the X86 ISA */

LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  int opcode;
  /* must be both DOUBLE or both not DOUBLE */
  DCHECK(r_dest.IsFloat() || r_src.IsFloat());
  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
  if (r_dest.IsDouble()) {
    opcode = kX86MovsdRR;
  } else {
    if (r_dest.IsSingle()) {
      if (r_src.IsSingle()) {
        opcode = kX86MovssRR;
      } else {  // Fpr <- Gpr
        opcode = kX86MovdxrRR;
      }
    } else {  // Gpr <- Fpr
      DCHECK(r_src.IsSingle()) << "Raw: 0x" << std::hex << r_src.GetRawBits();
      opcode = kX86MovdrxRR;
    }
  }
  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return false;
}

bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return value == 0;
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool.  If target is
 * a high register, build constant into a low register and copy.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* X86Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  RegStorage r_dest_save = r_dest;
  if (r_dest.IsFloat()) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest.GetReg(), r_dest.GetReg());
    }
    r_dest = AllocTemp();
  }

  LIR *res;
  if (value == 0) {
    res = NewLIR2(kX86Xor32RR, r_dest.GetReg(), r_dest.GetReg());
  } else {
    // Note, there is no byte immediate form of a 32 bit immediate move.
    // 64-bit immediate is not supported by LIR structure
    res = NewLIR2(kX86Mov32RI, r_dest.GetReg(), value);
  }

  if (r_dest_save.IsFloat()) {
    NewLIR2(kX86MovdxrRR, r_dest_save.GetReg(), r_dest.GetReg());
    FreeTemp(r_dest);
  }

  return res;
}

LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
                        X86ConditionEncoding(cc));
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpNeg: opcode = r_dest_src.Is64Bit() ? kX86Neg64R : kX86Neg32R; break;
    case kOpNot: opcode = r_dest_src.Is64Bit() ? kX86Not64R : kX86Not32R; break;
    case kOpRev: opcode = r_dest_src.Is64Bit() ? kX86Bswap64R : kX86Bswap32R; break;
    case kOpBlx: opcode = kX86CallR; break;
    default:
      LOG(FATAL) << "Bad case in OpReg " << op;
  }
  return NewLIR1(opcode, r_dest_src.GetReg());
}

LIR* X86Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  X86OpCode opcode = kX86Bkpt;
  bool byte_imm = IS_SIMM8(value);
  DCHECK(!r_dest_src1.IsFloat());
  if (r_dest_src1.Is64Bit()) {
    switch (op) {
      case kOpAdd: opcode = byte_imm ? kX86Add64RI8 : kX86Add64RI; break;
      case kOpSub: opcode = byte_imm ? kX86Sub64RI8 : kX86Sub64RI; break;
      case kOpLsl: opcode = kX86Sal64RI; break;
      case kOpLsr: opcode = kX86Shr64RI; break;
      case kOpAsr: opcode = kX86Sar64RI; break;
      case kOpCmp: opcode = byte_imm ? kX86Cmp64RI8 : kX86Cmp64RI; break;
      default:
        LOG(FATAL) << "Bad case in OpRegImm (64-bit) " << op;
    }
  } else {
    switch (op) {
      case kOpLsl: opcode = kX86Sal32RI; break;
      case kOpLsr: opcode = kX86Shr32RI; break;
      case kOpAsr: opcode = kX86Sar32RI; break;
      case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
      case kOpOr:  opcode = byte_imm ? kX86Or32RI8  : kX86Or32RI;  break;
      case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
      // case kOpSbb: opcode = kX86Sbb32RI; break;
      case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
      case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
      case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
      case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
      case kOpMov:
        /*
         * Moving the constant zero into a register can be specialized as an xor of the register.
         * However, that sets eflags while the move does not. For that reason, always do the move
         * here; callers that don't need to preserve eflags should call LoadConstantNoClobber
         * instead.
         */
        opcode = kX86Mov32RI;
        break;
      case kOpMul:
        opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
        return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), value);
      case kOp2Byte:
        opcode = kX86Mov32RI;
        value = static_cast<int8_t>(value);
        break;
      case kOp2Short:
        opcode = kX86Mov32RI;
        value = static_cast<int16_t>(value);
        break;
      case kOp2Char:
        opcode = kX86Mov32RI;
        value = static_cast<uint16_t>(value);
        break;
      case kOpNeg:
        opcode = kX86Mov32RI;
        value = -value;
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegImm " << op;
    }
  }
  return NewLIR2(opcode, r_dest_src1.GetReg(), value);
}

LIR* X86Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
    bool is64Bit = r_dest_src1.Is64Bit();
    X86OpCode opcode = kX86Nop;
    bool src2_must_be_cx = false;
    switch (op) {
        // X86 unary opcodes
      case kOpMvn:
        OpRegCopy(r_dest_src1, r_src2);
        return OpReg(kOpNot, r_dest_src1);
      case kOpNeg:
        OpRegCopy(r_dest_src1, r_src2);
        return OpReg(kOpNeg, r_dest_src1);
      case kOpRev:
        OpRegCopy(r_dest_src1, r_src2);
        return OpReg(kOpRev, r_dest_src1);
      case kOpRevsh:
        OpRegCopy(r_dest_src1, r_src2);
        OpReg(kOpRev, r_dest_src1);
        return OpRegImm(kOpAsr, r_dest_src1, 16);
        // X86 binary opcodes
      case kOpSub: opcode = is64Bit ? kX86Sub64RR : kX86Sub32RR; break;
      case kOpSbc: opcode = is64Bit ? kX86Sbb64RR : kX86Sbb32RR; break;
      case kOpLsl: opcode = is64Bit ? kX86Sal64RC : kX86Sal32RC; src2_must_be_cx = true; break;
      case kOpLsr: opcode = is64Bit ? kX86Shr64RC : kX86Shr32RC; src2_must_be_cx = true; break;
      case kOpAsr: opcode = is64Bit ? kX86Sar64RC : kX86Sar32RC; src2_must_be_cx = true; break;
      case kOpMov: opcode = is64Bit ? kX86Mov64RR : kX86Mov32RR; break;
      case kOpCmp: opcode = is64Bit ? kX86Cmp64RR : kX86Cmp32RR; break;
      case kOpAdd: opcode = is64Bit ? kX86Add64RR : kX86Add32RR; break;
      case kOpAdc: opcode = is64Bit ? kX86Adc64RR : kX86Adc32RR; break;
      case kOpAnd: opcode = is64Bit ? kX86And64RR : kX86And32RR; break;
      case kOpOr:  opcode = is64Bit ? kX86Or64RR : kX86Or32RR; break;
      case kOpXor: opcode = is64Bit ? kX86Xor64RR : kX86Xor32RR; break;
      case kOp2Byte:
        // TODO: there are several instances of this check.  A utility function perhaps?
        // TODO: Similar to Arm's reg < 8 check.  Perhaps add attribute checks to RegStorage?
        // Use shifts instead of a byte operand if the source can't be byte accessed.
        if (r_src2.GetRegNum() >= rs_rX86_SP.GetRegNum()) {
          NewLIR2(is64Bit ? kX86Mov64RR : kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
          NewLIR2(is64Bit ? kX86Sal64RI : kX86Sal32RI, r_dest_src1.GetReg(), is64Bit ? 56 : 24);
          return NewLIR2(is64Bit ? kX86Sar64RI : kX86Sar32RI, r_dest_src1.GetReg(),
                         is64Bit ? 56 : 24);
        } else {
          opcode = is64Bit ? kX86Bkpt : kX86Movsx8RR;
        }
        break;
      case kOp2Short: opcode = is64Bit ? kX86Bkpt : kX86Movsx16RR; break;
      case kOp2Char: opcode = is64Bit ? kX86Bkpt : kX86Movzx16RR; break;
      case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RR; break;
      default:
        LOG(FATAL) << "Bad case in OpRegReg " << op;
        break;
    }
    CHECK(!src2_must_be_cx || r_src2.GetReg() == rs_rCX.GetReg());
    return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}

LIR* X86Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  DCHECK(!r_base.IsFloat());
  X86OpCode opcode = kX86Nop;
  int dest = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
  switch (move_type) {
    case kMov8GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov8RM;
      break;
    case kMov16GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov16RM;
      break;
    case kMov32GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov32RM;
      break;
    case kMov32FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovssRM;
      break;
    case kMov64FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovsdRM;
      break;
    case kMovU128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovupsRM;
      break;
    case kMovA128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovapsRM;
      break;
    case kMovLo128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovlpsRM;
      break;
    case kMovHi128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovhpsRM;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovRegMem";
      break;
  }

  return NewLIR3(opcode, dest, r_base.GetReg(), offset);
}

LIR* X86Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  DCHECK(!r_base.IsFloat());
  int src = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();

  X86OpCode opcode = kX86Nop;
  switch (move_type) {
    case kMov8GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov8MR;
      break;
    case kMov16GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov16MR;
      break;
    case kMov32GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov32MR;
      break;
    case kMov32FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovssMR;
      break;
    case kMov64FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovsdMR;
      break;
    case kMovU128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovupsMR;
      break;
    case kMovA128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovapsMR;
      break;
    case kMovLo128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovlpsMR;
      break;
    case kMovHi128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovhpsMR;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovMemReg";
      break;
  }

  return NewLIR3(opcode, r_base.GetReg(), offset, src);
}

LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  // The only conditional reg to reg operation supported is Cmov
  DCHECK_EQ(op, kOpCmov);
  DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
  return NewLIR3(r_dest.Is64Bit() ? kX86Cmov64RRC : kX86Cmov32RRC, r_dest.GetReg(),
                 r_src.GetReg(), X86ConditionEncoding(cc));
}

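// Apply 'op' to r_dest with a memory source operand at [r_base + offset], selecting the
// 64-bit encodings when r_dest is a 64-bit register; the extension ops are always 32-bit loads.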
LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
  bool is64Bit = r_dest.Is64Bit();
  X86OpCode opcode = kX86Nop;
  switch (op) {
      // X86 binary opcodes
    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
    case kOp2Byte: opcode = kX86Movsx8RM; break;
    case kOp2Short: opcode = kX86Movsx16RM; break;
    case kOp2Char: opcode = kX86Movzx16RM; break;
    case kOpMul:
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK(r_base == rs_rX86_SP);
    AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
  }
  return l;
}

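// Apply 'op' directly to a Dalvik value that lives in the frame: [rX86_SP + displacement]
// op= r_value. The destination must not currently be in a physical register, and the access
// is annotated as both a load and a store of the Dalvik register.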
LIR* X86Mir2Lir::OpMemReg(OpKind op, RegLocation rl_dest, int r_value) {
  DCHECK_NE(rl_dest.location, kLocPhysReg);
  int displacement = SRegOffset(rl_dest.s_reg_low);
  bool is64Bit = rl_dest.wide != 0;
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = is64Bit ? kX86Sub64MR : kX86Sub32MR; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64MR : kX86Mov32MR; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64MR : kX86Cmp32MR; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64MR : kX86Add32MR; break;
    case kOpAnd: opcode = is64Bit ? kX86And64MR : kX86And32MR; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64MR : kX86Or32MR; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64MR : kX86Xor32MR; break;
    case kOpLsl: opcode = is64Bit ? kX86Sal64MC : kX86Sal32MC; break;
    case kOpLsr: opcode = is64Bit ? kX86Shr64MC : kX86Shr32MC; break;
    case kOpAsr: opcode = is64Bit ? kX86Sar64MC : kX86Sar32MC; break;
    default:
      LOG(FATAL) << "Bad case in OpMemReg " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, rs_rX86_SP.GetReg(), displacement, r_value);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegLocation rl_value) {
  DCHECK_NE(rl_value.location, kLocPhysReg);
  bool is64Bit = r_dest.Is64Bit();
  int displacement = SRegOffset(rl_value.s_reg_low);
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RM; break;
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP.GetReg(), displacement);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
  }
  return l;
}

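// Three-operand form built from x86's two-operand instructions. When r_dest is distinct from
// both sources, kOpAdd is emitted as an LEA (or a shift-by-one copy when both sources are the
// same register); otherwise the operation is rewritten so one source doubles as the
// destination, using a temp for the shift and borrow cases where operands can't be swapped.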
LIR* X86Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                             RegStorage r_src2) {
  bool is64Bit = r_dest.Is64Bit();
  if (r_dest != r_src1 && r_dest != r_src2) {
    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
      if (r_src1 == r_src2) {
        OpRegCopy(r_dest, r_src1);
        return OpRegImm(kOpLsl, r_dest, 1);
      } else if (r_src1 != rs_rBP) {
        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                       r_src1.GetReg() /* base */, r_src2.GetReg() /* index */,
                       0 /* scale */, 0 /* disp */);
      } else {
        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                       r_src2.GetReg() /* base */, r_src1.GetReg() /* index */,
                       0 /* scale */, 0 /* disp */);
      }
    } else {
      OpRegCopy(r_dest, r_src1);
      return OpRegReg(op, r_dest, r_src2);
    }
  } else if (r_dest == r_src1) {
    return OpRegReg(op, r_dest, r_src2);
  } else {  // r_dest == r_src2
    switch (op) {
      case kOpSub:  // non-commutative
        OpReg(kOpNeg, r_dest);
        op = kOpAdd;
        break;
      case kOpSbc:
      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
        RegStorage t_reg = AllocTemp();
        OpRegCopy(t_reg, r_src1);
        OpRegReg(op, t_reg, r_src2);
        LIR* res = OpRegCopyNoInsert(r_dest, t_reg);
        AppendLIR(res);
        FreeTemp(t_reg);
        return res;
      }
      case kOpAdd:  // commutative
      case kOpOr:
      case kOpAdc:
      case kOpAnd:
      case kOpXor:
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
    }
    return OpRegReg(op, r_dest, r_src1);
  }
}

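// Register-register-immediate form. Multiplies and the 0xFF/0xFFFF masks have dedicated
// encodings on 32-bit targets, and kOpAdd with distinct registers becomes an LEA with the
// immediate folded into the displacement; everything else falls back to a copy plus OpRegImm.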
LIR* X86Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src, int value) {
  if (op == kOpMul && !cu_->target64) {
    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
    return NewLIR3(opcode, r_dest.GetReg(), r_src.GetReg(), value);
  } else if (op == kOpAnd && !cu_->target64) {
    if (value == 0xFF && r_src.Low4()) {
      return NewLIR2(kX86Movzx8RR, r_dest.GetReg(), r_src.GetReg());
    } else if (value == 0xFFFF) {
      return NewLIR2(kX86Movzx16RR, r_dest.GetReg(), r_src.GetReg());
    }
  }
  if (r_dest != r_src) {
    if (false && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
      // TODO: fix bug in LEA encoding when disp == 0
      return NewLIR5(kX86Lea32RA, r_dest.GetReg(),  r5sib_no_base /* base */,
                     r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
    } else if (op == kOpAdd) {  // lea add special case
      return NewLIR5(r_dest.Is64Bit() ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                     r_src.GetReg() /* base */, rs_rX86_SP.GetReg()/*r4sib_no_index*/ /* index */,
                     0 /* scale */, value /* disp */);
    }
    OpRegCopy(r_dest, r_src);
  }
  return OpRegImm(op, r_dest, value);
}

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
  DCHECK_EQ(kX86, cu_->instruction_set);
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT;  break;
    case kOpBx: opcode = kX86JmpT;  break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
  DCHECK_EQ(kX86_64, cu_->instruction_set);
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT;  break;
    case kOpBx: opcode = kX86JmpT;  break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallM;  break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR2(opcode, r_base.GetReg(), disp);
}

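// Materialize a 64-bit constant. Core register pairs get two 32-bit immediate moves; solo
// 64-bit registers use the 32- or 64-bit immediate move forms (or an xor for zero). Double
// destinations are either zeroed with xorps or loaded from the literal pool when a
// base-of-code pointer is available; otherwise the value is assembled from its halves
// (punpckldq for pairs, or via a core temp copy).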
LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int32_t low_reg_val = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
    LIR *res;
    bool is_fp = r_dest.IsFloat();
    // TODO: clean this up once we fully recognize 64-bit storage containers.
    if (is_fp) {
      DCHECK(r_dest.IsDouble());
      if (value == 0) {
        return NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
      } else if (base_of_code_ != nullptr) {
        // We will load the value from the literal area.
        LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
        if (data_target == NULL) {
          data_target = AddWideData(&literal_list_, val_lo, val_hi);
        }

        // Address the start of the method
        RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
        if (rl_method.wide) {
          rl_method = LoadValueWide(rl_method, kCoreReg);
        } else {
          rl_method = LoadValue(rl_method, kCoreReg);
        }

        // Load the proper value from the literal area.
        // We don't know the proper offset for the value yet, so pick one that will force a
        // 4 byte offset.  We will fix this up in the assembler later to have the right
        // value.
        ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
        res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::FloatSolo64(low_reg_val),
                           kDouble, kNotVolatile);
        res->target = data_target;
        res->flags.fixup = kFixupLoad;
        Clobber(rl_method.reg);
        store_method_addr_used_ = true;
      } else {
        if (r_dest.IsPair()) {
          if (val_lo == 0) {
            res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
          } else {
            res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
          }
          if (val_hi != 0) {
            RegStorage r_dest_hi = AllocTempDouble();
            LoadConstantNoClobber(r_dest_hi, val_hi);
            NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
            FreeTemp(r_dest_hi);
          }
        } else {
          RegStorage r_temp = AllocTypedTempWide(false, kCoreReg);
          res = LoadConstantWide(r_temp, value);
          OpRegCopyWide(r_dest, r_temp);
          FreeTemp(r_temp);
        }
      }
    } else {
      if (r_dest.IsPair()) {
        res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
        LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
      } else {
        if (value == 0) {
          res = NewLIR2(kX86Xor64RR, r_dest.GetReg(), r_dest.GetReg());
        } else if (value >= INT_MIN && value <= INT_MAX) {
          res = NewLIR2(kX86Mov64RI32, r_dest.GetReg(), val_lo);
        } else {
          res = NewLIR3(kX86Mov64RI64, r_dest.GetReg(), val_hi, val_lo);
        }
      }
    }
    return res;
}

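// Common load helper for [base + index * scale + displacement]. A 64-bit load into a core
// register pair is split into two 32-bit loads, ordered (with a temp when needed) so that the
// base or index register is not overwritten before the second load. The first LIR generated is
// returned so that a null base still faults on the first access.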
LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                     int displacement, RegStorage r_dest, OpSize size) {
  LIR *load = NULL;
  LIR *load2 = NULL;
  bool is_array = r_index.Valid();
  bool pair = r_dest.IsPair();
  bool is64bit = ((size == k64) || (size == kDouble));
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case k64:
    case kDouble:
      if (r_dest.IsFloat()) {
        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
      } else if (!pair) {
        opcode = is_array ? kX86Mov64RA  : kX86Mov64RM;
      } else {
        opcode = is_array ? kX86Mov32RA  : kX86Mov32RM;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
      if (cu_->target64) {
        opcode = is_array ? kX86Mov64RA  : kX86Mov64RM;
        CHECK_EQ(is_array, false);
        CHECK_EQ(r_dest.IsFloat(), false);
        break;
      }  // else fall-through to k32 case
    case k32:
    case kSingle:
    case kReference:  // TODO: update for reference decompression on 64-bit targets.
      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      if (r_dest.IsFloat()) {
        opcode = is_array ? kX86MovssRA : kX86MovssRM;
        DCHECK(r_dest.IsFloat());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
      break;
    case kSignedByte:
      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
    } else {
      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
      if (r_base == r_dest.GetLow()) {
        load = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
                        displacement + HIWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
      } else {
        load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
                        displacement + HIWORD_OFFSET);
      }
    }
    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK(r_base == rs_rX86_SP);
      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              true /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                                true /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                     displacement + LOWORD_OFFSET);
    } else {
      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
      if (r_base == r_dest.GetLow()) {
        if (r_dest.GetHigh() == r_index) {
          // We can't use either register for the first load.
          RegStorage temp = AllocTemp();
          load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          OpRegCopy(r_dest.GetHigh(), temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
        }
      } else {
        if (r_dest.GetLow() == r_index) {
          // We can't use either register for the first load.
          RegStorage temp = AllocTemp();
          load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          OpRegCopy(r_dest.GetLow(), temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
        }
      }
    }
  }

  // Always return first load generated as this might cause a fault if base is nullptr.
  return load;
}

/* Load value from base + scaled index. */
LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                 int scale, OpSize size) {
  return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size);
}

LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                              OpSize size, VolatileKind is_volatile) {
  // LoadBaseDisp() will emit correct insn for atomic load on x86
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().

  LIR* load = LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
                                  size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    GenMemBarrier(kLoadAny);  // Only a scheduling barrier.
  }

  return load;
}

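// Common store helper for [base + index * scale + displacement]. 64-bit stores from a core
// register pair are split into two 32-bit stores; the first store generated is returned.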
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                      int displacement, RegStorage r_src, OpSize size) {
  LIR *store = NULL;
  LIR *store2 = NULL;
  bool is_array = r_index.Valid();
  bool pair = r_src.IsPair();
  bool is64bit = (size == k64) || (size == kDouble);
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case k64:
    case kDouble:
      if (r_src.IsFloat()) {
        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
      } else if (!pair) {
        opcode = is_array ? kX86Mov64AR  : kX86Mov64MR;
      } else {
        opcode = is_array ? kX86Mov32AR  : kX86Mov32MR;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
      if (cu_->target64) {
        opcode = is_array ? kX86Mov64AR  : kX86Mov64MR;
        CHECK_EQ(is_array, false);
        CHECK_EQ(r_src.IsFloat(), false);
        break;
      }  // else fall-through to k32 case
    case k32:
    case kSingle:
    case kReference:
      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      if (r_src.IsFloat()) {
        opcode = is_array ? kX86MovssAR : kX86MovssMR;
        DCHECK(r_src.IsSingle());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetLowReg());
      store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
    }
    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK(r_base == rs_rX86_SP);
      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              false /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                                false /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetLowReg());
      store2 = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                       displacement + HIWORD_OFFSET, r_src.GetHighReg());
    }
  }
  return store;
}

/* Store value to base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                  int scale, OpSize size) {
  return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size);
}

LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
                               VolatileKind is_volatile) {
  if (UNLIKELY(is_volatile == kVolatile)) {
    GenMemBarrier(kAnyStore);  // Only a scheduling barrier.
  }

  // StoreBaseDisp() will emit correct insn for atomic store on x86
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().

  LIR* store = StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    // A volatile load might follow the volatile store so insert a StoreLoad barrier.
    // This does require a fence, even on x86.
    GenMemBarrier(kAnyAny);
  }

  return store;
}

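// Compare the 32-bit memory operand at [base_reg + offset] against an immediate and branch on
// the given condition. The compare LIR is returned through 'compare' when requested; temp_reg
// is unused here since the x86 compare can take a memory operand directly.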
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                   int offset, int check_value, LIR* target, LIR** compare) {
    LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
            offset, check_value);
    if (compare != nullptr) {
        *compare = inst;
    }
    LIR* branch = OpCondBranch(cond, target);
    return branch;
}

void X86Mir2Lir::AnalyzeMIR() {
  // Assume we don't need a pointer to the base of the code.
  cu_->NewTimingSplit("X86 MIR Analysis");
  store_method_addr_ = false;

  // Walk the MIR looking for interesting items.
  PreOrderDfsIterator iter(mir_graph_);
  BasicBlock* curr_bb = iter.Next();
  while (curr_bb != NULL) {
    AnalyzeBB(curr_bb);
    curr_bb = iter.Next();
  }

  // Did we need a pointer to the method code?
  if (store_method_addr_) {
    base_of_code_ = mir_graph_->GetNewCompilerTemp(kCompilerTempVR, cu_->target64 == true);
  } else {
    base_of_code_ = nullptr;
  }
}

void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
  if (bb->block_type == kDead) {
    // Ignore dead blocks
    return;
  }

  for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    int opcode = mir->dalvikInsn.opcode;
    if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
      AnalyzeExtendedMIR(opcode, bb, mir);
    } else {
      AnalyzeMIR(opcode, bb, mir);
    }
  }
}


void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir) {
  switch (opcode) {
    // Instructions referencing doubles.
    case kMirOpFusedCmplDouble:
    case kMirOpFusedCmpgDouble:
      AnalyzeFPInstruction(opcode, bb, mir);
      break;
    case kMirOpConstVector:
      store_method_addr_ = true;
      break;
    default:
      // Ignore the rest.
      break;
  }
}

void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
  // Looking for
  // - Do we need a pointer to the code (used for packed switches and double lits)?

  switch (opcode) {
    // Instructions referencing doubles.
    case Instruction::CMPL_DOUBLE:
    case Instruction::CMPG_DOUBLE:
    case Instruction::NEG_DOUBLE:
    case Instruction::ADD_DOUBLE:
    case Instruction::SUB_DOUBLE:
    case Instruction::MUL_DOUBLE:
    case Instruction::DIV_DOUBLE:
    case Instruction::REM_DOUBLE:
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::DIV_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE_2ADDR:
      AnalyzeFPInstruction(opcode, bb, mir);
      break;

    // Packed switches and array fills need a pointer to the base of the method.
    case Instruction::FILL_ARRAY_DATA:
    case Instruction::PACKED_SWITCH:
      store_method_addr_ = true;
      break;
    case Instruction::INVOKE_STATIC:
      AnalyzeInvokeStatic(opcode, bb, mir);
      break;
    default:
      // Other instructions are not interesting yet.
      break;
  }
}

void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir) {
  // Look at all the uses, and see if they are double constants.
  uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
  int next_sreg = 0;
  if (attrs & DF_UA) {
    if (attrs & DF_A_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UB) {
    if (attrs & DF_B_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UC) {
    if (attrs & DF_C_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
    }
  }
}

void X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
  // If this is a double literal, we will want it in the literal pool on 32b platforms.
  if (use.is_const && !cu_->target64) {
    store_method_addr_ = true;
  }
}

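// Like UpdateLoc()/UpdateLocWide(), but if the value sits in a physical register whose class
// (core vs. float) does not match the value's fp attribute, the temp is clobbered and freed so
// the caller will reload it from the Dalvik frame.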
RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc, int reg_class) {
  loc = UpdateLoc(loc);
  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
    if (GetRegInfo(loc.reg)->IsTemp()) {
      Clobber(loc.reg);
      FreeTemp(loc.reg);
      loc.reg = RegStorage::InvalidReg();
      loc.location = kLocDalvikFrame;
    }
  }
  DCHECK(CheckCorePoolSanity());
  return loc;
}

RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc, int reg_class) {
  loc = UpdateLocWide(loc);
  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
    if (GetRegInfo(loc.reg)->IsTemp()) {
      Clobber(loc.reg);
      FreeTemp(loc.reg);
      loc.reg = RegStorage::InvalidReg();
      loc.location = kLocDalvikFrame;
    }
  }
  DCHECK(CheckCorePoolSanity());
  return loc;
}

void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
  // For now this is only relevant for x86-32.
  if (cu_->target64) {
    return;
  }

  uint32_t index = mir->dalvikInsn.vB;
  if (!(mir->optimization_flags & MIR_INLINED)) {
    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
    DexFileMethodInliner* method_inliner =
      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
    InlineMethod method;
    if (method_inliner->IsIntrinsic(index, &method)) {
      switch (method.opcode) {
        case kIntrinsicAbsDouble:
        case kIntrinsicMinMaxDouble:
          store_method_addr_ = true;
          break;
        default:
          break;
      }
    }
  }
}

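// Invoke a quick entrypoint trampoline via its Thread offset, using the 64-bit offset on
// x86-64 and the 32-bit offset on x86.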
LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
  if (cu_->target64) {
    return OpThreadMem(op, GetThreadOffset<8>(trampoline));
  } else {
    return OpThreadMem(op, GetThreadOffset<4>(trampoline));
  }
}

}  // namespace art