1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 /* This file contains register allocation support. */
18 
19 #include "dex/compiler_ir.h"
20 #include "dex/compiler_internals.h"
21 #include "mir_to_lir-inl.h"
22 
23 namespace art {
24 
25 /*
26  * Free all allocated temps in the temp pools.  Note that this does
27  * not affect the "liveness" of a temp register, which will stay
28  * live until it is either explicitly killed or reallocated.
29  */
30 void Mir2Lir::ResetRegPool() {
31   GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
32   for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
33     info->MarkFree();
34   }
35   // Reset temp tracking sanity check.
36   if (kIsDebugBuild) {
37     live_sreg_ = INVALID_SREG;
38   }
39 }
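// Editorial note (not part of the original file): a backend's per-opcode loop
// typically calls ResetRegPool() once per MIR instruction so the temps used by
// the previous instruction become reusable while their liveness is untouched.
// A rough sketch (driver names assumed, the real loop lives elsewhere in Mir2Lir):
//
//   for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
//     ResetRegPool();                      // temps free for reuse, liveness kept
//     CompileDalvikInstruction(mir, ...);  // per-opcode codegen hook (assumed)
//   }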
40 
41 Mir2Lir::RegisterInfo::RegisterInfo(RegStorage r, const ResourceMask& mask)
42   : reg_(r), is_temp_(false), wide_value_(false), dirty_(false), aliased_(false), partner_(r),
43     s_reg_(INVALID_SREG), def_use_mask_(mask), master_(this), def_start_(nullptr),
44     def_end_(nullptr), alias_chain_(nullptr) {
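  // Editorial note: the switch below hands out one mask bit per 4 units of
  // StorageSize() (4 -> 0x1, 8 -> 0x3, ..., 128 -> 0xffffffff).  used_storage_
  // and liveness_ are then seeded so an invalid register reads as fully
  // occupied, while a valid register starts with its own storage bits free.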
45   switch (r.StorageSize()) {
46     case 0: storage_mask_ = 0xffffffff; break;
47     case 4: storage_mask_ = 0x00000001; break;
48     case 8: storage_mask_ = 0x00000003; break;
49     case 16: storage_mask_ = 0x0000000f; break;
50     case 32: storage_mask_ = 0x000000ff; break;
51     case 64: storage_mask_ = 0x0000ffff; break;
52     case 128: storage_mask_ = 0xffffffff; break;
53   }
54   used_storage_ = r.Valid() ? ~storage_mask_ : storage_mask_;
55   liveness_ = used_storage_;
56 }
57 
58 Mir2Lir::RegisterPool::RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
59                                     const ArrayRef<const RegStorage>& core_regs,
60                                     const ArrayRef<const RegStorage>& core64_regs,
61                                     const ArrayRef<const RegStorage>& sp_regs,
62                                     const ArrayRef<const RegStorage>& dp_regs,
63                                     const ArrayRef<const RegStorage>& reserved_regs,
64                                     const ArrayRef<const RegStorage>& reserved64_regs,
65                                     const ArrayRef<const RegStorage>& core_temps,
66                                     const ArrayRef<const RegStorage>& core64_temps,
67                                     const ArrayRef<const RegStorage>& sp_temps,
68                                     const ArrayRef<const RegStorage>& dp_temps) :
69     core_regs_(arena, core_regs.size()), next_core_reg_(0),
70     core64_regs_(arena, core64_regs.size()), next_core64_reg_(0),
71     sp_regs_(arena, sp_regs.size()), next_sp_reg_(0),
72     dp_regs_(arena, dp_regs.size()), next_dp_reg_(0), m2l_(m2l)  {
73   // Initialize the fast lookup map.
74   m2l_->reginfo_map_.Reset();
75   if (kIsDebugBuild) {
76     m2l_->reginfo_map_.Resize(RegStorage::kMaxRegs);
77     for (unsigned i = 0; i < RegStorage::kMaxRegs; i++) {
78       m2l_->reginfo_map_.Insert(nullptr);
79     }
80   } else {
81     m2l_->reginfo_map_.SetSize(RegStorage::kMaxRegs);
82   }
83 
84   // Construct the register pool.
85   for (const RegStorage& reg : core_regs) {
86     RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
87     m2l_->reginfo_map_.Put(reg.GetReg(), info);
88     core_regs_.Insert(info);
89   }
90   for (const RegStorage& reg : core64_regs) {
91     RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
92     m2l_->reginfo_map_.Put(reg.GetReg(), info);
93     core64_regs_.Insert(info);
94   }
95   for (const RegStorage& reg : sp_regs) {
96     RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
97     m2l_->reginfo_map_.Put(reg.GetReg(), info);
98     sp_regs_.Insert(info);
99   }
100   for (const RegStorage& reg : dp_regs) {
101     RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
102     m2l_->reginfo_map_.Put(reg.GetReg(), info);
103     dp_regs_.Insert(info);
104   }
105 
106   // Keep special registers from being allocated.
107   for (RegStorage reg : reserved_regs) {
108     m2l_->MarkInUse(reg);
109   }
110   for (RegStorage reg : reserved64_regs) {
111     m2l_->MarkInUse(reg);
112   }
113 
114   // Mark temp regs - all others not in use can be used for promotion
115   for (RegStorage reg : core_temps) {
116     m2l_->MarkTemp(reg);
117   }
118   for (RegStorage reg : core64_temps) {
119     m2l_->MarkTemp(reg);
120   }
121   for (RegStorage reg : sp_temps) {
122     m2l_->MarkTemp(reg);
123   }
124   for (RegStorage reg : dp_temps) {
125     m2l_->MarkTemp(reg);
126   }
127 
128   // Add an entry for InvalidReg with zero'd mask.
129   RegisterInfo* invalid_reg = new (arena) RegisterInfo(RegStorage::InvalidReg(), kEncodeNone);
130   m2l_->reginfo_map_.Put(RegStorage::InvalidReg().GetReg(), invalid_reg);
131 
132   // Existence of core64 registers implies wide references.
133   if (core64_regs_.Size() != 0) {
134     ref_regs_ = &core64_regs_;
135     next_ref_reg_ = &next_core64_reg_;
136   } else {
137     ref_regs_ = &core_regs_;
138     next_ref_reg_ = &next_core_reg_;
139   }
140 }
141 
142 void Mir2Lir::DumpRegPool(GrowableArray<RegisterInfo*>* regs) {
143   LOG(INFO) << "================================================";
144   GrowableArray<RegisterInfo*>::Iterator it(regs);
145   for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
146     LOG(INFO) << StringPrintf(
147         "R[%d:%d:%c]: T:%d, U:%d, W:%d, p:%d, LV:%d, D:%d, SR:%d, DEF:%d",
148         info->GetReg().GetReg(), info->GetReg().GetRegNum(), info->GetReg().IsFloat() ?  'f' : 'c',
149         info->IsTemp(), info->InUse(), info->IsWide(), info->Partner().GetReg(), info->IsLive(),
150         info->IsDirty(), info->SReg(), info->DefStart() != nullptr);
151   }
152   LOG(INFO) << "================================================";
153 }
154 
155 void Mir2Lir::DumpCoreRegPool() {
156   DumpRegPool(&reg_pool_->core_regs_);
157   DumpRegPool(&reg_pool_->core64_regs_);
158 }
159 
160 void Mir2Lir::DumpFpRegPool() {
161   DumpRegPool(&reg_pool_->sp_regs_);
162   DumpRegPool(&reg_pool_->dp_regs_);
163 }
164 
165 void Mir2Lir::DumpRegPools() {
166   LOG(INFO) << "Core registers";
167   DumpCoreRegPool();
168   LOG(INFO) << "FP registers";
169   DumpFpRegPool();
170 }
171 
172 void Mir2Lir::Clobber(RegStorage reg) {
173   if (UNLIKELY(reg.IsPair())) {
174     DCHECK(!GetRegInfo(reg.GetLow())->IsAliased());
175     Clobber(reg.GetLow());
176     DCHECK(!GetRegInfo(reg.GetHigh())->IsAliased());
177     Clobber(reg.GetHigh());
178   } else {
179     RegisterInfo* info = GetRegInfo(reg);
180     if (info->IsTemp() && !info->IsDead()) {
181       if (info->GetReg().NotExactlyEquals(info->Partner())) {
182         ClobberBody(GetRegInfo(info->Partner()));
183       }
184       ClobberBody(info);
185       if (info->IsAliased()) {
186         ClobberAliases(info, info->StorageMask());
187       } else {
188         RegisterInfo* master = info->Master();
189         if (info != master) {
190           ClobberBody(info->Master());
191           ClobberAliases(info->Master(), info->StorageMask());
192         }
193       }
194     }
195   }
196 }
197 
198 void Mir2Lir::ClobberAliases(RegisterInfo* info, uint32_t clobber_mask) {
199   for (RegisterInfo* alias = info->GetAliasChain(); alias != nullptr;
200        alias = alias->GetAliasChain()) {
201     DCHECK(!alias->IsAliased());  // Only the master should be marked as aliased.
202     // Only clobber if we have overlap.
203     if ((alias->StorageMask() & clobber_mask) != 0) {
204       ClobberBody(alias);
205     }
206   }
207 }
208 
209 /*
210  * Break the association between a Dalvik vreg and a physical temp register of either register
211  * class.
212  * TODO: Ideally, the public version of this code should not exist.  Besides its local usage
213  * in the register utilities, it is also used by code gen routines to work around a deficiency in
214  * local register allocation, which fails to distinguish between the "in" and "out" identities
215  * of Dalvik vregs.  This can result in useless register copies when the same Dalvik vreg
216  * is used both as the source and destination register of an operation in which the type
217  * changes (for example: INT_TO_FLOAT v1, v1).  Revisit when improved register allocation is
218  * addressed.
219  */
220 void Mir2Lir::ClobberSReg(int s_reg) {
221   if (s_reg != INVALID_SREG) {
222     if (kIsDebugBuild && s_reg == live_sreg_) {
223       live_sreg_ = INVALID_SREG;
224     }
225     GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
226     for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
227       if (info->SReg() == s_reg) {
228         if (info->GetReg().NotExactlyEquals(info->Partner())) {
229           // Dealing with a pair - clobber the other half.
230           DCHECK(!info->IsAliased());
231           ClobberBody(GetRegInfo(info->Partner()));
232         }
233         ClobberBody(info);
234         if (info->IsAliased()) {
235           ClobberAliases(info, info->StorageMask());
236         }
237       }
238     }
239   }
240 }
241 
242 /*
243  * SSA names associated with the initial definitions of Dalvik
244  * registers are the same as the Dalvik register number (and
245  * thus take the same position in the promotion_map).  However,
246  * the special Method* and compiler temp registers use negative
247  * v_reg numbers to distinguish them and can have an arbitrary
248  * ssa name (above the last original Dalvik register).  This function
249  * maps SSA names to positions in the promotion_map array.
250  */
251 int Mir2Lir::SRegToPMap(int s_reg) {
252   DCHECK_LT(s_reg, mir_graph_->GetNumSSARegs());
253   DCHECK_GE(s_reg, 0);
254   int v_reg = mir_graph_->SRegToVReg(s_reg);
255   if (v_reg >= 0) {
256     DCHECK_LT(v_reg, cu_->num_dalvik_registers);
257     return v_reg;
258   } else {
259     /*
260      * It must be the case that the v_reg for a temporary is less than or equal to the
261      * base reg for temps. For that reason, "position" must be zero or positive.
262      */
263     unsigned int position = std::abs(v_reg) - std::abs(static_cast<int>(kVRegTempBaseReg));
264 
265     // The temporaries are placed after dalvik registers in the promotion map
266     DCHECK_LT(position, mir_graph_->GetNumUsedCompilerTemps());
267     return cu_->num_dalvik_registers + position;
268   }
269 }
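// Editorial example (sizes assumed): with 10 Dalvik registers, an SSA name whose
// v_reg is 3 maps to promotion_map_[3]; a compiler temp whose v_reg is
// kVRegTempBaseReg - 2 yields position == 2 and maps to promotion_map_[10 + 2],
// i.e. compiler temps are appended after the Dalvik vregs as described above.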
270 
271 // TODO: refactor following Alloc/Record routines - much commonality.
272 void Mir2Lir::RecordCorePromotion(RegStorage reg, int s_reg) {
273   int p_map_idx = SRegToPMap(s_reg);
274   int v_reg = mir_graph_->SRegToVReg(s_reg);
275   int reg_num = reg.GetRegNum();
276   GetRegInfo(reg)->MarkInUse();
277   core_spill_mask_ |= (1 << reg_num);
278   // Include reg for later sort
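  // (Editorial note: each vmap entry packs the promoted machine register number
  // above the low VREG_NUM_WIDTH bits, which hold the Dalvik v_reg.)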
279   core_vmap_table_.push_back(reg_num << VREG_NUM_WIDTH | (v_reg & ((1 << VREG_NUM_WIDTH) - 1)));
280   num_core_spills_++;
281   promotion_map_[p_map_idx].core_location = kLocPhysReg;
282   promotion_map_[p_map_idx].core_reg = reg_num;
283 }
284 
285 /* Reserve a callee-save register.  Return InvalidReg if none available */
286 RegStorage Mir2Lir::AllocPreservedCoreReg(int s_reg) {
287   RegStorage res;
288   /*
289    * Note: it really doesn't matter much whether we allocate from the core or core64
290    * pool for 64-bit targets - but for some targets it does matter whether allocations
291    * happens from the single or double pool.  This entire section of code could stand
292    * a good refactoring.
293    */
294   GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->core_regs_);
295   for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
296     if (!info->IsTemp() && !info->InUse()) {
297       res = info->GetReg();
298       RecordCorePromotion(res, s_reg);
299       break;
300     }
301   }
302   return res;
303 }
304 
305 void Mir2Lir::RecordFpPromotion(RegStorage reg, int s_reg) {
306   DCHECK_NE(cu_->instruction_set, kThumb2);
307   int p_map_idx = SRegToPMap(s_reg);
308   int v_reg = mir_graph_->SRegToVReg(s_reg);
309   int reg_num = reg.GetRegNum();
310   GetRegInfo(reg)->MarkInUse();
311   fp_spill_mask_ |= (1 << reg_num);
312   // Include reg for later sort
313   fp_vmap_table_.push_back(reg_num << VREG_NUM_WIDTH | (v_reg & ((1 << VREG_NUM_WIDTH) - 1)));
314   num_fp_spills_++;
315   promotion_map_[p_map_idx].fp_location = kLocPhysReg;
316   promotion_map_[p_map_idx].fp_reg = reg.GetReg();
317 }
318 
319 // Reserve a callee-save floating point register.
320 RegStorage Mir2Lir::AllocPreservedFpReg(int s_reg) {
321   /*
322    * For targets other than Thumb2, it doesn't matter whether we allocate from
323    * the sp_regs_ or dp_regs_ pool.  Some refactoring is in order here.
324    */
325   DCHECK_NE(cu_->instruction_set, kThumb2);
326   RegStorage res;
327   GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
328   for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
329     if (!info->IsTemp() && !info->InUse()) {
330       res = info->GetReg();
331       RecordFpPromotion(res, s_reg);
332       break;
333     }
334   }
335   return res;
336 }
337 
338 // TODO: this is Thumb2 only.  Remove when DoPromotion refactored.
339 RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) {
340   RegStorage res;
341   UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedDouble";
342   return res;
343 }
344 
345 // TODO: this is Thumb2 only.  Remove when DoPromotion refactored.
346 RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
347   RegStorage res;
348   UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedSingle";
349   return res;
350 }
351 
352 
353 RegStorage Mir2Lir::AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required) {
354   int num_regs = regs.Size();
355   int next = *next_temp;
356   for (int i = 0; i< num_regs; i++) {
357     if (next >= num_regs)
358       next = 0;
359     RegisterInfo* info = regs.Get(next);
360     // Try to allocate a register that doesn't hold a live value.
361     if (info->IsTemp() && !info->InUse() && info->IsDead()) {
362       // If it's wide, split it up.
363       if (info->IsWide()) {
364         // If the pair was associated with a wide value, unmark the partner as well.
365         if (info->SReg() != INVALID_SREG) {
366           RegisterInfo* partner = GetRegInfo(info->Partner());
367           DCHECK_EQ(info->GetReg().GetRegNum(), partner->Partner().GetRegNum());
368           DCHECK(partner->IsWide());
369           partner->SetIsWide(false);
370         }
371         info->SetIsWide(false);
372       }
373       Clobber(info->GetReg());
374       info->MarkInUse();
375       *next_temp = next + 1;
376       return info->GetReg();
377     }
378     next++;
379   }
380   next = *next_temp;
381   // No free non-live regs.  Anything we can kill?
382   for (int i = 0; i< num_regs; i++) {
383     if (next >= num_regs)
384       next = 0;
385     RegisterInfo* info = regs.Get(next);
386     if (info->IsTemp() && !info->InUse()) {
387       // Got one.  Kill it.
388       ClobberSReg(info->SReg());
389       Clobber(info->GetReg());
390       info->MarkInUse();
391       if (info->IsWide()) {
392         RegisterInfo* partner = GetRegInfo(info->Partner());
393         DCHECK_EQ(info->GetReg().GetRegNum(), partner->Partner().GetRegNum());
394         DCHECK(partner->IsWide());
395         info->SetIsWide(false);
396         partner->SetIsWide(false);
397       }
398       *next_temp = next + 1;
399       return info->GetReg();
400     }
401     next++;
402   }
403   if (required) {
404     CodegenDump();
405     DumpRegPools();
406     LOG(FATAL) << "No free temp registers";
407   }
408   return RegStorage::InvalidReg();  // No register available
409 }
410 
411 RegStorage Mir2Lir::AllocTemp(bool required) {
412   return AllocTempBody(reg_pool_->core_regs_, &reg_pool_->next_core_reg_, required);
413 }
414 
415 RegStorage Mir2Lir::AllocTempWide(bool required) {
416   RegStorage res;
417   if (reg_pool_->core64_regs_.Size() != 0) {
418     res = AllocTempBody(reg_pool_->core64_regs_, &reg_pool_->next_core64_reg_, required);
419   } else {
420     RegStorage low_reg = AllocTemp();
421     RegStorage high_reg = AllocTemp();
422     res = RegStorage::MakeRegPair(low_reg, high_reg);
423   }
424   if (required) {
425     CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kIgnoreRef, FPCheck::kCheckNotFP);
426   }
427   return res;
428 }
429 
430 RegStorage Mir2Lir::AllocTempRef(bool required) {
431   RegStorage res = AllocTempBody(*reg_pool_->ref_regs_, reg_pool_->next_ref_reg_, required);
432   if (required) {
433     DCHECK(!res.IsPair());
434     CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckRef, FPCheck::kCheckNotFP);
435   }
436   return res;
437 }
438 
439 RegStorage Mir2Lir::AllocTempSingle(bool required) {
440   RegStorage res = AllocTempBody(reg_pool_->sp_regs_, &reg_pool_->next_sp_reg_, required);
441   if (required) {
442     DCHECK(res.IsSingle()) << "Reg: 0x" << std::hex << res.GetRawBits();
443     CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
444   }
445   return res;
446 }
447 
448 RegStorage Mir2Lir::AllocTempDouble(bool required) {
449   RegStorage res = AllocTempBody(reg_pool_->dp_regs_, &reg_pool_->next_dp_reg_, required);
450   if (required) {
451     DCHECK(res.IsDouble()) << "Reg: 0x" << std::hex << res.GetRawBits();
452     CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
453   }
454   return res;
455 }
456 
457 RegStorage Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class, bool required) {
458   DCHECK_NE(reg_class, kRefReg);  // NOTE: the Dalvik width of a reference is always 32 bits.
459   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
460     return AllocTempDouble(required);
461   }
462   return AllocTempWide(required);
463 }
464 
465 RegStorage Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class, bool required) {
466   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
467     return AllocTempSingle(required);
468   } else if (reg_class == kRefReg) {
469     return AllocTempRef(required);
470   }
471   return AllocTemp(required);
472 }
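// Illustrative usage (editorial sketch, not from the original file): a typical
// code generation sequence grabs a temp, uses it, and releases it within the
// same opcode so the round-robin pools above do not run dry:
//
//   RegStorage t = AllocTemp();        // may kill a live temp if none are dead
//   OpRegRegImm(kOpAdd, t, src, 1);    // example ALU helper; exact op varies by target
//   FreeTemp(t);                       // returns t to the pool, liveness untouched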
473 
474 RegStorage Mir2Lir::FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg) {
475   RegStorage res;
476   GrowableArray<RegisterInfo*>::Iterator it(&regs);
477   for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
478     if ((info->SReg() == s_reg) && info->IsLive()) {
479       res = info->GetReg();
480       break;
481     }
482   }
483   return res;
484 }
485 
486 RegStorage Mir2Lir::AllocLiveReg(int s_reg, int reg_class, bool wide) {
487   RegStorage reg;
488   if (reg_class == kRefReg) {
489     reg = FindLiveReg(*reg_pool_->ref_regs_, s_reg);
490     CheckRegStorage(reg, WidenessCheck::kCheckNotWide, RefCheck::kCheckRef, FPCheck::kCheckNotFP);
491   }
492   if (!reg.Valid() && ((reg_class == kAnyReg) || (reg_class == kFPReg))) {
493     reg = FindLiveReg(wide ? reg_pool_->dp_regs_ : reg_pool_->sp_regs_, s_reg);
494   }
495   if (!reg.Valid() && (reg_class != kFPReg)) {
496     if (cu_->target64) {
497       reg = FindLiveReg(wide || reg_class == kRefReg ? reg_pool_->core64_regs_ :
498                                                        reg_pool_->core_regs_, s_reg);
499     } else {
500       reg = FindLiveReg(reg_pool_->core_regs_, s_reg);
501     }
502   }
503   if (reg.Valid()) {
504     if (wide && !reg.IsFloat() && !cu_->target64) {
505       // Only allow reg pairs for core regs on 32-bit targets.
506       RegStorage high_reg = FindLiveReg(reg_pool_->core_regs_, s_reg + 1);
507       if (high_reg.Valid()) {
508         reg = RegStorage::MakeRegPair(reg, high_reg);
509         MarkWide(reg);
510       } else {
511         // Only half available.
512         reg = RegStorage::InvalidReg();
513       }
514     }
515     if (reg.Valid() && (wide != GetRegInfo(reg)->IsWide())) {
516       // Width mismatch - don't try to reuse.
517       reg = RegStorage::InvalidReg();
518     }
519   }
520   if (reg.Valid()) {
521     if (reg.IsPair()) {
522       RegisterInfo* info_low = GetRegInfo(reg.GetLow());
523       RegisterInfo* info_high = GetRegInfo(reg.GetHigh());
524       if (info_low->IsTemp()) {
525         info_low->MarkInUse();
526       }
527       if (info_high->IsTemp()) {
528         info_high->MarkInUse();
529       }
530     } else {
531       RegisterInfo* info = GetRegInfo(reg);
532       if (info->IsTemp()) {
533         info->MarkInUse();
534       }
535     }
536   } else {
537     // Either not found, or something didn't match up. Clobber to prevent any stale instances.
538     ClobberSReg(s_reg);
539     if (wide) {
540       ClobberSReg(s_reg + 1);
541     }
542   }
543   CheckRegStorage(reg, WidenessCheck::kIgnoreWide,
544                   reg_class == kRefReg ? RefCheck::kCheckRef : RefCheck::kIgnoreRef,
545                   FPCheck::kIgnoreFP);
546   return reg;
547 }
548 
549 void Mir2Lir::FreeTemp(RegStorage reg) {
550   if (reg.IsPair()) {
551     FreeTemp(reg.GetLow());
552     FreeTemp(reg.GetHigh());
553   } else {
554     RegisterInfo* p = GetRegInfo(reg);
555     if (p->IsTemp()) {
556       p->MarkFree();
557       p->SetIsWide(false);
558       p->SetPartner(reg);
559     }
560   }
561 }
562 
563 void Mir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
564   DCHECK(rl_keep.wide);
565   DCHECK(rl_free.wide);
566   int free_low = rl_free.reg.GetLowReg();
567   int free_high = rl_free.reg.GetHighReg();
568   int keep_low = rl_keep.reg.GetLowReg();
569   int keep_high = rl_keep.reg.GetHighReg();
570   if ((free_low != keep_low) && (free_low != keep_high) &&
571       (free_high != keep_low) && (free_high != keep_high)) {
572     // No overlap, free both
573     FreeTemp(rl_free.reg);
574   }
575 }
576 
577 bool Mir2Lir::IsLive(RegStorage reg) {
578   bool res;
579   if (reg.IsPair()) {
580     RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
581     RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
582     DCHECK_EQ(p_lo->IsLive(), p_hi->IsLive());
583     res = p_lo->IsLive() || p_hi->IsLive();
584   } else {
585     RegisterInfo* p = GetRegInfo(reg);
586     res = p->IsLive();
587   }
588   return res;
589 }
590 
591 bool Mir2Lir::IsTemp(RegStorage reg) {
592   bool res;
593   if (reg.IsPair()) {
594     RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
595     RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
596     res = p_lo->IsTemp() || p_hi->IsTemp();
597   } else {
598     RegisterInfo* p = GetRegInfo(reg);
599     res = p->IsTemp();
600   }
601   return res;
602 }
603 
604 bool Mir2Lir::IsPromoted(RegStorage reg) {
605   bool res;
606   if (reg.IsPair()) {
607     RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
608     RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
609     res = !p_lo->IsTemp() || !p_hi->IsTemp();
610   } else {
611     RegisterInfo* p = GetRegInfo(reg);
612     res = !p->IsTemp();
613   }
614   return res;
615 }
616 
617 bool Mir2Lir::IsDirty(RegStorage reg) {
618   bool res;
619   if (reg.IsPair()) {
620     RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
621     RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
622     res = p_lo->IsDirty() || p_hi->IsDirty();
623   } else {
624     RegisterInfo* p = GetRegInfo(reg);
625     res = p->IsDirty();
626   }
627   return res;
628 }
629 
630 /*
631  * Similar to AllocTemp(), but forces the allocation of a specific
632  * register.  No check is made to see if the register was previously
633  * allocated.  Use with caution.
634  */
635 void Mir2Lir::LockTemp(RegStorage reg) {
636   DCHECK(IsTemp(reg));
637   if (reg.IsPair()) {
638     RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
639     RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
640     p_lo->MarkInUse();
641     p_lo->MarkDead();
642     p_hi->MarkInUse();
643     p_hi->MarkDead();
644   } else {
645     RegisterInfo* p = GetRegInfo(reg);
646     p->MarkInUse();
647     p->MarkDead();
648   }
649 }
650 
651 void Mir2Lir::ResetDef(RegStorage reg) {
652   if (reg.IsPair()) {
653     GetRegInfo(reg.GetLow())->ResetDefBody();
654     GetRegInfo(reg.GetHigh())->ResetDefBody();
655   } else {
656     GetRegInfo(reg)->ResetDefBody();
657   }
658 }
659 
660 void Mir2Lir::NullifyRange(RegStorage reg, int s_reg) {
661   RegisterInfo* info = nullptr;
662   RegStorage rs = reg.IsPair() ? reg.GetLow() : reg;
663   if (IsTemp(rs)) {
664     info = GetRegInfo(reg);
665   }
666   if ((info != nullptr) && (info->DefStart() != nullptr) && (info->DefEnd() != nullptr)) {
667     DCHECK_EQ(info->SReg(), s_reg);  // Make sure we're on the same page.
668     for (LIR* p = info->DefStart();; p = p->next) {
669       NopLIR(p);
670       if (p == info->DefEnd()) {
671         break;
672       }
673     }
674   }
675 }
676 
677 /*
678  * Mark the beginning and end LIR of a def sequence.  Note that
679  * on entry start points to the LIR prior to the beginning of the
680  * sequence.
681  */
682 void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish) {
683   DCHECK(!rl.wide);
684   DCHECK(start && start->next);
685   DCHECK(finish);
686   RegisterInfo* p = GetRegInfo(rl.reg);
687   p->SetDefStart(start->next);
688   p->SetDefEnd(finish);
689 }
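// Editorial sketch of the bracketing pattern the comment above describes
// (member names as used elsewhere in Mir2Lir; treat this as an assumption here):
//
//   LIR* def_start = last_lir_insn_;          // LIR *before* the def sequence
//   ... emit the instructions that define rl.reg ...
//   MarkDef(rl, def_start, last_lir_insn_);   // finish == last emitted LIR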
690 
691 /*
692  * Mark the beginning and end LIR of a def sequence.  Note that
693  * on entry start points to the LIR prior to the beginning of the
694  * sequence.
695  */
696 void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) {
697   DCHECK(rl.wide);
698   DCHECK(start && start->next);
699   DCHECK(finish);
700   RegisterInfo* p;
701   if (rl.reg.IsPair()) {
702     p = GetRegInfo(rl.reg.GetLow());
703     ResetDef(rl.reg.GetHigh());  // Only track low of pair
704   } else {
705     p = GetRegInfo(rl.reg);
706   }
707   p->SetDefStart(start->next);
708   p->SetDefEnd(finish);
709 }
710 
711 void Mir2Lir::ResetDefLoc(RegLocation rl) {
712   DCHECK(!rl.wide);
713   if (IsTemp(rl.reg) && !(cu_->disable_opt & (1 << kSuppressLoads))) {
714     NullifyRange(rl.reg, rl.s_reg_low);
715   }
716   ResetDef(rl.reg);
717 }
718 
719 void Mir2Lir::ResetDefLocWide(RegLocation rl) {
720   DCHECK(rl.wide);
721   // If pair, only track low reg of pair.
722   RegStorage rs = rl.reg.IsPair() ? rl.reg.GetLow() : rl.reg;
723   if (IsTemp(rs) && !(cu_->disable_opt & (1 << kSuppressLoads))) {
724     NullifyRange(rs, rl.s_reg_low);
725   }
726   ResetDef(rs);
727 }
728 
729 void Mir2Lir::ResetDefTracking() {
730   GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
731   for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
732     info->ResetDefBody();
733   }
734 }
735 
736 void Mir2Lir::ClobberAllTemps() {
737   GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
738   for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
739     ClobberBody(info);
740   }
741 }
742 
743 void Mir2Lir::FlushRegWide(RegStorage reg) {
744   if (reg.IsPair()) {
745     RegisterInfo* info1 = GetRegInfo(reg.GetLow());
746     RegisterInfo* info2 = GetRegInfo(reg.GetHigh());
747     DCHECK(info1 && info2 && info1->IsWide() && info2->IsWide() &&
748            (info1->Partner().ExactlyEquals(info2->GetReg())) &&
749            (info2->Partner().ExactlyEquals(info1->GetReg())));
750     if ((info1->IsLive() && info1->IsDirty()) || (info2->IsLive() && info2->IsDirty())) {
751       if (!(info1->IsTemp() && info2->IsTemp())) {
752         /* Should not happen.  If it does, there's a problem in eval_loc */
753         LOG(FATAL) << "Long half-temp, half-promoted";
754       }
755 
756       info1->SetIsDirty(false);
757       info2->SetIsDirty(false);
758       if (mir_graph_->SRegToVReg(info2->SReg()) < mir_graph_->SRegToVReg(info1->SReg())) {
759         info1 = info2;
760       }
761       int v_reg = mir_graph_->SRegToVReg(info1->SReg());
762       ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
763       StoreBaseDisp(TargetPtrReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
764     }
765   } else {
766     RegisterInfo* info = GetRegInfo(reg);
767     if (info->IsLive() && info->IsDirty()) {
768       info->SetIsDirty(false);
769       int v_reg = mir_graph_->SRegToVReg(info->SReg());
770       ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
771       StoreBaseDisp(TargetPtrReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
772     }
773   }
774 }
775 
776 void Mir2Lir::FlushReg(RegStorage reg) {
777   DCHECK(!reg.IsPair());
778   RegisterInfo* info = GetRegInfo(reg);
779   if (info->IsLive() && info->IsDirty()) {
780     info->SetIsDirty(false);
781     int v_reg = mir_graph_->SRegToVReg(info->SReg());
782     ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
783     StoreBaseDisp(TargetPtrReg(kSp), VRegOffset(v_reg), reg, kWord, kNotVolatile);
784   }
785 }
786 
787 void Mir2Lir::FlushSpecificReg(RegisterInfo* info) {
788   if (info->IsWide()) {
789     FlushRegWide(info->GetReg());
790   } else {
791     FlushReg(info->GetReg());
792   }
793 }
794 
795 void Mir2Lir::FlushAllRegs() {
796   GrowableArray<RegisterInfo*>::Iterator it(&tempreg_info_);
797   for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
798     if (info->IsDirty() && info->IsLive()) {
799       FlushSpecificReg(info);
800     }
801     info->MarkDead();
802     info->SetIsWide(false);
803   }
804 }
805 
806 
807 bool Mir2Lir::RegClassMatches(int reg_class, RegStorage reg) {
808   if (reg_class == kAnyReg) {
809     return true;
810   } else if ((reg_class == kCoreReg) || (reg_class == kRefReg)) {
811     /*
812      * For this purpose, consider Core and Ref to be the same class. We aren't dealing
813      * with width here - that should be checked at a higher level (if needed).
814      */
815     return !reg.IsFloat();
816   } else {
817     return reg.IsFloat();
818   }
819 }
820 
821 void Mir2Lir::MarkLive(RegLocation loc) {
822   RegStorage reg = loc.reg;
823   if (!IsTemp(reg)) {
824     return;
825   }
826   int s_reg = loc.s_reg_low;
827   if (s_reg == INVALID_SREG) {
828     // Can't be live if no associated sreg.
829     if (reg.IsPair()) {
830       GetRegInfo(reg.GetLow())->MarkDead();
831       GetRegInfo(reg.GetHigh())->MarkDead();
832     } else {
833       GetRegInfo(reg)->MarkDead();
834     }
835   } else {
836     if (reg.IsPair()) {
837       RegisterInfo* info_lo = GetRegInfo(reg.GetLow());
838       RegisterInfo* info_hi = GetRegInfo(reg.GetHigh());
839       if (info_lo->IsLive() && (info_lo->SReg() == s_reg) && info_hi->IsLive() &&
840           (info_hi->SReg() == s_reg)) {
841         return;  // Already live.
842       }
843       ClobberSReg(s_reg);
844       ClobberSReg(s_reg + 1);
845       info_lo->MarkLive(s_reg);
846       info_hi->MarkLive(s_reg + 1);
847     } else {
848       RegisterInfo* info = GetRegInfo(reg);
849       if (info->IsLive() && (info->SReg() == s_reg)) {
850         return;  // Already live.
851       }
852       ClobberSReg(s_reg);
853       if (loc.wide) {
854         ClobberSReg(s_reg + 1);
855       }
856       info->MarkLive(s_reg);
857     }
858     if (loc.wide) {
859       MarkWide(reg);
860     } else {
861       MarkNarrow(reg);
862     }
863   }
864 }
865 
866 void Mir2Lir::MarkTemp(RegStorage reg) {
867   DCHECK(!reg.IsPair());
868   RegisterInfo* info = GetRegInfo(reg);
869   tempreg_info_.Insert(info);
870   info->SetIsTemp(true);
871 }
872 
873 void Mir2Lir::UnmarkTemp(RegStorage reg) {
874   DCHECK(!reg.IsPair());
875   RegisterInfo* info = GetRegInfo(reg);
876   tempreg_info_.Delete(info);
877   info->SetIsTemp(false);
878 }
879 
880 void Mir2Lir::MarkWide(RegStorage reg) {
881   if (reg.IsPair()) {
882     RegisterInfo* info_lo = GetRegInfo(reg.GetLow());
883     RegisterInfo* info_hi = GetRegInfo(reg.GetHigh());
884     // Unpair any old partners.
885     if (info_lo->IsWide() && info_lo->Partner().NotExactlyEquals(info_hi->GetReg())) {
886       GetRegInfo(info_lo->Partner())->SetIsWide(false);
887     }
888     if (info_hi->IsWide() && info_hi->Partner().NotExactlyEquals(info_lo->GetReg())) {
889       GetRegInfo(info_hi->Partner())->SetIsWide(false);
890     }
891     info_lo->SetIsWide(true);
892     info_hi->SetIsWide(true);
893     info_lo->SetPartner(reg.GetHigh());
894     info_hi->SetPartner(reg.GetLow());
895   } else {
896     RegisterInfo* info = GetRegInfo(reg);
897     info->SetIsWide(true);
898     info->SetPartner(reg);
899   }
900 }
901 
902 void Mir2Lir::MarkNarrow(RegStorage reg) {
903   DCHECK(!reg.IsPair());
904   RegisterInfo* info = GetRegInfo(reg);
905   info->SetIsWide(false);
906   info->SetPartner(reg);
907 }
908 
909 void Mir2Lir::MarkClean(RegLocation loc) {
910   if (loc.reg.IsPair()) {
911     RegisterInfo* info = GetRegInfo(loc.reg.GetLow());
912     info->SetIsDirty(false);
913     info = GetRegInfo(loc.reg.GetHigh());
914     info->SetIsDirty(false);
915   } else {
916     RegisterInfo* info = GetRegInfo(loc.reg);
917     info->SetIsDirty(false);
918   }
919 }
920 
921 // FIXME: need to verify rules/assumptions about how wide values are treated in 64BitSolos.
922 void Mir2Lir::MarkDirty(RegLocation loc) {
923   if (loc.home) {
924     // If already home, can't be dirty
925     return;
926   }
927   if (loc.reg.IsPair()) {
928     RegisterInfo* info = GetRegInfo(loc.reg.GetLow());
929     info->SetIsDirty(true);
930     info = GetRegInfo(loc.reg.GetHigh());
931     info->SetIsDirty(true);
932   } else {
933     RegisterInfo* info = GetRegInfo(loc.reg);
934     info->SetIsDirty(true);
935   }
936 }
937 
938 void Mir2Lir::MarkInUse(RegStorage reg) {
939   if (reg.IsPair()) {
940     GetRegInfo(reg.GetLow())->MarkInUse();
941     GetRegInfo(reg.GetHigh())->MarkInUse();
942   } else {
943     GetRegInfo(reg)->MarkInUse();
944   }
945 }
946 
947 bool Mir2Lir::CheckCorePoolSanity() {
948   GrowableArray<RegisterInfo*>::Iterator it(&tempreg_info_);
949   for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
950     int my_sreg = info->SReg();
951     if (info->IsTemp() && info->IsLive() && info->IsWide() && my_sreg != INVALID_SREG) {
952       RegStorage my_reg = info->GetReg();
953       RegStorage partner_reg = info->Partner();
954       RegisterInfo* partner = GetRegInfo(partner_reg);
955       DCHECK(partner != NULL);
956       DCHECK(partner->IsWide());
957       DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg());
958       DCHECK(partner->IsLive());
959       int partner_sreg = partner->SReg();
960       int diff = my_sreg - partner_sreg;
961       DCHECK((diff == 0) || (diff == -1) || (diff == 1));
962     }
963     if (info->Master() != info) {
964       // Aliased.
965       if (info->IsLive() && (info->SReg() != INVALID_SREG)) {
966         // If I'm live, master should not be live, but should show liveness in alias set.
967         DCHECK_EQ(info->Master()->SReg(), INVALID_SREG);
968         DCHECK(!info->Master()->IsDead());
969       }
970 // TODO: Add checks in !info->IsDead() case to ensure every live bit is owned by exactly 1 reg.
971     }
972     if (info->IsAliased()) {
973       // Has child aliases.
974       DCHECK_EQ(info->Master(), info);
975       if (info->IsLive() && (info->SReg() != INVALID_SREG)) {
976         // Master live, no child should be dead - all should show liveness in set.
977         for (RegisterInfo* p = info->GetAliasChain(); p != nullptr; p = p->GetAliasChain()) {
978           DCHECK(!p->IsDead());
979           DCHECK_EQ(p->SReg(), INVALID_SREG);
980         }
981       } else if (!info->IsDead()) {
982         // Master not live, one or more aliases must be.
983         bool live_alias = false;
984         for (RegisterInfo* p = info->GetAliasChain(); p != nullptr; p = p->GetAliasChain()) {
985           live_alias |= p->IsLive();
986         }
987         DCHECK(live_alias);
988       }
989     }
990     if (info->IsLive() && (info->SReg() == INVALID_SREG)) {
991       // If not fully live, should have INVALID_SREG and def's should be null.
992       DCHECK(info->DefStart() == nullptr);
993       DCHECK(info->DefEnd() == nullptr);
994     }
995   }
996   return true;
997 }
998 
999 /*
1000  * Return an updated location record with current in-register status.
1001  * If the value lives in live temps, reflect that fact.  No code
1002  * is generated.  If the live value is part of an older pair,
1003  * clobber both low and high.
1004  * TUNING: clobbering both is a bit heavy-handed, but the alternative
1005  * is a bit complex when dealing with FP regs.  Examine code to see
1006  * if it's worthwhile trying to be more clever here.
1007  */
1008 RegLocation Mir2Lir::UpdateLoc(RegLocation loc) {
1009   DCHECK(!loc.wide);
1010   DCHECK(CheckCorePoolSanity());
1011   if (loc.location != kLocPhysReg) {
1012     DCHECK((loc.location == kLocDalvikFrame) ||
1013          (loc.location == kLocCompilerTemp));
1014     RegStorage reg = AllocLiveReg(loc.s_reg_low, loc.ref ? kRefReg : kAnyReg, false);
1015     if (reg.Valid()) {
1016       bool match = true;
1017       RegisterInfo* info = GetRegInfo(reg);
1018       match &= !reg.IsPair();
1019       match &= !info->IsWide();
1020       if (match) {
1021         loc.location = kLocPhysReg;
1022         loc.reg = reg;
1023       } else {
1024         Clobber(reg);
1025         FreeTemp(reg);
1026       }
1027     }
1028     CheckRegLocation(loc);
1029   }
1030   return loc;
1031 }
1032 
1033 RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) {
1034   DCHECK(loc.wide);
1035   DCHECK(CheckCorePoolSanity());
1036   if (loc.location != kLocPhysReg) {
1037     DCHECK((loc.location == kLocDalvikFrame) ||
1038          (loc.location == kLocCompilerTemp));
1039     RegStorage reg = AllocLiveReg(loc.s_reg_low, kAnyReg, true);
1040     if (reg.Valid()) {
1041       bool match = true;
1042       if (reg.IsPair()) {
1043         // If we've got a register pair, make sure that it was last used as the same pair.
1044         RegisterInfo* info_lo = GetRegInfo(reg.GetLow());
1045         RegisterInfo* info_hi = GetRegInfo(reg.GetHigh());
1046         match &= info_lo->IsWide();
1047         match &= info_hi->IsWide();
1048         match &= (info_lo->Partner().ExactlyEquals(info_hi->GetReg()));
1049         match &= (info_hi->Partner().ExactlyEquals(info_lo->GetReg()));
1050       } else {
1051         RegisterInfo* info = GetRegInfo(reg);
1052         match &= info->IsWide();
1053         match &= (info->GetReg().ExactlyEquals(info->Partner()));
1054       }
1055       if (match) {
1056         loc.location = kLocPhysReg;
1057         loc.reg = reg;
1058       } else {
1059         Clobber(reg);
1060         FreeTemp(reg);
1061       }
1062     }
1063     CheckRegLocation(loc);
1064   }
1065   return loc;
1066 }
1067 
1068 /* For use in cases we don't know (or care) width */
1069 RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc) {
1070   if (loc.wide)
1071     return UpdateLocWide(loc);
1072   else
1073     return UpdateLoc(loc);
1074 }
1075 
1076 RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
1077   DCHECK(loc.wide);
1078 
1079   loc = UpdateLocWide(loc);
1080 
1081   /* If already in registers, we can assume proper form.  Right reg class? */
1082   if (loc.location == kLocPhysReg) {
1083     if (!RegClassMatches(reg_class, loc.reg)) {
1084       // Wrong register class.  Reallocate and transfer ownership.
1085       RegStorage new_regs = AllocTypedTempWide(loc.fp, reg_class);
1086       // Clobber the old regs.
1087       Clobber(loc.reg);
1088       // ...and mark the new ones live.
1089       loc.reg = new_regs;
1090       MarkWide(loc.reg);
1091       MarkLive(loc);
1092     }
1093     CheckRegLocation(loc);
1094     return loc;
1095   }
1096 
1097   DCHECK_NE(loc.s_reg_low, INVALID_SREG);
1098   DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
1099 
1100   loc.reg = AllocTypedTempWide(loc.fp, reg_class);
1101   MarkWide(loc.reg);
1102 
1103   if (update) {
1104     loc.location = kLocPhysReg;
1105     MarkLive(loc);
1106   }
1107   CheckRegLocation(loc);
1108   return loc;
1109 }
1110 
1111 RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
1112   // Narrow reg_class if the loc is a ref.
1113   if (loc.ref && reg_class == kAnyReg) {
1114     reg_class = kRefReg;
1115   }
1116 
1117   if (loc.wide) {
1118     return EvalLocWide(loc, reg_class, update);
1119   }
1120 
1121   loc = UpdateLoc(loc);
1122 
1123   if (loc.location == kLocPhysReg) {
1124     if (!RegClassMatches(reg_class, loc.reg)) {
1125       // Wrong register class.  Reallocate and transfer ownership.
1126       RegStorage new_reg = AllocTypedTemp(loc.fp, reg_class);
1127       // Clobber the old reg.
1128       Clobber(loc.reg);
1129       // ...and mark the new one live.
1130       loc.reg = new_reg;
1131       MarkLive(loc);
1132     }
1133     CheckRegLocation(loc);
1134     return loc;
1135   }
1136 
1137   DCHECK_NE(loc.s_reg_low, INVALID_SREG);
1138 
1139   loc.reg = AllocTypedTemp(loc.fp, reg_class);
1140   CheckRegLocation(loc);
1141 
1142   if (update) {
1143     loc.location = kLocPhysReg;
1144     MarkLive(loc);
1145   }
1146   CheckRegLocation(loc);
1147   return loc;
1148 }
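// Editorial usage sketch (assumed typical backend pattern, sources already
// loaded via LoadValue): evaluate the destination into a register of the right
// class, generate into it, then store it back through the RegLocation machinery:
//
//   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
//   OpRegRegReg(kOpAdd, rl_result.reg, rl_src1.reg, rl_src2.reg);
//   StoreValue(rl_dest, rl_result);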
1149 
1150 /* USE SSA names to count references of base Dalvik v_regs. */
1151 void Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs) {
1152   for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
1153     RegLocation loc = mir_graph_->reg_location_[i];
1154     RefCounts* counts = loc.fp ? fp_counts : core_counts;
1155     int p_map_idx = SRegToPMap(loc.s_reg_low);
1156     int use_count = mir_graph_->GetUseCount(i);
1157     if (loc.fp) {
1158       if (loc.wide) {
1159         if (WideFPRsAreAliases()) {
1160           // Floats and doubles can be counted together.
1161           counts[p_map_idx].count += use_count;
1162         } else {
1163           // Treat doubles as a unit, using upper half of fp_counts array.
1164           counts[p_map_idx + num_regs].count += use_count;
1165         }
1166         i++;
1167       } else {
1168         counts[p_map_idx].count += use_count;
1169       }
1170     } else {
1171       if (loc.wide && WideGPRsAreAliases()) {
1172         i++;
1173       }
1174       if (!IsInexpensiveConstant(loc)) {
1175         counts[p_map_idx].count += use_count;
1176       }
1177     }
1178   }
1179 }
1180 
1181 /* qsort callback function, sort descending */
1182 static int SortCounts(const void *val1, const void *val2) {
1183   const Mir2Lir::RefCounts* op1 = reinterpret_cast<const Mir2Lir::RefCounts*>(val1);
1184   const Mir2Lir::RefCounts* op2 = reinterpret_cast<const Mir2Lir::RefCounts*>(val2);
1185   // Note that we fall back to sorting on reg so we get stable output on differing qsort
1186   // implementations (such as on host and target or between local host and build servers).
1187   // Note also that if a wide val1 and a non-wide val2 have the same count, then val1 always
1188   // ``loses'' (as STARTING_WIDE_SREG is or-ed in val1->s_reg).
1189   return (op1->count == op2->count)
1190           ? (op1->s_reg - op2->s_reg)
1191           : (op1->count < op2->count ? 1 : -1);
1192 }
1193 
1194 void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg) {
1195   LOG(INFO) << msg;
1196   for (int i = 0; i < size; i++) {
1197     if ((arr[i].s_reg & STARTING_WIDE_SREG) != 0) {
1198       LOG(INFO) << "s_reg[64_" << (arr[i].s_reg & ~STARTING_WIDE_SREG) << "]: " << arr[i].count;
1199     } else {
1200       LOG(INFO) << "s_reg[32_" << arr[i].s_reg << "]: " << arr[i].count;
1201     }
1202   }
1203 }
1204 
1205 /*
1206  * Note: some portions of this code required even if the kPromoteRegs
1207  * optimization is disabled.
1208  */
1209 void Mir2Lir::DoPromotion() {
1210   int dalvik_regs = cu_->num_dalvik_registers;
1211   int num_regs = dalvik_regs + mir_graph_->GetNumUsedCompilerTemps();
1212   const int promotion_threshold = 1;
1213   // Allocate the promotion map - one entry for each Dalvik vReg or compiler temp
1214   promotion_map_ = static_cast<PromotionMap*>
1215       (arena_->Alloc(num_regs * sizeof(promotion_map_[0]), kArenaAllocRegAlloc));
1216 
1217   // Allow target code to add any special registers
1218   AdjustSpillMask();
1219 
1220   /*
1221    * Simple register promotion. Just do a static count of the uses
1222    * of Dalvik registers.  Note that we examine the SSA names, but
1223    * count based on original Dalvik register name.  Count refs
1224    * separately based on type in order to give allocation
1225    * preference to fp doubles - which must be allocated sequential
1226    * physical single fp registers starting with an even-numbered
1227    * reg.
1228    * TUNING: replace with linear scan once we have the ability
1229    * to describe register live ranges for GC.
1230    */
1231   size_t core_reg_count_size = WideGPRsAreAliases() ? num_regs : num_regs * 2;
1232   size_t fp_reg_count_size = WideFPRsAreAliases() ? num_regs : num_regs * 2;
1233   RefCounts *core_regs =
1234       static_cast<RefCounts*>(arena_->Alloc(sizeof(RefCounts) * core_reg_count_size,
1235                                             kArenaAllocRegAlloc));
1236   RefCounts *fp_regs =
1237       static_cast<RefCounts *>(arena_->Alloc(sizeof(RefCounts) * fp_reg_count_size,
1238                                              kArenaAllocRegAlloc));
1239   // Set ssa names for original Dalvik registers
1240   for (int i = 0; i < dalvik_regs; i++) {
1241     core_regs[i].s_reg = fp_regs[i].s_reg = i;
1242   }
1243 
1244   // Set ssa names for compiler temporaries
1245   for (unsigned int ct_idx = 0; ct_idx < mir_graph_->GetNumUsedCompilerTemps(); ct_idx++) {
1246     CompilerTemp* ct = mir_graph_->GetCompilerTemp(ct_idx);
1247     core_regs[dalvik_regs + ct_idx].s_reg = ct->s_reg_low;
1248     fp_regs[dalvik_regs + ct_idx].s_reg = ct->s_reg_low;
1249   }
1250 
1251   // Duplicate in upper half to represent possible wide starting sregs.
1252   for (size_t i = num_regs; i < fp_reg_count_size; i++) {
1253     fp_regs[i].s_reg = fp_regs[i - num_regs].s_reg | STARTING_WIDE_SREG;
1254   }
1255   for (size_t i = num_regs; i < core_reg_count_size; i++) {
1256     core_regs[i].s_reg = core_regs[i - num_regs].s_reg | STARTING_WIDE_SREG;
1257   }
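  // Editorial example (sizes assumed): with 10 Dalvik vregs and 2 compiler
  // temps, num_regs == 12; if wide FP regs are not aliases, a double whose low
  // sreg maps to p_map_idx 4 is counted by CountRefs() at fp_regs[4 + 12],
  // i.e. in the upper half just seeded with STARTING_WIDE_SREG names.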
1258 
1259   // Sum use counts of SSA regs by original Dalvik vreg.
1260   CountRefs(core_regs, fp_regs, num_regs);
1261 
1262   // Sort the count arrays
1263   qsort(core_regs, core_reg_count_size, sizeof(RefCounts), SortCounts);
1264   qsort(fp_regs, fp_reg_count_size, sizeof(RefCounts), SortCounts);
1265 
1266   if (cu_->verbose) {
1267     DumpCounts(core_regs, core_reg_count_size, "Core regs after sort");
1268     DumpCounts(fp_regs, fp_reg_count_size, "Fp regs after sort");
1269   }
1270 
1271   if (!(cu_->disable_opt & (1 << kPromoteRegs))) {
1272     // Promote fp regs
1273     for (size_t i = 0; (i < fp_reg_count_size) && (fp_regs[i].count >= promotion_threshold); i++) {
1274       int low_sreg = fp_regs[i].s_reg & ~STARTING_WIDE_SREG;
1275       size_t p_map_idx = SRegToPMap(low_sreg);
1276       RegStorage reg = RegStorage::InvalidReg();
1277       if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
1278         // TODO: break out the Thumb2-specific code.
1279         if (cu_->instruction_set == kThumb2) {
1280           bool wide = fp_regs[i].s_reg & STARTING_WIDE_SREG;
1281           if (wide) {
1282             if (promotion_map_[p_map_idx + 1].fp_location != kLocPhysReg) {
1283               // Ignore result - if can't alloc double may still be able to alloc singles.
1284               AllocPreservedDouble(low_sreg);
1285             }
1286             // Continue regardless of success - might still be able to grab a single.
1287             continue;
1288           } else {
1289             reg = AllocPreservedSingle(low_sreg);
1290           }
1291         } else {
1292           reg = AllocPreservedFpReg(low_sreg);
1293         }
1294         if (!reg.Valid()) {
1295            break;  // No more left
1296         }
1297       }
1298     }
1299 
1300     // Promote core regs
1301     for (size_t i = 0; (i < core_reg_count_size) &&
1302          (core_regs[i].count >= promotion_threshold); i++) {
1303       int low_sreg = core_regs[i].s_reg & ~STARTING_WIDE_SREG;
1304       size_t p_map_idx = SRegToPMap(low_sreg);
1305       if (promotion_map_[p_map_idx].core_location != kLocPhysReg) {
1306         RegStorage reg = AllocPreservedCoreReg(low_sreg);
1307         if (!reg.Valid()) {
1308            break;  // No more left
1309         }
1310       }
1311     }
1312   }
1313 
1314   // Now, update SSA names to new home locations
1315   for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
1316     RegLocation *curr = &mir_graph_->reg_location_[i];
1317     int p_map_idx = SRegToPMap(curr->s_reg_low);
1318     int reg_num = curr->fp ? promotion_map_[p_map_idx].fp_reg : promotion_map_[p_map_idx].core_reg;
1319     bool wide = curr->wide || (cu_->target64 && curr->ref);
1320     RegStorage reg = RegStorage::InvalidReg();
1321     if (curr->fp && promotion_map_[p_map_idx].fp_location == kLocPhysReg) {
1322       if (wide && cu_->instruction_set == kThumb2) {
1323         if (promotion_map_[p_map_idx + 1].fp_location == kLocPhysReg) {
1324           int high_reg = promotion_map_[p_map_idx+1].fp_reg;
1325           // TODO: move target-specific restrictions out of here.
1326           if (((reg_num & 0x1) == 0) && ((reg_num + 1) == high_reg)) {
1327             reg = RegStorage::FloatSolo64(RegStorage::RegNum(reg_num) >> 1);
1328           }
1329         }
1330       } else {
1331         reg = wide ? RegStorage::FloatSolo64(reg_num) : RegStorage::FloatSolo32(reg_num);
1332       }
1333     } else if (!curr->fp && promotion_map_[p_map_idx].core_location == kLocPhysReg) {
1334       if (wide && !cu_->target64) {
1335         if (promotion_map_[p_map_idx + 1].core_location == kLocPhysReg) {
1336           int high_reg = promotion_map_[p_map_idx+1].core_reg;
1337           reg = RegStorage(RegStorage::k64BitPair, reg_num, high_reg);
1338         }
1339       } else {
1340         reg = wide ? RegStorage::Solo64(reg_num) : RegStorage::Solo32(reg_num);
1341       }
1342     }
1343     if (reg.Valid()) {
1344       curr->reg = reg;
1345       curr->location = kLocPhysReg;
1346       curr->home = true;
1347     }
1348   }
1349   if (cu_->verbose) {
1350     DumpPromotionMap();
1351   }
1352 }
1353 
1354 /* Returns sp-relative offset in bytes for a VReg */
1355 int Mir2Lir::VRegOffset(int v_reg) {
1356   return StackVisitor::GetVRegOffset(cu_->code_item, core_spill_mask_,
1357                                      fp_spill_mask_, frame_size_, v_reg,
1358                                      cu_->instruction_set);
1359 }
1360 
1361 /* Returns sp-relative offset in bytes for a SReg */
1362 int Mir2Lir::SRegOffset(int s_reg) {
1363   return VRegOffset(mir_graph_->SRegToVReg(s_reg));
1364 }
1365 
1366 /* Mark register usage state and return long retloc */
1367 RegLocation Mir2Lir::GetReturnWide(RegisterClass reg_class) {
1368   RegLocation res;
1369   switch (reg_class) {
1370     case kRefReg: LOG(FATAL); break;
1371     case kFPReg: res = LocCReturnDouble(); break;
1372     default: res = LocCReturnWide(); break;
1373   }
1374   Clobber(res.reg);
1375   LockTemp(res.reg);
1376   MarkWide(res.reg);
1377   CheckRegLocation(res);
1378   return res;
1379 }
1380 
1381 RegLocation Mir2Lir::GetReturn(RegisterClass reg_class) {
1382   RegLocation res;
1383   switch (reg_class) {
1384     case kRefReg: res = LocCReturnRef(); break;
1385     case kFPReg: res = LocCReturnFloat(); break;
1386     default: res = LocCReturn(); break;
1387   }
1388   Clobber(res.reg);
1389   if (cu_->instruction_set == kMips) {
1390     MarkInUse(res.reg);
1391   } else {
1392     LockTemp(res.reg);
1393   }
1394   CheckRegLocation(res);
1395   return res;
1396 }
1397 
1398 void Mir2Lir::SimpleRegAlloc() {
1399   DoPromotion();
1400 
1401   if (cu_->verbose && !(cu_->disable_opt & (1 << kPromoteRegs))) {
1402     LOG(INFO) << "After Promotion";
1403     mir_graph_->DumpRegLocTable(mir_graph_->reg_location_, mir_graph_->GetNumSSARegs());
1404   }
1405 
1406   /* Set the frame size */
1407   frame_size_ = ComputeFrameSize();
1408 }
1409 
1410 /*
1411  * Get the "real" sreg number associated with an s_reg slot.  In general,
1412  * s_reg values passed through codegen are the SSA names created by
1413  * dataflow analysis and refer to slot numbers in the mir_graph_->reg_location
1414  * array.  However, renaming is accomplished by simply replacing RegLocation
1415  * entries in the reglocation[] array.  Therefore, when location
1416  * records for operands are first created, we need to ask the locRecord
1417  * identified by the dataflow pass what its new name is.
1418  */
1419 int Mir2Lir::GetSRegHi(int lowSreg) {
1420   return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
1421 }
1422 
1423 bool Mir2Lir::LiveOut(int s_reg) {
1424   // For now.
1425   return true;
1426 }
1427 
1428 }  // namespace art
1429