1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/base/adapters.h"
6 #include "src/compiler/linkage.h"
7 #include "src/compiler/register-allocator.h"
8 #include "src/string-stream.h"
9 
10 namespace v8 {
11 namespace internal {
12 namespace compiler {
13 
14 #define TRACE(...)                             \
15   do {                                         \
16     if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
17   } while (false)
18 
19 
20 namespace {
21 
22 static const int kFloatRepBit =
23     1 << static_cast<int>(MachineRepresentation::kFloat32);
24 static const int kSimd128RepBit =
25     1 << static_cast<int>(MachineRepresentation::kSimd128);
26 
27 void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
28   auto it = std::find(v->begin(), v->end(), range);
29   DCHECK(it != v->end());
30   v->erase(it);
31 }
32 
33 int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
34   return kind == FP_REGISTERS ? cfg->num_double_registers()
35                               : cfg->num_general_registers();
36 }
37 
38 
39 int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
40                                 RegisterKind kind) {
41   return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
42                               : cfg->num_allocatable_general_registers();
43 }
44 
45 
46 const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
47                                        RegisterKind kind) {
48   return kind == FP_REGISTERS ? cfg->allocatable_double_codes()
49                               : cfg->allocatable_general_codes();
50 }
51 
52 
53 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
54                                           const InstructionBlock* block) {
55   RpoNumber index = block->loop_header();
56   if (!index.IsValid()) return nullptr;
57   return sequence->InstructionBlockAt(index);
58 }
59 
60 
61 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
62                                             LifetimePosition pos) {
63   return code->GetInstructionBlock(pos.ToInstructionIndex());
64 }
65 
66 
67 Instruction* GetLastInstruction(InstructionSequence* code,
68                                 const InstructionBlock* block) {
69   return code->InstructionAt(block->last_instruction_index());
70 }
71 
72 // TODO(dcarney): fix frame to allow frame accesses to half size location.
73 int GetByteWidth(MachineRepresentation rep) {
74   switch (rep) {
75     case MachineRepresentation::kBit:
76     case MachineRepresentation::kWord8:
77     case MachineRepresentation::kWord16:
78     case MachineRepresentation::kWord32:
79     case MachineRepresentation::kTaggedSigned:
80     case MachineRepresentation::kTaggedPointer:
81     case MachineRepresentation::kTagged:
82     case MachineRepresentation::kFloat32:
83       return kPointerSize;
84     case MachineRepresentation::kWord64:
85     case MachineRepresentation::kFloat64:
86       return kDoubleSize;
87     case MachineRepresentation::kSimd128:
88       return kSimd128Size;
89     case MachineRepresentation::kNone:
90       break;
91   }
92   UNREACHABLE();
93   return 0;
94 }
95 
96 }  // namespace
97 
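// Caches the fixed [start, end) bounds of one live range child; skip_ is set
// for spilled children, which FindConnectableSubranges below treats as not
// needing a connecting move.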
98 class LiveRangeBound {
99  public:
100   explicit LiveRangeBound(LiveRange* range, bool skip)
101       : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
102     DCHECK(!range->IsEmpty());
103   }
104 
105   bool CanCover(LifetimePosition position) {
106     return start_ <= position && position < end_;
107   }
108 
109   LiveRange* const range_;
110   const LifetimePosition start_;
111   const LifetimePosition end_;
112   const bool skip_;
113 
114  private:
115   DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
116 };
117 
118 
119 struct FindResult {
120   LiveRange* cur_cover_;
121   LiveRange* pred_cover_;
122 };
123 
124 
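// Holds one LiveRangeBound per child of a top-level live range, in order, so
// that the child covering a given lifetime position can be found by binary
// search (see Find below).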
125 class LiveRangeBoundArray {
126  public:
127   LiveRangeBoundArray() : length_(0), start_(nullptr) {}
128 
129   bool ShouldInitialize() { return start_ == nullptr; }
130 
131   void Initialize(Zone* zone, TopLevelLiveRange* range) {
132     length_ = range->GetChildCount();
133 
134     start_ = zone->NewArray<LiveRangeBound>(length_);
135     LiveRangeBound* curr = start_;
136     // Normally, spilled ranges do not need connecting moves, because the spill
137     // location has been assigned at definition. For ranges spilled in deferred
138     // blocks, that is not the case, so we need to connect the spilled children.
139     for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
140       new (curr) LiveRangeBound(i, i->spilled());
141     }
142   }
143 
144   LiveRangeBound* Find(const LifetimePosition position) const {
145     size_t left_index = 0;
146     size_t right_index = length_;
147     while (true) {
148       size_t current_index = left_index + (right_index - left_index) / 2;
149       DCHECK(right_index > current_index);
150       LiveRangeBound* bound = &start_[current_index];
151       if (bound->start_ <= position) {
152         if (position < bound->end_) return bound;
153         DCHECK(left_index < current_index);
154         left_index = current_index;
155       } else {
156         right_index = current_index;
157       }
158     }
159   }
160 
161   LiveRangeBound* FindPred(const InstructionBlock* pred) {
162     LifetimePosition pred_end =
163         LifetimePosition::InstructionFromInstructionIndex(
164             pred->last_instruction_index());
165     return Find(pred_end);
166   }
167 
168   LiveRangeBound* FindSucc(const InstructionBlock* succ) {
169     LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
170         succ->first_instruction_index());
171     return Find(succ_start);
172   }
173 
174   bool FindConnectableSubranges(const InstructionBlock* block,
175                                 const InstructionBlock* pred,
176                                 FindResult* result) const {
177     LifetimePosition pred_end =
178         LifetimePosition::InstructionFromInstructionIndex(
179             pred->last_instruction_index());
180     LiveRangeBound* bound = Find(pred_end);
181     result->pred_cover_ = bound->range_;
182     LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
183         block->first_instruction_index());
184 
185     if (bound->CanCover(cur_start)) {
186       // Both blocks are covered by the same range, so there is nothing to
187       // connect.
188       return false;
189     }
190     bound = Find(cur_start);
191     if (bound->skip_) {
192       return false;
193     }
194     result->cur_cover_ = bound->range_;
195     DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
196     return (result->cur_cover_ != result->pred_cover_);
197   }
198 
199  private:
200   size_t length_;
201   LiveRangeBound* start_;
202 
203   DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
204 };
205 
206 
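// Lazily builds a LiveRangeBoundArray for each top-level live range, indexed
// by virtual register number.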
207 class LiveRangeFinder {
208  public:
209   explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
210       : data_(data),
211         bounds_length_(static_cast<int>(data_->live_ranges().size())),
212         bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
213         zone_(zone) {
214     for (int i = 0; i < bounds_length_; ++i) {
215       new (&bounds_[i]) LiveRangeBoundArray();
216     }
217   }
218 
219   LiveRangeBoundArray* ArrayFor(int operand_index) {
220     DCHECK(operand_index < bounds_length_);
221     TopLevelLiveRange* range = data_->live_ranges()[operand_index];
222     DCHECK(range != nullptr && !range->IsEmpty());
223     LiveRangeBoundArray* array = &bounds_[operand_index];
224     if (array->ShouldInitialize()) {
225       array->Initialize(zone_, range);
226     }
227     return array;
228   }
229 
230  private:
231   const RegisterAllocationData* const data_;
232   const int bounds_length_;
233   LiveRangeBoundArray* const bounds_;
234   Zone* const zone_;
235 
236   DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
237 };
238 
239 
240 typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
241 
242 
243 struct DelayedInsertionMapCompare {
244   bool operator()(const DelayedInsertionMapKey& a,
245                   const DelayedInsertionMapKey& b) const {
246     if (a.first == b.first) {
247       return a.second.Compare(b.second);
248     }
249     return a.first < b.first;
250   }
251 };
252 
253 
254 typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
255                 DelayedInsertionMapCompare> DelayedInsertionMap;
256 
257 
258 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
259                          void* hint, UsePositionHintType hint_type)
260     : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) {
261   DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone);
262   bool register_beneficial = true;
263   UsePositionType type = UsePositionType::kAny;
264   if (operand_ != nullptr && operand_->IsUnallocated()) {
265     const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
266     if (unalloc->HasRegisterPolicy()) {
267       type = UsePositionType::kRequiresRegister;
268     } else if (unalloc->HasSlotPolicy()) {
269       type = UsePositionType::kRequiresSlot;
270       register_beneficial = false;
271     } else {
272       register_beneficial = !unalloc->HasAnyPolicy();
273     }
274   }
275   flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) |
276            RegisterBeneficialField::encode(register_beneficial) |
277            AssignedRegisterField::encode(kUnassignedRegister);
278   DCHECK(pos_.IsValid());
279 }
280 
281 
282 bool UsePosition::HasHint() const {
283   int hint_register;
284   return HintRegister(&hint_register);
285 }
286 
287 
288 bool UsePosition::HintRegister(int* register_code) const {
289   if (hint_ == nullptr) return false;
290   switch (HintTypeField::decode(flags_)) {
291     case UsePositionHintType::kNone:
292     case UsePositionHintType::kUnresolved:
293       return false;
294     case UsePositionHintType::kUsePos: {
295       UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
296       int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
297       if (assigned_register == kUnassignedRegister) return false;
298       *register_code = assigned_register;
299       return true;
300     }
301     case UsePositionHintType::kOperand: {
302       InstructionOperand* operand =
303           reinterpret_cast<InstructionOperand*>(hint_);
304       *register_code = LocationOperand::cast(operand)->register_code();
305       return true;
306     }
307     case UsePositionHintType::kPhi: {
308       RegisterAllocationData::PhiMapValue* phi =
309           reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
310       int assigned_register = phi->assigned_register();
311       if (assigned_register == kUnassignedRegister) return false;
312       *register_code = assigned_register;
313       return true;
314     }
315   }
316   UNREACHABLE();
317   return false;
318 }
319 
320 
321 UsePositionHintType UsePosition::HintTypeForOperand(
322     const InstructionOperand& op) {
323   switch (op.kind()) {
324     case InstructionOperand::CONSTANT:
325     case InstructionOperand::IMMEDIATE:
326     case InstructionOperand::EXPLICIT:
327       return UsePositionHintType::kNone;
328     case InstructionOperand::UNALLOCATED:
329       return UsePositionHintType::kUnresolved;
330     case InstructionOperand::ALLOCATED:
331       if (op.IsRegister() || op.IsFPRegister()) {
332         return UsePositionHintType::kOperand;
333       } else {
334         DCHECK(op.IsStackSlot() || op.IsFPStackSlot());
335         return UsePositionHintType::kNone;
336       }
337     case InstructionOperand::INVALID:
338       break;
339   }
340   UNREACHABLE();
341   return UsePositionHintType::kNone;
342 }
343 
344 void UsePosition::SetHint(UsePosition* use_pos) {
345   DCHECK_NOT_NULL(use_pos);
346   hint_ = use_pos;
347   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
348 }
349 
350 void UsePosition::ResolveHint(UsePosition* use_pos) {
351   DCHECK_NOT_NULL(use_pos);
352   if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return;
353   hint_ = use_pos;
354   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
355 }
356 
357 
358 void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
359   DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
360   DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
361   flags_ = TypeField::encode(type) |
362            RegisterBeneficialField::encode(register_beneficial) |
363            HintTypeField::encode(HintTypeField::decode(flags_)) |
364            AssignedRegisterField::encode(kUnassignedRegister);
365 }
366 
367 
368 UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
369   DCHECK(Contains(pos) && pos != start());
370   UseInterval* after = new (zone) UseInterval(pos, end_);
371   after->next_ = next_;
372   next_ = nullptr;
373   end_ = pos;
374   return after;
375 }
376 
377 
378 void LifetimePosition::Print() const {
379   OFStream os(stdout);
380   os << *this << std::endl;
381 }
382 
383 
384 std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
385   os << '@' << pos.ToInstructionIndex();
386   if (pos.IsGapPosition()) {
387     os << 'g';
388   } else {
389     os << 'i';
390   }
391   if (pos.IsStart()) {
392     os << 's';
393   } else {
394     os << 'e';
395   }
396   return os;
397 }
398 
399 LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
400                      TopLevelLiveRange* top_level)
401     : relative_id_(relative_id),
402       bits_(0),
403       last_interval_(nullptr),
404       first_interval_(nullptr),
405       first_pos_(nullptr),
406       top_level_(top_level),
407       next_(nullptr),
408       current_interval_(nullptr),
409       last_processed_use_(nullptr),
410       current_hint_position_(nullptr),
411       splitting_pointer_(nullptr) {
412   DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
413   bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
414           RepresentationField::encode(rep);
415 }
416 
417 
418 void LiveRange::VerifyPositions() const {
419   // Walk the positions, verifying that each is in an interval.
420   UseInterval* interval = first_interval_;
421   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
422     CHECK(Start() <= pos->pos());
423     CHECK(pos->pos() <= End());
424     CHECK_NOT_NULL(interval);
425     while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
426       interval = interval->next();
427       CHECK_NOT_NULL(interval);
428     }
429   }
430 }
431 
432 
433 void LiveRange::VerifyIntervals() const {
434   DCHECK(first_interval()->start() == Start());
435   LifetimePosition last_end = first_interval()->end();
436   for (UseInterval* interval = first_interval()->next(); interval != nullptr;
437        interval = interval->next()) {
438     DCHECK(last_end <= interval->start());
439     last_end = interval->end();
440   }
441   DCHECK(last_end == End());
442 }
443 
444 
445 void LiveRange::set_assigned_register(int reg) {
446   DCHECK(!HasRegisterAssigned() && !spilled());
447   bits_ = AssignedRegisterField::update(bits_, reg);
448 }
449 
450 
451 void LiveRange::UnsetAssignedRegister() {
452   DCHECK(HasRegisterAssigned() && !spilled());
453   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
454 }
455 
456 
457 void LiveRange::Spill() {
458   DCHECK(!spilled());
459   DCHECK(!TopLevel()->HasNoSpillType());
460   set_spilled(true);
461   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
462 }
463 
464 
465 RegisterKind LiveRange::kind() const {
466   return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
467 }
468 
469 
470 UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
471   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
472     if (pos->HintRegister(register_index)) return pos;
473   }
474   return nullptr;
475 }
476 
477 
478 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
479   UsePosition* use_pos = last_processed_use_;
480   if (use_pos == nullptr || use_pos->pos() > start) {
481     use_pos = first_pos();
482   }
483   while (use_pos != nullptr && use_pos->pos() < start) {
484     use_pos = use_pos->next();
485   }
486   last_processed_use_ = use_pos;
487   return use_pos;
488 }
489 
490 
491 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
492     LifetimePosition start) const {
493   UsePosition* pos = NextUsePosition(start);
494   while (pos != nullptr && !pos->RegisterIsBeneficial()) {
495     pos = pos->next();
496   }
497   return pos;
498 }
499 
500 LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial(
501     const LifetimePosition& start) const {
502   UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start);
503   if (next_use == nullptr) return End();
504   return next_use->pos();
505 }
506 
507 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
508     LifetimePosition start) const {
509   UsePosition* pos = first_pos();
510   UsePosition* prev = nullptr;
511   while (pos != nullptr && pos->pos() < start) {
512     if (pos->RegisterIsBeneficial()) prev = pos;
513     pos = pos->next();
514   }
515   return prev;
516 }
517 
518 
519 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
520   UsePosition* pos = NextUsePosition(start);
521   while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
522     pos = pos->next();
523   }
524   return pos;
525 }
526 
527 
528 UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
529   for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
530        pos = pos->next()) {
531     if (pos->type() != UsePositionType::kRequiresSlot) continue;
532     return pos;
533   }
534   return nullptr;
535 }
536 
537 
538 bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
539   // We cannot spill a live range that has a use requiring a register
540   // at the current or the immediate next position.
541   UsePosition* use_pos = NextRegisterPosition(pos);
542   if (use_pos == nullptr) return true;
543   return use_pos->pos() > pos.NextStart().End();
544 }
545 
546 
547 bool LiveRange::IsTopLevel() const { return top_level_ == this; }
548 
549 
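// Returns the operand this range currently resolves to: its assigned register
// if one is set, otherwise the top level's spill operand or spill slot.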
550 InstructionOperand LiveRange::GetAssignedOperand() const {
551   if (HasRegisterAssigned()) {
552     DCHECK(!spilled());
553     return AllocatedOperand(LocationOperand::REGISTER, representation(),
554                             assigned_register());
555   }
556   DCHECK(spilled());
557   DCHECK(!HasRegisterAssigned());
558   if (TopLevel()->HasSpillOperand()) {
559     InstructionOperand* op = TopLevel()->GetSpillOperand();
560     DCHECK(!op->IsUnallocated());
561     return *op;
562   }
563   return TopLevel()->GetSpillRangeOperand();
564 }
565 
566 
567 UseInterval* LiveRange::FirstSearchIntervalForPosition(
568     LifetimePosition position) const {
569   if (current_interval_ == nullptr) return first_interval_;
570   if (current_interval_->start() > position) {
571     current_interval_ = nullptr;
572     return first_interval_;
573   }
574   return current_interval_;
575 }
576 
577 
578 void LiveRange::AdvanceLastProcessedMarker(
579     UseInterval* to_start_of, LifetimePosition but_not_past) const {
580   if (to_start_of == nullptr) return;
581   if (to_start_of->start() > but_not_past) return;
582   LifetimePosition start = current_interval_ == nullptr
583                                ? LifetimePosition::Invalid()
584                                : current_interval_->start();
585   if (to_start_of->start() > start) {
586     current_interval_ = to_start_of;
587   }
588 }
589 
590 
591 LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
592   int new_id = TopLevel()->GetNextChildId();
593   LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
594   // If we split, we do so because we're about to switch registers or move
595   // to/from a slot, so there's no value in connecting hints.
596   DetachAt(position, child, zone, DoNotConnectHints);
597 
598   child->top_level_ = TopLevel();
599   child->next_ = next_;
600   next_ = child;
601   return child;
602 }
603 
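// Splits this range's intervals and use positions at `position` and moves the
// later part into `result`. Returns the last use position left in this range,
// or nullptr if there is none.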
604 UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
605                                  Zone* zone,
606                                  HintConnectionOption connect_hints) {
607   DCHECK(Start() < position);
608   DCHECK(End() > position);
609   DCHECK(result->IsEmpty());
610   // Find the last interval that ends before the position. If the
611   // position is contained in one of the intervals in the chain, we
612   // split that interval and use the first part.
613   UseInterval* current = FirstSearchIntervalForPosition(position);
614 
615   // If the split position coincides with the beginning of a use interval,
616   // we need to split use positions in a special way.
617   bool split_at_start = false;
618 
619   if (current->start() == position) {
620     // When splitting at start we need to locate the previous use interval.
621     current = first_interval_;
622   }
623 
624   UseInterval* after = nullptr;
625   while (current != nullptr) {
626     if (current->Contains(position)) {
627       after = current->SplitAt(position, zone);
628       break;
629     }
630     UseInterval* next = current->next();
631     if (next->start() >= position) {
632       split_at_start = (next->start() == position);
633       after = next;
634       current->set_next(nullptr);
635       break;
636     }
637     current = next;
638   }
639   DCHECK(nullptr != after);
640 
641   // Partition original use intervals to the two live ranges.
642   UseInterval* before = current;
643   result->last_interval_ =
644       (last_interval_ == before)
645           ? after            // Only interval in the range after split.
646           : last_interval_;  // Last interval of the original range.
647   result->first_interval_ = after;
648   last_interval_ = before;
649 
650   // Find the last use position before the split and the first use
651   // position after it.
652   UsePosition* use_after =
653       splitting_pointer_ == nullptr || splitting_pointer_->pos() > position
654           ? first_pos()
655           : splitting_pointer_;
656   UsePosition* use_before = nullptr;
657   if (split_at_start) {
658     // The split position coincides with the beginning of a use interval (the
659     // end of a lifetime hole). A use at this position should be attributed
660     // to the split child, because the split child owns the use interval covering it.
661     while (use_after != nullptr && use_after->pos() < position) {
662       use_before = use_after;
663       use_after = use_after->next();
664     }
665   } else {
666     while (use_after != nullptr && use_after->pos() <= position) {
667       use_before = use_after;
668       use_after = use_after->next();
669     }
670   }
671 
672   // Partition original use positions to the two live ranges.
673   if (use_before != nullptr) {
674     use_before->set_next(nullptr);
675   } else {
676     first_pos_ = nullptr;
677   }
678   result->first_pos_ = use_after;
679 
680   // Discard cached iteration state. It might be pointing
681   // to the use that no longer belongs to this live range.
682   last_processed_use_ = nullptr;
683   current_interval_ = nullptr;
684 
685   if (connect_hints == ConnectHints && use_before != nullptr &&
686       use_after != nullptr) {
687     use_after->SetHint(use_before);
688   }
689 #ifdef DEBUG
690   VerifyChildStructure();
691   result->VerifyChildStructure();
692 #endif
693   return use_before;
694 }
695 
696 
697 void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
698   LiveRange* child = this;
699   for (; child != nullptr; child = child->next()) {
700     child->top_level_ = new_top_level;
701   }
702 }
703 
704 
705 void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
706                                      const InstructionOperand& spill_op) {
707   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
708     DCHECK(Start() <= pos->pos() && pos->pos() <= End());
709     if (!pos->HasOperand()) continue;
710     switch (pos->type()) {
711       case UsePositionType::kRequiresSlot:
712         DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
713         InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
714         break;
715       case UsePositionType::kRequiresRegister:
716         DCHECK(op.IsRegister() || op.IsFPRegister());
717       // Fall through.
718       case UsePositionType::kAny:
719         InstructionOperand::ReplaceWith(pos->operand(), &op);
720         break;
721     }
722   }
723 }
724 
725 
726 // This implements an ordering on live ranges so that they are ordered by their
727 // start positions.  This is needed for the correctness of the register
728 // allocation algorithm.  If two live ranges start at the same offset then there
729 // is a tie breaker based on where the value is first used.  This part of the
730 // ordering is merely a heuristic.
731 bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
732   LifetimePosition start = Start();
733   LifetimePosition other_start = other->Start();
734   if (start == other_start) {
735     UsePosition* pos = first_pos();
736     if (pos == nullptr) return false;
737     UsePosition* other_pos = other->first_pos();
738     if (other_pos == nullptr) return true;
739     return pos->pos() < other_pos->pos();
740   }
741   return start < other_start;
742 }
743 
744 
745 void LiveRange::SetUseHints(int register_index) {
746   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
747     if (!pos->HasOperand()) continue;
748     switch (pos->type()) {
749       case UsePositionType::kRequiresSlot:
750         break;
751       case UsePositionType::kRequiresRegister:
752       case UsePositionType::kAny:
753         pos->set_assigned_register(register_index);
754         break;
755     }
756   }
757 }
758 
759 
760 bool LiveRange::CanCover(LifetimePosition position) const {
761   if (IsEmpty()) return false;
762   return Start() <= position && position < End();
763 }
764 
765 
766 bool LiveRange::Covers(LifetimePosition position) const {
767   if (!CanCover(position)) return false;
768   UseInterval* start_search = FirstSearchIntervalForPosition(position);
769   for (UseInterval* interval = start_search; interval != nullptr;
770        interval = interval->next()) {
771     DCHECK(interval->next() == nullptr ||
772            interval->next()->start() >= interval->start());
773     AdvanceLastProcessedMarker(interval, position);
774     if (interval->Contains(position)) return true;
775     if (interval->start() > position) return false;
776   }
777   return false;
778 }
779 
780 
781 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
782   UseInterval* b = other->first_interval();
783   if (b == nullptr) return LifetimePosition::Invalid();
784   LifetimePosition advance_last_processed_up_to = b->start();
785   UseInterval* a = FirstSearchIntervalForPosition(b->start());
786   while (a != nullptr && b != nullptr) {
787     if (a->start() > other->End()) break;
788     if (b->start() > End()) break;
789     LifetimePosition cur_intersection = a->Intersect(b);
790     if (cur_intersection.IsValid()) {
791       return cur_intersection;
792     }
793     if (a->start() < b->start()) {
794       a = a->next();
795       if (a == nullptr || a->start() > other->End()) break;
796       AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
797     } else {
798       b = b->next();
799     }
800   }
801   return LifetimePosition::Invalid();
802 }
803 
804 void LiveRange::Print(const RegisterConfiguration* config,
805                       bool with_children) const {
806   OFStream os(stdout);
807   PrintableLiveRange wrapper;
808   wrapper.register_configuration_ = config;
809   for (const LiveRange* i = this; i != nullptr; i = i->next()) {
810     wrapper.range_ = i;
811     os << wrapper << std::endl;
812     if (!with_children) break;
813   }
814 }
815 
816 
817 void LiveRange::Print(bool with_children) const {
818   Print(RegisterConfiguration::Turbofan(), with_children);
819 }
820 
821 
822 struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
823   SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
824                          SpillMoveInsertionList* next)
825       : gap_index(gap_index), operand(operand), next(next) {}
826   const int gap_index;
827   InstructionOperand* const operand;
828   SpillMoveInsertionList* const next;
829 };
830 
831 
832 TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
833     : LiveRange(0, rep, this),
834       vreg_(vreg),
835       last_child_id_(0),
836       splintered_from_(nullptr),
837       spill_operand_(nullptr),
838       spill_move_insertion_locations_(nullptr),
839       spilled_in_deferred_blocks_(false),
840       spill_start_index_(kMaxInt),
841       last_pos_(nullptr),
842       splinter_(nullptr),
843       has_preassigned_slot_(false) {
844   bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
845 }
846 
847 
848 #if DEBUG
849 int TopLevelLiveRange::debug_virt_reg() const {
850   return IsSplinter() ? splintered_from()->vreg() : vreg();
851 }
852 #endif
853 
854 
855 void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
856                                             InstructionOperand* operand) {
857   DCHECK(HasNoSpillType());
858   spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList(
859       gap_index, operand, spill_move_insertion_locations_);
860 }
861 
862 void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
863                                          const InstructionOperand& op,
864                                          bool might_be_duplicated) {
865   DCHECK_IMPLIES(op.IsConstant(), GetSpillMoveInsertionLocations() == nullptr);
866   Zone* zone = sequence->zone();
867 
868   for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations();
869        to_spill != nullptr; to_spill = to_spill->next) {
870     Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
871     ParallelMove* move =
872         instr->GetOrCreateParallelMove(Instruction::START, zone);
873     // Skip insertion if it's possible that the move exists already as a
874     // constraint move from a fixed output register to a slot.
875     if (might_be_duplicated || has_preassigned_slot()) {
876       bool found = false;
877       for (MoveOperands* move_op : *move) {
878         if (move_op->IsEliminated()) continue;
879         if (move_op->source().Equals(*to_spill->operand) &&
880             move_op->destination().Equals(op)) {
881           found = true;
882           if (has_preassigned_slot()) move_op->Eliminate();
883           break;
884         }
885       }
886       if (found) continue;
887     }
888     if (!has_preassigned_slot()) {
889       move->AddMove(*to_spill->operand, op);
890     }
891   }
892 }
893 
894 
895 void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
896   DCHECK(HasNoSpillType());
897   DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
898   set_spill_type(SpillType::kSpillOperand);
899   spill_operand_ = operand;
900 }
901 
902 
903 void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
904   DCHECK(!HasSpillOperand());
905   DCHECK(spill_range);
906   spill_range_ = spill_range;
907 }
908 
909 
910 AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
911   SpillRange* spill_range = GetSpillRange();
912   int index = spill_range->assigned_slot();
913   return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
914 }
915 
916 
917 void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
918                                  Zone* zone) {
919   DCHECK(start != Start() || end != End());
920   DCHECK(start < end);
921 
922   TopLevelLiveRange splinter_temp(-1, representation());
923   UsePosition* last_in_splinter = nullptr;
924   // Live ranges defined in deferred blocks stay in deferred blocks, so we
925   // don't need to splinter them. That means that start should always be
926   // after the beginning of the range.
927   DCHECK(start > Start());
928 
929   if (end >= End()) {
930     DCHECK(start > Start());
931     DetachAt(start, &splinter_temp, zone, ConnectHints);
932     next_ = nullptr;
933   } else {
934     DCHECK(start < End() && Start() < end);
935 
936     const int kInvalidId = std::numeric_limits<int>::max();
937 
938     UsePosition* last = DetachAt(start, &splinter_temp, zone, ConnectHints);
939 
940     LiveRange end_part(kInvalidId, this->representation(), nullptr);
941     // The last chunk exits the deferred region, and we don't want to connect
942     // hints here, because the non-deferred region shouldn't be affected
943     // by allocation decisions on the deferred path.
944     last_in_splinter =
945         splinter_temp.DetachAt(end, &end_part, zone, DoNotConnectHints);
946 
947     next_ = end_part.next_;
948     last_interval_->set_next(end_part.first_interval_);
949     // The next splinter will happen either at or after the current interval.
950     // We can optimize DetachAt by setting current_interval_ accordingly,
951     // which will then be picked up by FirstSearchIntervalForPosition.
952     current_interval_ = last_interval_;
953     last_interval_ = end_part.last_interval_;
954 
955     if (first_pos_ == nullptr) {
956       first_pos_ = end_part.first_pos_;
957     } else {
958       splitting_pointer_ = last;
959       if (last != nullptr) last->set_next(end_part.first_pos_);
960     }
961   }
962 
963   if (splinter()->IsEmpty()) {
964     splinter()->first_interval_ = splinter_temp.first_interval_;
965     splinter()->last_interval_ = splinter_temp.last_interval_;
966   } else {
967     splinter()->last_interval_->set_next(splinter_temp.first_interval_);
968     splinter()->last_interval_ = splinter_temp.last_interval_;
969   }
970   if (splinter()->first_pos() == nullptr) {
971     splinter()->first_pos_ = splinter_temp.first_pos_;
972   } else {
973     splinter()->last_pos_->set_next(splinter_temp.first_pos_);
974   }
975   if (last_in_splinter != nullptr) {
976     splinter()->last_pos_ = last_in_splinter;
977   } else {
978     if (splinter()->first_pos() != nullptr &&
979         splinter()->last_pos_ == nullptr) {
980       splinter()->last_pos_ = splinter()->first_pos();
981       for (UsePosition* pos = splinter()->first_pos(); pos != nullptr;
982            pos = pos->next()) {
983         splinter()->last_pos_ = pos;
984       }
985     }
986   }
987 #if DEBUG
988   Verify();
989   splinter()->Verify();
990 #endif
991 }
992 
993 
994 void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
995   splintered_from_ = splinter_parent;
996   if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
997     SetSpillRange(splinter_parent->spill_range_);
998   }
999 }
1000 
1001 
1002 void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
1003   DCHECK(merged->TopLevel() == this);
1004 
1005   if (HasNoSpillType() && merged->HasSpillRange()) {
1006     set_spill_type(merged->spill_type());
1007     DCHECK(GetSpillRange()->live_ranges().size() > 0);
1008     merged->spill_range_ = nullptr;
1009     merged->bits_ =
1010         SpillTypeField::update(merged->bits_, SpillType::kNoSpillType);
1011   }
1012 }
1013 
1014 
1015 void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
1016   DCHECK(Start() < other->Start());
1017   DCHECK(other->splintered_from() == this);
1018 
1019   LiveRange* first = this;
1020   LiveRange* second = other;
1021   DCHECK(first->Start() < second->Start());
1022   while (first != nullptr && second != nullptr) {
1023     DCHECK(first != second);
1024     // Make sure the ranges are in order each time we iterate.
1025     if (second->Start() < first->Start()) {
1026       LiveRange* tmp = second;
1027       second = first;
1028       first = tmp;
1029       continue;
1030     }
1031 
1032     if (first->End() <= second->Start()) {
1033       if (first->next() == nullptr ||
1034           first->next()->Start() > second->Start()) {
1035         // First is in order before second.
1036         LiveRange* temp = first->next();
1037         first->next_ = second;
1038         first = temp;
1039       } else {
1040         // First is in order before its successor (or second), so advance first.
1041         first = first->next();
1042       }
1043       continue;
1044     }
1045 
1046     DCHECK(first->Start() < second->Start());
1047     // If first and second intersect, split first.
1048     if (first->Start() < second->End() && second->Start() < first->End()) {
1049       LiveRange* temp = first->SplitAt(second->Start(), zone);
1050       CHECK(temp != first);
1051       temp->set_spilled(first->spilled());
1052       if (!temp->spilled())
1053         temp->set_assigned_register(first->assigned_register());
1054 
1055       first->next_ = second;
1056       first = temp;
1057       continue;
1058     }
1059     DCHECK(first->End() <= second->Start());
1060   }
1061 
1062   TopLevel()->UpdateParentForAllChildren(TopLevel());
1063   TopLevel()->UpdateSpillRangePostMerge(other);
1064   TopLevel()->set_has_slot_use(TopLevel()->has_slot_use() ||
1065                                other->has_slot_use());
1066 
1067 #if DEBUG
1068   Verify();
1069 #endif
1070 }
1071 
1072 
1073 void TopLevelLiveRange::VerifyChildrenInOrder() const {
1074   LifetimePosition last_end = End();
1075   for (const LiveRange* child = this->next(); child != nullptr;
1076        child = child->next()) {
1077     DCHECK(last_end <= child->Start());
1078     last_end = child->End();
1079   }
1080 }
1081 
1082 
1083 void TopLevelLiveRange::Verify() const {
1084   VerifyChildrenInOrder();
1085   for (const LiveRange* child = this; child != nullptr; child = child->next()) {
1086     VerifyChildStructure();
1087   }
1088 }
1089 
1090 
1091 void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
1092   TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
1093   DCHECK(first_interval_ != nullptr);
1094   DCHECK(first_interval_->start() <= start);
1095   DCHECK(start < first_interval_->end());
1096   first_interval_->set_start(start);
1097 }
1098 
1099 
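// Guarantees that the range covers [start, end[, absorbing any existing
// leading intervals that start no later than `end` into a single new first
// interval.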
1100 void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
1101                                        LifetimePosition end, Zone* zone) {
1102   TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
1103         end.value());
1104   LifetimePosition new_end = end;
1105   while (first_interval_ != nullptr && first_interval_->start() <= end) {
1106     if (first_interval_->end() > end) {
1107       new_end = first_interval_->end();
1108     }
1109     first_interval_ = first_interval_->next();
1110   }
1111 
1112   UseInterval* new_interval = new (zone) UseInterval(start, new_end);
1113   new_interval->set_next(first_interval_);
1114   first_interval_ = new_interval;
1115   if (new_interval->next() == nullptr) {
1116     last_interval_ = new_interval;
1117   }
1118 }
1119 
1120 
1121 void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
1122                                        LifetimePosition end, Zone* zone) {
1123   TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
1124         end.value());
1125   if (first_interval_ == nullptr) {
1126     UseInterval* interval = new (zone) UseInterval(start, end);
1127     first_interval_ = interval;
1128     last_interval_ = interval;
1129   } else {
1130     if (end == first_interval_->start()) {
1131       first_interval_->set_start(start);
1132     } else if (end < first_interval_->start()) {
1133       UseInterval* interval = new (zone) UseInterval(start, end);
1134       interval->set_next(first_interval_);
1135       first_interval_ = interval;
1136     } else {
1137       // The order of instruction processing (see ProcessInstructions) guarantees
1138       // that each new use interval either precedes, intersects with or touches
1139       // the last added interval.
1140       DCHECK(start <= first_interval_->end());
1141       first_interval_->set_start(Min(start, first_interval_->start()));
1142       first_interval_->set_end(Max(end, first_interval_->end()));
1143     }
1144   }
1145 }
1146 
1147 
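// Inserts use_pos into the use position list, keeping it sorted by position;
// if the new position carries a hint and no earlier position does, it becomes
// the current hint position.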
1148 void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
1149   LifetimePosition pos = use_pos->pos();
1150   TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
1151   UsePosition* prev_hint = nullptr;
1152   UsePosition* prev = nullptr;
1153   UsePosition* current = first_pos_;
1154   while (current != nullptr && current->pos() < pos) {
1155     prev_hint = current->HasHint() ? current : prev_hint;
1156     prev = current;
1157     current = current->next();
1158   }
1159 
1160   if (prev == nullptr) {
1161     use_pos->set_next(first_pos_);
1162     first_pos_ = use_pos;
1163   } else {
1164     use_pos->set_next(prev->next());
1165     prev->set_next(use_pos);
1166   }
1167 
1168   if (prev_hint == nullptr && use_pos->HasHint()) {
1169     current_hint_position_ = use_pos;
1170   }
1171 }
1172 
1173 
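// Returns true if the two interval lists (each sorted by start position)
// overlap anywhere.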
1174 static bool AreUseIntervalsIntersecting(UseInterval* interval1,
1175                                         UseInterval* interval2) {
1176   while (interval1 != nullptr && interval2 != nullptr) {
1177     if (interval1->start() < interval2->start()) {
1178       if (interval1->end() > interval2->start()) {
1179         return true;
1180       }
1181       interval1 = interval1->next();
1182     } else {
1183       if (interval2->end() > interval1->start()) {
1184         return true;
1185       }
1186       interval2 = interval2->next();
1187     }
1188   }
1189   return false;
1190 }
1191 
1192 
1193 std::ostream& operator<<(std::ostream& os,
1194                          const PrintableLiveRange& printable_range) {
1195   const LiveRange* range = printable_range.range_;
1196   os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
1197      << " ";
1198   if (range->TopLevel()->is_phi()) os << "phi ";
1199   if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
1200 
1201   os << "{" << std::endl;
1202   UseInterval* interval = range->first_interval();
1203   UsePosition* use_pos = range->first_pos();
1204   PrintableInstructionOperand pio;
1205   pio.register_configuration_ = printable_range.register_configuration_;
1206   while (use_pos != nullptr) {
1207     if (use_pos->HasOperand()) {
1208       pio.op_ = *use_pos->operand();
1209       os << pio << use_pos->pos() << " ";
1210     }
1211     use_pos = use_pos->next();
1212   }
1213   os << std::endl;
1214 
1215   while (interval != nullptr) {
1216     os << '[' << interval->start() << ", " << interval->end() << ')'
1217        << std::endl;
1218     interval = interval->next();
1219   }
1220   os << "}";
1221   return os;
1222 }
1223 
1224 SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
1225     : live_ranges_(zone),
1226       assigned_slot_(kUnassignedSlot),
1227       byte_width_(GetByteWidth(parent->representation())) {
1228   // Spill ranges are created for top level, non-splintered ranges. This is so
1229   // that, when merging decisions are made, we consider the full extent of the
1230   // virtual register, and avoid clobbering it.
1231   DCHECK(!parent->IsSplinter());
1232   UseInterval* result = nullptr;
1233   UseInterval* node = nullptr;
1234   // Copy the intervals for all ranges.
1235   for (LiveRange* range = parent; range != nullptr; range = range->next()) {
1236     UseInterval* src = range->first_interval();
1237     while (src != nullptr) {
1238       UseInterval* new_node = new (zone) UseInterval(src->start(), src->end());
1239       if (result == nullptr) {
1240         result = new_node;
1241       } else {
1242         node->set_next(new_node);
1243       }
1244       node = new_node;
1245       src = src->next();
1246     }
1247   }
1248   use_interval_ = result;
1249   live_ranges().push_back(parent);
1250   end_position_ = node->end();
1251   parent->SetSpillRange(this);
1252 }
1253 
1254 bool SpillRange::IsIntersectingWith(SpillRange* other) const {
1255   if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
1256       this->End() <= other->use_interval_->start() ||
1257       other->End() <= this->use_interval_->start()) {
1258     return false;
1259   }
1260   return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
1261 }
1262 
1263 
1264 bool SpillRange::TryMerge(SpillRange* other) {
1265   if (HasSlot() || other->HasSlot()) return false;
1266   if (byte_width() != other->byte_width() || IsIntersectingWith(other))
1267     return false;
1268 
1269   LifetimePosition max = LifetimePosition::MaxPosition();
1270   if (End() < other->End() && other->End() != max) {
1271     end_position_ = other->End();
1272   }
1273   other->end_position_ = max;
1274 
1275   MergeDisjointIntervals(other->use_interval_);
1276   other->use_interval_ = nullptr;
1277 
1278   for (TopLevelLiveRange* range : other->live_ranges()) {
1279     DCHECK(range->GetSpillRange() == other);
1280     range->SetSpillRange(this);
1281   }
1282 
1283   live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
1284                        other->live_ranges().end());
1285   other->live_ranges().clear();
1286 
1287   return true;
1288 }
1289 
1290 
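// Merges `other` into use_interval_; both lists are sorted by start position
// and assumed disjoint, so this is a simple merge of the two chains.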
1291 void SpillRange::MergeDisjointIntervals(UseInterval* other) {
1292   UseInterval* tail = nullptr;
1293   UseInterval* current = use_interval_;
1294   while (other != nullptr) {
1295     // Make sure the 'current' list starts first
1296     if (current == nullptr || current->start() > other->start()) {
1297       std::swap(current, other);
1298     }
1299     // Check disjointness
1300     DCHECK(other == nullptr || current->end() <= other->start());
1301     // Append the 'current' node to the result accumulator and move forward
1302     if (tail == nullptr) {
1303       use_interval_ = current;
1304     } else {
1305       tail->set_next(current);
1306     }
1307     tail = current;
1308     current = current->next();
1309   }
1310   // Other list is empty => we are done
1311 }
1312 
1313 
1314 void SpillRange::Print() const {
1315   OFStream os(stdout);
1316   os << "{" << std::endl;
1317   for (TopLevelLiveRange* range : live_ranges()) {
1318     os << range->vreg() << " ";
1319   }
1320   os << std::endl;
1321 
1322   for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
1323     os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
1324   }
1325   os << "}" << std::endl;
1326 }
1327 
1328 
1329 RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
1330                                                  const InstructionBlock* block,
1331                                                  Zone* zone)
1332     : phi_(phi),
1333       block_(block),
1334       incoming_operands_(zone),
1335       assigned_register_(kUnassignedRegister) {
1336   incoming_operands_.reserve(phi->operands().size());
1337 }
1338 
1339 
1340 void RegisterAllocationData::PhiMapValue::AddOperand(
1341     InstructionOperand* operand) {
1342   incoming_operands_.push_back(operand);
1343 }
1344 
1345 
1346 void RegisterAllocationData::PhiMapValue::CommitAssignment(
1347     const InstructionOperand& assigned) {
1348   for (InstructionOperand* operand : incoming_operands_) {
1349     InstructionOperand::ReplaceWith(operand, &assigned);
1350   }
1351 }
1352 
1353 RegisterAllocationData::RegisterAllocationData(
1354     const RegisterConfiguration* config, Zone* zone, Frame* frame,
1355     InstructionSequence* code, const char* debug_name)
1356     : allocation_zone_(zone),
1357       frame_(frame),
1358       code_(code),
1359       debug_name_(debug_name),
1360       config_(config),
1361       phi_map_(allocation_zone()),
1362       live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1363       live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1364       live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
1365                    allocation_zone()),
1366       fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
1367                          allocation_zone()),
1368       fixed_float_live_ranges_(allocation_zone()),
1369       fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
1370                                 allocation_zone()),
1371       fixed_simd128_live_ranges_(allocation_zone()),
1372       spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
1373       delayed_references_(allocation_zone()),
1374       assigned_registers_(nullptr),
1375       assigned_double_registers_(nullptr),
1376       virtual_register_count_(code->VirtualRegisterCount()),
1377       preassigned_slot_ranges_(zone) {
1378   if (!kSimpleFPAliasing) {
1379     fixed_float_live_ranges_.resize(this->config()->num_float_registers(),
1380                                     nullptr);
1381     fixed_simd128_live_ranges_.resize(this->config()->num_simd128_registers(),
1382                                       nullptr);
1383   }
1384 
1385   assigned_registers_ = new (code_zone())
1386       BitVector(this->config()->num_general_registers(), code_zone());
1387   assigned_double_registers_ = new (code_zone())
1388       BitVector(this->config()->num_double_registers(), code_zone());
1389   this->frame()->SetAllocatedRegisters(assigned_registers_);
1390   this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
1391 }
1392 
1393 
1394 MoveOperands* RegisterAllocationData::AddGapMove(
1395     int index, Instruction::GapPosition position,
1396     const InstructionOperand& from, const InstructionOperand& to) {
1397   Instruction* instr = code()->InstructionAt(index);
1398   ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
1399   return moves->AddMove(from, to);
1400 }
1401 
1402 
1403 MachineRepresentation RegisterAllocationData::RepresentationFor(
1404     int virtual_register) {
1405   DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
1406   return code()->GetRepresentation(virtual_register);
1407 }
1408 
1409 
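// Returns the top-level live range for virtual register `index`, growing the
// live range table and creating the range on first use.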
1410 TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
1411   if (index >= static_cast<int>(live_ranges().size())) {
1412     live_ranges().resize(index + 1, nullptr);
1413   }
1414   TopLevelLiveRange* result = live_ranges()[index];
1415   if (result == nullptr) {
1416     result = NewLiveRange(index, RepresentationFor(index));
1417     live_ranges()[index] = result;
1418   }
1419   return result;
1420 }
1421 
1422 
1423 TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
1424     int index, MachineRepresentation rep) {
1425   return new (allocation_zone()) TopLevelLiveRange(index, rep);
1426 }
1427 
1428 
1429 int RegisterAllocationData::GetNextLiveRangeId() {
1430   int vreg = virtual_register_count_++;
1431   if (vreg >= static_cast<int>(live_ranges().size())) {
1432     live_ranges().resize(vreg + 1, nullptr);
1433   }
1434   return vreg;
1435 }
1436 
1437 
1438 TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
1439     MachineRepresentation rep) {
1440   int vreg = GetNextLiveRangeId();
1441   TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
1442   return ret;
1443 }
1444 
1445 
1446 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
1447     const InstructionBlock* block, PhiInstruction* phi) {
1448   RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone())
1449       RegisterAllocationData::PhiMapValue(phi, block, allocation_zone());
1450   auto res =
1451       phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
1452   DCHECK(res.second);
1453   USE(res);
1454   return map_value;
1455 }
1456 
1457 
1458 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1459     int virtual_register) {
1460   auto it = phi_map_.find(virtual_register);
1461   DCHECK(it != phi_map_.end());
1462   return it->second;
1463 }
1464 
1465 
1466 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1467     TopLevelLiveRange* top_range) {
1468   return GetPhiMapValueFor(top_range->vreg());
1469 }
1470 
1471 
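// Consistency check: any virtual register that is live on entry to the first
// block is used before it is defined. Reports such registers and returns true
// if any were found.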
1472 bool RegisterAllocationData::ExistsUseWithoutDefinition() {
1473   bool found = false;
1474   BitVector::Iterator iterator(live_in_sets()[0]);
1475   while (!iterator.Done()) {
1476     found = true;
1477     int operand_index = iterator.Current();
1478     PrintF("Register allocator error: live v%d reached first block.\n",
1479            operand_index);
1480     LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
1481     PrintF("  (first use is at %d)\n", range->first_pos()->pos().value());
1482     if (debug_name() == nullptr) {
1483       PrintF("\n");
1484     } else {
1485       PrintF("  (function: %s)\n", debug_name());
1486     }
1487     iterator.Advance();
1488   }
1489   return found;
1490 }
1491 
1492 
1493 // If a range is defined in a deferred block, we expect the entire range to
1494 // only cover positions in deferred blocks. Otherwise, a block on the hot
1495 // path would be dominated by a deferred block, meaning it is unreachable
1496 // without passing through the deferred block, which is contradictory.
1497 // In particular, when such a range contributes a result back on the hot
1498 // path, it does so as one of the inputs of a phi. In that case, the value
1499 // is transferred via a move in the Gap::END position of the last instruction
1500 // of a deferred block.
1501 bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
1502   for (const TopLevelLiveRange* range : live_ranges()) {
1503     if (range == nullptr || range->IsEmpty() ||
1504         !code()
1505              ->GetInstructionBlock(range->Start().ToInstructionIndex())
1506              ->IsDeferred()) {
1507       continue;
1508     }
1509     for (const UseInterval* i = range->first_interval(); i != nullptr;
1510          i = i->next()) {
1511       int first = i->FirstGapIndex();
1512       int last = i->LastGapIndex();
1513       for (int instr = first; instr <= last;) {
1514         const InstructionBlock* block = code()->GetInstructionBlock(instr);
1515         if (!block->IsDeferred()) return false;
1516         instr = block->last_instruction_index() + 1;
1517       }
1518     }
1519   }
1520   return true;
1521 }
1522 
1523 SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
1524     TopLevelLiveRange* range) {
1525   DCHECK(!range->HasSpillOperand());
1526 
1527   SpillRange* spill_range = range->GetAllocatedSpillRange();
1528   if (spill_range == nullptr) {
1529     DCHECK(!range->IsSplinter());
1530     spill_range = new (allocation_zone()) SpillRange(range, allocation_zone());
1531   }
1532   range->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
1533 
1534   int spill_range_index =
1535       range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg();
1536 
1537   spill_ranges()[spill_range_index] = spill_range;
1538 
1539   return spill_range;
1540 }
1541 
1542 
1543 SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
1544     TopLevelLiveRange* range) {
1545   DCHECK(!range->HasSpillOperand());
1546   DCHECK(!range->IsSplinter());
1547   SpillRange* spill_range =
1548       new (allocation_zone()) SpillRange(range, allocation_zone());
1549   return spill_range;
1550 }
1551 
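// Records that register |index| has been assigned to a value of representation
// |rep|. With non-simple FP aliasing, a kFloat32/kSimd128 assignment is
// translated into all aliased kFloat64 registers, since the frame only tracks
// assigned double registers.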
1552 void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
1553                                            int index) {
1554   switch (rep) {
1555     case MachineRepresentation::kFloat32:
1556     case MachineRepresentation::kSimd128:
1557       if (kSimpleFPAliasing) {
1558         assigned_double_registers_->Add(index);
1559       } else {
1560         int alias_base_index = -1;
1561         int aliases = config()->GetAliases(
1562             rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1563         DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1564         while (aliases--) {
1565           int aliased_reg = alias_base_index + aliases;
1566           assigned_double_registers_->Add(aliased_reg);
1567         }
1568       }
1569       break;
1570     case MachineRepresentation::kFloat64:
1571       assigned_double_registers_->Add(index);
1572       break;
1573     default:
1574       DCHECK(!IsFloatingPoint(rep));
1575       assigned_registers_->Add(index);
1576       break;
1577   }
1578 }
1579 
1580 bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
1581   return pos.IsFullStart() &&
1582          code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
1583              pos.ToInstructionIndex();
1584 }
1585 
1586 
1587 ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
1588     : data_(data) {}
1589 
1590 
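// Rewrites an operand with a fixed policy into a concrete AllocatedOperand
// (a stack slot or a register) and, if the value is tagged, records it in the
// instruction's reference map so the GC can locate it.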
1591 InstructionOperand* ConstraintBuilder::AllocateFixed(
1592     UnallocatedOperand* operand, int pos, bool is_tagged) {
1593   TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
1594   DCHECK(operand->HasFixedPolicy());
1595   InstructionOperand allocated;
1596   MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1597   int virtual_register = operand->virtual_register();
1598   if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1599     rep = data()->RepresentationFor(virtual_register);
1600   }
1601   if (operand->HasFixedSlotPolicy()) {
1602     allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
1603                                  operand->fixed_slot_index());
1604   } else if (operand->HasFixedRegisterPolicy()) {
1605     DCHECK(!IsFloatingPoint(rep));
1606     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1607                                  operand->fixed_register_index());
1608   } else if (operand->HasFixedFPRegisterPolicy()) {
1609     DCHECK(IsFloatingPoint(rep));
1610     DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
1611     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1612                                  operand->fixed_register_index());
1613   } else {
1614     UNREACHABLE();
1615   }
1616   InstructionOperand::ReplaceWith(operand, &allocated);
1617   if (is_tagged) {
1618     TRACE("Fixed reg is tagged at %d\n", pos);
1619     Instruction* instr = code()->InstructionAt(pos);
1620     if (instr->HasReferenceMap()) {
1621       instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
1622     }
1623   }
1624   return operand;
1625 }
1626 
1627 
1628 void ConstraintBuilder::MeetRegisterConstraints() {
1629   for (InstructionBlock* block : code()->instruction_blocks()) {
1630     MeetRegisterConstraints(block);
1631   }
1632 }
1633 
1634 
1635 void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
1636   int start = block->first_instruction_index();
1637   int end = block->last_instruction_index();
1638   DCHECK_NE(-1, start);
1639   for (int i = start; i <= end; ++i) {
1640     MeetConstraintsBefore(i);
1641     if (i != end) MeetConstraintsAfter(i);
1642   }
1643   // Meet register constraints for the last instruction in the block.
1644   MeetRegisterConstraintsForLastInstructionInBlock(block);
1645 }
1646 
1647 
1648 void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
1649     const InstructionBlock* block) {
1650   int end = block->last_instruction_index();
1651   Instruction* last_instruction = code()->InstructionAt(end);
1652   for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
1653     InstructionOperand* output_operand = last_instruction->OutputAt(i);
1654     DCHECK(!output_operand->IsConstant());
1655     UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
1656     int output_vreg = output->virtual_register();
1657     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1658     bool assigned = false;
1659     if (output->HasFixedPolicy()) {
1660       AllocateFixed(output, -1, false);
1661       // This value is produced on the stack; we never need to spill it.
1662       if (output->IsStackSlot()) {
1663         DCHECK(LocationOperand::cast(output)->index() <
1664                data()->frame()->GetSpillSlotCount());
1665         range->SetSpillOperand(LocationOperand::cast(output));
1666         range->SetSpillStartIndex(end);
1667         assigned = true;
1668       }
1669 
1670       for (const RpoNumber& succ : block->successors()) {
1671         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1672         DCHECK(successor->PredecessorCount() == 1);
1673         int gap_index = successor->first_instruction_index();
1674         // Create an unconstrained operand for the same virtual register
1675         // and insert a gap move from the fixed output to the operand.
1676         UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
1677         data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
1678       }
1679     }
1680 
1681     if (!assigned) {
1682       for (const RpoNumber& succ : block->successors()) {
1683         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1684         DCHECK(successor->PredecessorCount() == 1);
1685         int gap_index = successor->first_instruction_index();
1686         range->RecordSpillLocation(allocation_zone(), gap_index, output);
1687         range->SetSpillStartIndex(gap_index);
1688       }
1689     }
1690   }
1691 }
1692 
1693 
1694 void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
1695   Instruction* first = code()->InstructionAt(instr_index);
1696   // Handle fixed temporaries.
1697   for (size_t i = 0; i < first->TempCount(); i++) {
1698     UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
1699     if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false);
1700   }
1701   // Handle constant/fixed output operands.
1702   for (size_t i = 0; i < first->OutputCount(); i++) {
1703     InstructionOperand* output = first->OutputAt(i);
1704     if (output->IsConstant()) {
1705       int output_vreg = ConstantOperand::cast(output)->virtual_register();
1706       TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1707       range->SetSpillStartIndex(instr_index + 1);
1708       range->SetSpillOperand(output);
1709       continue;
1710     }
1711     UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
1712     TopLevelLiveRange* range =
1713         data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
1714     bool assigned = false;
1715     if (first_output->HasFixedPolicy()) {
1716       int output_vreg = first_output->virtual_register();
1717       UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
1718       bool is_tagged = code()->IsReference(output_vreg);
1719       if (first_output->HasSecondaryStorage()) {
1720         range->MarkHasPreassignedSlot();
1721         data()->preassigned_slot_ranges().push_back(
1722             std::make_pair(range, first_output->GetSecondaryStorage()));
1723       }
1724       AllocateFixed(first_output, instr_index, is_tagged);
1725 
1726       // This value is produced on the stack; we never need to spill it.
1727       if (first_output->IsStackSlot()) {
1728         DCHECK(LocationOperand::cast(first_output)->index() <
1729                data()->frame()->GetTotalFrameSlotCount());
1730         range->SetSpillOperand(LocationOperand::cast(first_output));
1731         range->SetSpillStartIndex(instr_index + 1);
1732         assigned = true;
1733       }
1734       data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
1735                          output_copy);
1736     }
1737     // Make sure we add a gap move for spilling (if we have not done
1738     // so already).
1739     if (!assigned) {
1740       range->RecordSpillLocation(allocation_zone(), instr_index + 1,
1741                                  first_output);
1742       range->SetSpillStartIndex(instr_index + 1);
1743     }
1744   }
1745 }
1746 
1747 
1748 void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
1749   Instruction* second = code()->InstructionAt(instr_index);
1750   // Handle fixed input operands of second instruction.
1751   for (size_t i = 0; i < second->InputCount(); i++) {
1752     InstructionOperand* input = second->InputAt(i);
1753     if (input->IsImmediate() || input->IsExplicit()) {
1754       continue;  // Ignore immediates and explicitly reserved registers.
1755     }
1756     UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
1757     if (cur_input->HasFixedPolicy()) {
1758       int input_vreg = cur_input->virtual_register();
1759       UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
1760       bool is_tagged = code()->IsReference(input_vreg);
1761       AllocateFixed(cur_input, instr_index, is_tagged);
1762       data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
1763     }
1764   }
1765   // Handle "output same as input" for second instruction.
1766   for (size_t i = 0; i < second->OutputCount(); i++) {
1767     InstructionOperand* output = second->OutputAt(i);
1768     if (!output->IsUnallocated()) continue;
1769     UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
1770     if (!second_output->HasSameAsInputPolicy()) continue;
1771     DCHECK(i == 0);  // Only valid for first output.
1772     UnallocatedOperand* cur_input =
1773         UnallocatedOperand::cast(second->InputAt(0));
1774     int output_vreg = second_output->virtual_register();
1775     int input_vreg = cur_input->virtual_register();
1776     UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
1777     cur_input->set_virtual_register(second_output->virtual_register());
1778     MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
1779                                                 input_copy, *cur_input);
1780     if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
1781       if (second->HasReferenceMap()) {
1782         RegisterAllocationData::DelayedReference delayed_reference = {
1783             second->reference_map(), &gap_move->source()};
1784         data()->delayed_references().push_back(delayed_reference);
1785       }
1786     } else if (!code()->IsReference(input_vreg) &&
1787                code()->IsReference(output_vreg)) {
1788       // The input is assumed to immediately have a tagged representation,
1789       // before the pointer map can be used. I.e. the pointer map at the
1790       // instruction will include the output operand (whose value at the
1791       // beginning of the instruction is equal to the input operand). If
1792       // this is not desired, then the pointer map at this instruction needs
1793       // to be adjusted manually.
1794     }
1795   }
1796 }
1797 
1798 
1799 void ConstraintBuilder::ResolvePhis() {
1800   // Process the blocks in reverse order.
1801   for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
1802     ResolvePhis(block);
1803   }
1804 }
1805 
1806 
1807 void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
1808   for (PhiInstruction* phi : block->phis()) {
1809     int phi_vreg = phi->virtual_register();
1810     RegisterAllocationData::PhiMapValue* map_value =
1811         data()->InitializePhiMap(block, phi);
1812     InstructionOperand& output = phi->output();
1813     // Map the destination operands, so the commitment phase can find them.
1814     for (size_t i = 0; i < phi->operands().size(); ++i) {
1815       InstructionBlock* cur_block =
1816           code()->InstructionBlockAt(block->predecessors()[i]);
1817       UnallocatedOperand input(UnallocatedOperand::ANY, phi->operands()[i]);
1818       MoveOperands* move = data()->AddGapMove(
1819           cur_block->last_instruction_index(), Instruction::END, input, output);
1820       map_value->AddOperand(&move->destination());
1821       DCHECK(!code()
1822                   ->InstructionAt(cur_block->last_instruction_index())
1823                   ->HasReferenceMap());
1824     }
1825     TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
1826     int gap_index = block->first_instruction_index();
1827     live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
1828     live_range->SetSpillStartIndex(gap_index);
1829     // We use the phi-ness of some nodes in some later heuristics.
1830     live_range->set_is_phi(true);
1831     live_range->set_is_non_loop_phi(!block->IsLoopHeader());
1832   }
1833 }
1834 
1835 
1836 LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data,
1837                                    Zone* local_zone)
1838     : data_(data), phi_hints_(local_zone) {}
1839 
1840 
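// Computes (and caches in live_out_sets()) the set of virtual registers live
// on exit from |block|, ignoring backedges: the union of the forward
// successors' live-in sets plus the phi inputs flowing along each forward
// successor edge.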
1841 BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
1842                                             RegisterAllocationData* data) {
1843   size_t block_index = block->rpo_number().ToSize();
1844   BitVector* live_out = data->live_out_sets()[block_index];
1845   if (live_out == nullptr) {
1846     // Compute live-out for the given block, excluding contributions from
1847     // backward successor edges.
1848     Zone* zone = data->allocation_zone();
1849     const InstructionSequence* code = data->code();
1850 
1851     live_out = new (zone) BitVector(code->VirtualRegisterCount(), zone);
1852 
1853     // Process all successor blocks.
1854     for (const RpoNumber& succ : block->successors()) {
1855       // Add values live on entry to the successor.
1856       if (succ <= block->rpo_number()) continue;
1857       BitVector* live_in = data->live_in_sets()[succ.ToSize()];
1858       if (live_in != nullptr) live_out->Union(*live_in);
1859 
1860       // All phi input operands corresponding to this successor edge are live
1861       // out from this block.
1862       const InstructionBlock* successor = code->InstructionBlockAt(succ);
1863       size_t index = successor->PredecessorIndexOf(block->rpo_number());
1864       DCHECK(index < successor->PredecessorCount());
1865       for (PhiInstruction* phi : successor->phis()) {
1866         live_out->Add(phi->operands()[index]);
1867       }
1868     }
1869     data->live_out_sets()[block_index] = live_out;
1870   }
1871   return live_out;
1872 }
1873 
1874 
1875 void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
1876                                            BitVector* live_out) {
1877   // Add an interval that includes the entire block to the live range for
1878   // each live_out value.
1879   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
1880       block->first_instruction_index());
1881   LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
1882                              block->last_instruction_index())
1883                              .NextStart();
1884   BitVector::Iterator iterator(live_out);
1885   while (!iterator.Done()) {
1886     int operand_index = iterator.Current();
1887     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
1888     range->AddUseInterval(start, end, allocation_zone());
1889     iterator.Advance();
1890   }
1891 }
1892 
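// Fixed live ranges get negative IDs. FP ranges of different representations
// are kept in disjoint ID sub-ranges by offsetting past the preceding register
// files; the fall-throughs below are intentional. For example, with 16 general
// and 16 double registers (the counts are architecture-dependent), kFloat64
// register 2 would get ID -2 - 1 - 16 = -19.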
1893 int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
1894   int result = -index - 1;
1895   switch (rep) {
1896     case MachineRepresentation::kSimd128:
1897       result -= config()->num_float_registers();
1898     // Fall through.
1899     case MachineRepresentation::kFloat32:
1900       result -= config()->num_double_registers();
1901     // Fall through.
1902     case MachineRepresentation::kFloat64:
1903       result -= config()->num_general_registers();
1904       break;
1905     default:
1906       UNREACHABLE();
1907       break;
1908   }
1909   return result;
1910 }
1911 
1912 TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
1913   DCHECK(index < config()->num_general_registers());
1914   TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
1915   if (result == nullptr) {
1916     MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1917     result = data()->NewLiveRange(FixedLiveRangeID(index), rep);
1918     DCHECK(result->IsFixed());
1919     result->set_assigned_register(index);
1920     data()->MarkAllocated(rep, index);
1921     data()->fixed_live_ranges()[index] = result;
1922   }
1923   return result;
1924 }
1925 
1926 TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
1927     int index, MachineRepresentation rep) {
1928   int num_regs = config()->num_double_registers();
1929   ZoneVector<TopLevelLiveRange*>* live_ranges =
1930       &data()->fixed_double_live_ranges();
1931   if (!kSimpleFPAliasing) {
1932     switch (rep) {
1933       case MachineRepresentation::kFloat32:
1934         num_regs = config()->num_float_registers();
1935         live_ranges = &data()->fixed_float_live_ranges();
1936         break;
1937       case MachineRepresentation::kSimd128:
1938         num_regs = config()->num_simd128_registers();
1939         live_ranges = &data()->fixed_simd128_live_ranges();
1940         break;
1941       default:
1942         break;
1943     }
1944   }
1945 
1946   DCHECK(index < num_regs);
1947   USE(num_regs);
1948   TopLevelLiveRange* result = (*live_ranges)[index];
1949   if (result == nullptr) {
1950     result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
1951     DCHECK(result->IsFixed());
1952     result->set_assigned_register(index);
1953     data()->MarkAllocated(rep, index);
1954     (*live_ranges)[index] = result;
1955   }
1956   return result;
1957 }
1958 
1959 TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
1960   if (operand->IsUnallocated()) {
1961     return data()->GetOrCreateLiveRangeFor(
1962         UnallocatedOperand::cast(operand)->virtual_register());
1963   } else if (operand->IsConstant()) {
1964     return data()->GetOrCreateLiveRangeFor(
1965         ConstantOperand::cast(operand)->virtual_register());
1966   } else if (operand->IsRegister()) {
1967     return FixedLiveRangeFor(
1968         LocationOperand::cast(operand)->GetRegister().code());
1969   } else if (operand->IsFPRegister()) {
1970     LocationOperand* op = LocationOperand::cast(operand);
1971     return FixedFPLiveRangeFor(op->register_code(), op->representation());
1972   } else {
1973     return nullptr;
1974   }
1975 }
1976 
1977 
1978 UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
1979                                               InstructionOperand* operand,
1980                                               void* hint,
1981                                               UsePositionHintType hint_type) {
1982   return new (allocation_zone()) UsePosition(pos, operand, hint, hint_type);
1983 }
1984 
1985 
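// Records a definition of |operand|'s value at |position|. The live range is
// shortened to start at the definition (or given a minimal interval if this is
// a definition without a use), and for unallocated operands a UsePosition
// carrying the hint is added.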
1986 UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
1987                                       InstructionOperand* operand, void* hint,
1988                                       UsePositionHintType hint_type) {
1989   TopLevelLiveRange* range = LiveRangeFor(operand);
1990   if (range == nullptr) return nullptr;
1991 
1992   if (range->IsEmpty() || range->Start() > position) {
1993     // Can happen if there is a definition without use.
1994     range->AddUseInterval(position, position.NextStart(), allocation_zone());
1995     range->AddUsePosition(NewUsePosition(position.NextStart()));
1996   } else {
1997     range->ShortenTo(position);
1998   }
1999   if (!operand->IsUnallocated()) return nullptr;
2000   UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2001   UsePosition* use_pos =
2002       NewUsePosition(position, unalloc_operand, hint, hint_type);
2003   range->AddUsePosition(use_pos);
2004   return use_pos;
2005 }
2006 
2007 
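// Records a use of |operand| at |position|. Because instructions are processed
// bottom-up, the value is conservatively kept live from |block_start| until a
// definition is later seen and shortens the range.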
2008 UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
2009                                    LifetimePosition position,
2010                                    InstructionOperand* operand, void* hint,
2011                                    UsePositionHintType hint_type) {
2012   TopLevelLiveRange* range = LiveRangeFor(operand);
2013   if (range == nullptr) return nullptr;
2014   UsePosition* use_pos = nullptr;
2015   if (operand->IsUnallocated()) {
2016     UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
2017     use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
2018     range->AddUsePosition(use_pos);
2019   }
2020   range->AddUseInterval(block_start, position, allocation_zone());
2021   return use_pos;
2022 }
2023 
2024 
2025 void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
2026                                            BitVector* live) {
2027   int block_start = block->first_instruction_index();
2028   LifetimePosition block_start_position =
2029       LifetimePosition::GapFromInstructionIndex(block_start);
2030   bool fixed_float_live_ranges = false;
2031   bool fixed_simd128_live_ranges = false;
2032   if (!kSimpleFPAliasing) {
2033     int mask = data()->code()->representation_mask();
2034     fixed_float_live_ranges = (mask & kFloatRepBit) != 0;
2035     fixed_simd128_live_ranges = (mask & kSimd128RepBit) != 0;
2036   }
2037 
2038   for (int index = block->last_instruction_index(); index >= block_start;
2039        index--) {
2040     LifetimePosition curr_position =
2041         LifetimePosition::InstructionFromInstructionIndex(index);
2042     Instruction* instr = code()->InstructionAt(index);
2043     DCHECK(instr != nullptr);
2044     DCHECK(curr_position.IsInstructionPosition());
2045     // Process output, inputs, and temps of this instruction.
2046     for (size_t i = 0; i < instr->OutputCount(); i++) {
2047       InstructionOperand* output = instr->OutputAt(i);
2048       if (output->IsUnallocated()) {
2049         // Unsupported.
2050         DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
2051         int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
2052         live->Remove(out_vreg);
2053       } else if (output->IsConstant()) {
2054         int out_vreg = ConstantOperand::cast(output)->virtual_register();
2055         live->Remove(out_vreg);
2056       }
2057       if (block->IsHandler() && index == block_start && output->IsAllocated() &&
2058           output->IsRegister() &&
2059           AllocatedOperand::cast(output)->GetRegister().is(
2060               v8::internal::kReturnRegister0)) {
2061         // The register defined here is blocked from gap start - it is the
2062         // exception value.
2063         // TODO(mtrofin): should we explore an explicit opcode for
2064         // the first instruction in the handler?
2065         Define(LifetimePosition::GapFromInstructionIndex(index), output);
2066       } else {
2067         Define(curr_position, output);
2068       }
2069     }
2070 
2071     if (instr->ClobbersRegisters()) {
2072       for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
2073         // Create a UseInterval at this instruction for all fixed registers,
2074         // (including the instruction outputs). Adding another UseInterval here
2075         // is OK because AddUseInterval will just merge it with the existing
2076         // one at the end of the range.
2077         int code = config()->GetAllocatableGeneralCode(i);
2078         TopLevelLiveRange* range = FixedLiveRangeFor(code);
2079         range->AddUseInterval(curr_position, curr_position.End(),
2080                               allocation_zone());
2081       }
2082     }
2083 
2084     if (instr->ClobbersDoubleRegisters()) {
2085       for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
2086         // Add a UseInterval for all DoubleRegisters. See comment above for
2087         // general registers.
2088         int code = config()->GetAllocatableDoubleCode(i);
2089         TopLevelLiveRange* range =
2090             FixedFPLiveRangeFor(code, MachineRepresentation::kFloat64);
2091         range->AddUseInterval(curr_position, curr_position.End(),
2092                               allocation_zone());
2093       }
2094       // Clobber fixed float registers on archs with non-simple aliasing.
2095       if (!kSimpleFPAliasing) {
2096         if (fixed_float_live_ranges) {
2097           for (int i = 0; i < config()->num_allocatable_float_registers();
2098                ++i) {
2099             // Add a UseInterval for all FloatRegisters. See comment above for
2100             // general registers.
2101             int code = config()->GetAllocatableFloatCode(i);
2102             TopLevelLiveRange* range =
2103                 FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32);
2104             range->AddUseInterval(curr_position, curr_position.End(),
2105                                   allocation_zone());
2106           }
2107         }
2108         if (fixed_simd128_live_ranges) {
2109           for (int i = 0; i < config()->num_allocatable_simd128_registers();
2110                ++i) {
2111             int code = config()->GetAllocatableSimd128Code(i);
2112             TopLevelLiveRange* range =
2113                 FixedFPLiveRangeFor(code, MachineRepresentation::kSimd128);
2114             range->AddUseInterval(curr_position, curr_position.End(),
2115                                   allocation_zone());
2116           }
2117         }
2118       }
2119     }
2120 
2121     for (size_t i = 0; i < instr->InputCount(); i++) {
2122       InstructionOperand* input = instr->InputAt(i);
2123       if (input->IsImmediate() || input->IsExplicit()) {
2124         continue;  // Ignore immediates and explicitly reserved registers.
2125       }
2126       LifetimePosition use_pos;
2127       if (input->IsUnallocated() &&
2128           UnallocatedOperand::cast(input)->IsUsedAtStart()) {
2129         use_pos = curr_position;
2130       } else {
2131         use_pos = curr_position.End();
2132       }
2133 
2134       if (input->IsUnallocated()) {
2135         UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
2136         int vreg = unalloc->virtual_register();
2137         live->Add(vreg);
2138         if (unalloc->HasSlotPolicy()) {
2139           data()->GetOrCreateLiveRangeFor(vreg)->set_has_slot_use(true);
2140         }
2141       }
2142       Use(block_start_position, use_pos, input);
2143     }
2144 
2145     for (size_t i = 0; i < instr->TempCount(); i++) {
2146       InstructionOperand* temp = instr->TempAt(i);
2147       // Unsupported.
2148       DCHECK_IMPLIES(temp->IsUnallocated(),
2149                      !UnallocatedOperand::cast(temp)->HasSlotPolicy());
2150       if (instr->ClobbersTemps()) {
2151         if (temp->IsRegister()) continue;
2152         if (temp->IsUnallocated()) {
2153           UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
2154           if (temp_unalloc->HasFixedPolicy()) {
2155             continue;
2156           }
2157         }
2158       }
2159       Use(block_start_position, curr_position.End(), temp);
2160       Define(curr_position, temp);
2161     }
2162 
2163     // Process the moves of the instruction's gaps, making their sources live.
2164     const Instruction::GapPosition kPositions[] = {Instruction::END,
2165                                                    Instruction::START};
2166     curr_position = curr_position.PrevStart();
2167     DCHECK(curr_position.IsGapPosition());
2168     for (const Instruction::GapPosition& position : kPositions) {
2169       ParallelMove* move = instr->GetParallelMove(position);
2170       if (move == nullptr) continue;
2171       if (position == Instruction::END) {
2172         curr_position = curr_position.End();
2173       } else {
2174         curr_position = curr_position.Start();
2175       }
2176       for (MoveOperands* cur : *move) {
2177         InstructionOperand& from = cur->source();
2178         InstructionOperand& to = cur->destination();
2179         void* hint = &to;
2180         UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
2181         UsePosition* to_use = nullptr;
2182         int phi_vreg = -1;
2183         if (to.IsUnallocated()) {
2184           int to_vreg = UnallocatedOperand::cast(to).virtual_register();
2185           TopLevelLiveRange* to_range =
2186               data()->GetOrCreateLiveRangeFor(to_vreg);
2187           if (to_range->is_phi()) {
2188             phi_vreg = to_vreg;
2189             if (to_range->is_non_loop_phi()) {
2190               hint = to_range->current_hint_position();
2191               hint_type = hint == nullptr ? UsePositionHintType::kNone
2192                                           : UsePositionHintType::kUsePos;
2193             } else {
2194               hint_type = UsePositionHintType::kPhi;
2195               hint = data()->GetPhiMapValueFor(to_vreg);
2196             }
2197           } else {
2198             if (live->Contains(to_vreg)) {
2199               to_use = Define(curr_position, &to, &from,
2200                               UsePosition::HintTypeForOperand(from));
2201               live->Remove(to_vreg);
2202             } else {
2203               cur->Eliminate();
2204               continue;
2205             }
2206           }
2207         } else {
2208           Define(curr_position, &to);
2209         }
2210         UsePosition* from_use =
2211             Use(block_start_position, curr_position, &from, hint, hint_type);
2212         // Mark range live.
2213         if (from.IsUnallocated()) {
2214           live->Add(UnallocatedOperand::cast(from).virtual_register());
2215         }
2216         // Resolve use position hints just created.
2217         if (to_use != nullptr && from_use != nullptr) {
2218           to_use->ResolveHint(from_use);
2219           from_use->ResolveHint(to_use);
2220         }
2221         DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved());
2222         DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved());
2223         // Potentially resolve phi hint.
2224         if (phi_vreg != -1) ResolvePhiHint(&from, from_use);
2225       }
2226     }
2227   }
2228 }
2229 
2230 void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
2231                                    BitVector* live) {
2232   for (PhiInstruction* phi : block->phis()) {
2233     // The live range interval already ends at the first instruction of the
2234     // block.
2235     int phi_vreg = phi->virtual_register();
2236     live->Remove(phi_vreg);
2237     // Select a hint from a predecessor block that precedes this block in the
2238     // rpo order. In order of priority:
2239     // - Avoid hints from deferred blocks.
2240     // - Prefer hints from allocated (or explicit) operands.
2241     // - Prefer hints from empty blocks (containing just parallel moves and a
2242     //   jump). In these cases, if we can elide the moves, the jump threader
2243     //   is likely to be able to elide the jump.
2244     // The enforcement of hinting in rpo order is required because hint
2245     // resolution that happens later in the compiler pipeline visits
2246     // instructions in reverse rpo order, relying on the fact that phis are
2247     // encountered before their hints.
2248     InstructionOperand* hint = nullptr;
2249     int hint_preference = 0;
2250 
2251     // The cost of hinting increases with the number of predecessors. At the
2252     // same time, the typical benefit decreases, since this hinting only
2253     // optimises the execution path through one predecessor. A limit of 2 is
2254     // sufficient to hit the common if/else pattern.
2255     int predecessor_limit = 2;
2256 
2257     for (RpoNumber predecessor : block->predecessors()) {
2258       const InstructionBlock* predecessor_block =
2259           code()->InstructionBlockAt(predecessor);
2260       DCHECK_EQ(predecessor_block->rpo_number(), predecessor);
2261 
2262       // Only take hints from earlier rpo numbers.
2263       if (predecessor >= block->rpo_number()) continue;
2264 
2265       // Look up the predecessor instruction.
2266       const Instruction* predecessor_instr =
2267           GetLastInstruction(code(), predecessor_block);
2268       InstructionOperand* predecessor_hint = nullptr;
2269       // Phis are assigned in the END position of the last instruction in each
2270       // predecessor block.
2271       for (MoveOperands* move :
2272            *predecessor_instr->GetParallelMove(Instruction::END)) {
2273         InstructionOperand& to = move->destination();
2274         if (to.IsUnallocated() &&
2275             UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
2276           predecessor_hint = &move->source();
2277           break;
2278         }
2279       }
2280       DCHECK_NOT_NULL(predecessor_hint);
2281 
2282       // For each predecessor, generate a score according to the priorities
2283       // described above, and pick the best one. Flags in higher-order bits have
2284       // a higher priority than those in lower-order bits.
2285       int predecessor_hint_preference = 0;
2286       const int kNotDeferredBlockPreference = (1 << 2);
2287       const int kMoveIsAllocatedPreference = (1 << 1);
2288       const int kBlockIsEmptyPreference = (1 << 0);
2289 
2290       // - Avoid hints from deferred blocks.
2291       if (!predecessor_block->IsDeferred()) {
2292         predecessor_hint_preference |= kNotDeferredBlockPreference;
2293       }
2294 
2295       // - Prefer hints from allocated (or explicit) operands.
2296       //
2297       // Already-allocated or explicit operands are typically assigned using
2298       // the parallel moves on the last instruction. For example:
2299       //
2300       //      gap (v101 = [x0|R|w32]) (v100 = v101)
2301       //      ArchJmp
2302       //    ...
2303       //    phi: v100 = v101 v102
2304       //
2305       // We have already found the END move, so look for a matching START move
2306       // from an allocated (or explicit) operand.
2307       //
2308       // Note that we cannot simply look up data()->live_ranges()[vreg] here
2309       // because the live ranges are still being built when this function is
2310       // called.
2311       // TODO(v8): Find a way to separate hinting from live range analysis in
2312       // BuildLiveRanges so that we can use the O(1) live-range look-up.
2313       auto moves = predecessor_instr->GetParallelMove(Instruction::START);
2314       if (moves != nullptr) {
2315         for (MoveOperands* move : *moves) {
2316           InstructionOperand& to = move->destination();
2317           if (predecessor_hint->Equals(to)) {
2318             if (move->source().IsAllocated() || move->source().IsExplicit()) {
2319               predecessor_hint_preference |= kMoveIsAllocatedPreference;
2320             }
2321             break;
2322           }
2323         }
2324       }
2325 
2326       // - Prefer hints from empty blocks.
2327       if (predecessor_block->last_instruction_index() ==
2328           predecessor_block->first_instruction_index()) {
2329         predecessor_hint_preference |= kBlockIsEmptyPreference;
2330       }
2331 
2332       if ((hint == nullptr) ||
2333           (predecessor_hint_preference > hint_preference)) {
2334         // Take the hint from this predecessor.
2335         hint = predecessor_hint;
2336         hint_preference = predecessor_hint_preference;
2337       }
2338 
2339       if (--predecessor_limit <= 0) break;
2340     }
2341     DCHECK_NOT_NULL(hint);
2342 
2343     LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
2344         block->first_instruction_index());
2345     UsePosition* use_pos = Define(block_start, &phi->output(), hint,
2346                                   UsePosition::HintTypeForOperand(*hint));
2347     MapPhiHint(hint, use_pos);
2348   }
2349 }
2350 
2351 
2352 void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
2353                                          BitVector* live) {
2354   DCHECK(block->IsLoopHeader());
2355   // Add a live range stretching from the first loop instruction to the last
2356   // for each value live on entry to the header.
2357   BitVector::Iterator iterator(live);
2358   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
2359       block->first_instruction_index());
2360   LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
2361                              code()->LastLoopInstructionIndex(block))
2362                              .NextFullStart();
2363   while (!iterator.Done()) {
2364     int operand_index = iterator.Current();
2365     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
2366     range->EnsureInterval(start, end, allocation_zone());
2367     iterator.Advance();
2368   }
2369   // Insert all values into the live in sets of all blocks in the loop.
2370   for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
2371        ++i) {
2372     live_in_sets()[i]->Union(*live);
2373   }
2374 }
2375 
2376 
2377 void LiveRangeBuilder::BuildLiveRanges() {
2378   // Process the blocks in reverse order.
2379   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2380        --block_id) {
2381     InstructionBlock* block =
2382         code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2383     BitVector* live = ComputeLiveOut(block, data());
2384     // Initially consider all live_out values live for the entire block. We
2385     // will shorten these intervals if necessary.
2386     AddInitialIntervals(block, live);
2387     // Process the instructions in reverse order, generating and killing
2388     // live values.
2389     ProcessInstructions(block, live);
2390     // All phi output operands are killed by this block.
2391     ProcessPhis(block, live);
2392     // Now live is the live-in set for this block, except that it does not
2393     // yet include values that are live out on backward successor edges.
2394     if (block->IsLoopHeader()) ProcessLoopHeader(block, live);
2395     live_in_sets()[block_id] = live;
2396   }
2397   // Postprocess the ranges.
2398   for (TopLevelLiveRange* range : data()->live_ranges()) {
2399     if (range == nullptr) continue;
2400     // Give slots to all ranges with a non-fixed slot use.
2401     if (range->has_slot_use() && range->HasNoSpillType()) {
2402       data()->AssignSpillRangeToLiveRange(range);
2403     }
2404     // TODO(bmeurer): This is a horrible hack to make sure that for constant
2405     // live ranges, every use requires the constant to be in a register.
2406     // Without this hack, all uses with "any" policy would get the constant
2407     // operand assigned.
2408     if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
2409       for (UsePosition* pos = range->first_pos(); pos != nullptr;
2410            pos = pos->next()) {
2411         if (pos->type() == UsePositionType::kRequiresSlot) continue;
2412         UsePositionType new_type = UsePositionType::kAny;
2413         // Can't mark phis as needing a register.
2414         if (!pos->pos().IsGapPosition()) {
2415           new_type = UsePositionType::kRequiresRegister;
2416         }
2417         pos->set_type(new_type, true);
2418       }
2419     }
2420   }
2421   for (auto preassigned : data()->preassigned_slot_ranges()) {
2422     TopLevelLiveRange* range = preassigned.first;
2423     int slot_id = preassigned.second;
2424     SpillRange* spill = range->HasSpillRange()
2425                             ? range->GetSpillRange()
2426                             : data()->AssignSpillRangeToLiveRange(range);
2427     spill->set_assigned_slot(slot_id);
2428   }
2429 #ifdef DEBUG
2430   Verify();
2431 #endif
2432 }
2433 
2434 
2435 void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
2436                                   UsePosition* use_pos) {
2437   DCHECK(!use_pos->IsResolved());
2438   auto res = phi_hints_.insert(std::make_pair(operand, use_pos));
2439   DCHECK(res.second);
2440   USE(res);
2441 }
2442 
2443 
2444 void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
2445                                       UsePosition* use_pos) {
2446   auto it = phi_hints_.find(operand);
2447   if (it == phi_hints_.end()) return;
2448   DCHECK(!it->second->IsResolved());
2449   it->second->ResolveHint(use_pos);
2450 }
2451 
2452 
2453 void LiveRangeBuilder::Verify() const {
2454   for (auto& hint : phi_hints_) {
2455     CHECK(hint.second->IsResolved());
2456   }
2457   for (const TopLevelLiveRange* current : data()->live_ranges()) {
2458     if (current != nullptr && !current->IsEmpty()) {
2459       // New LiveRanges should not be split.
2460       CHECK_NULL(current->next());
2461       // General integrity check.
2462       current->Verify();
2463       const UseInterval* first = current->first_interval();
2464       if (first->next() == nullptr) continue;
2465 
2466       // Consecutive intervals should not end and start in the same block,
2467       // otherwise the intervals should have been joined, because the
2468       // variable is live throughout that block.
2469       CHECK(NextIntervalStartsInDifferentBlocks(first));
2470 
2471       for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
2472         // Except for the first interval, the other intervals must start at
2473         // a block boundary, otherwise data wouldn't flow to them.
2474         CHECK(IntervalStartsAtBlockBoundary(i));
2475         // The last instruction of the predecessors of the block the interval
2476         // starts must be covered by the range.
2477         CHECK(IntervalPredecessorsCoveredByRange(i, current));
2478         if (i->next() != nullptr) {
2479           // Check the consecutive intervals property, except for the last
2480           // interval, where it doesn't apply.
2481           CHECK(NextIntervalStartsInDifferentBlocks(i));
2482         }
2483       }
2484     }
2485   }
2486 }
2487 
2488 bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
2489     const UseInterval* interval) const {
2490   LifetimePosition start = interval->start();
2491   if (!start.IsFullStart()) return false;
2492   int instruction_index = start.ToInstructionIndex();
2493   const InstructionBlock* block =
2494       data()->code()->GetInstructionBlock(instruction_index);
2495   return block->first_instruction_index() == instruction_index;
2496 }
2497 
2498 bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
2499     const UseInterval* interval, const TopLevelLiveRange* range) const {
2500   LifetimePosition start = interval->start();
2501   int instruction_index = start.ToInstructionIndex();
2502   const InstructionBlock* block =
2503       data()->code()->GetInstructionBlock(instruction_index);
2504   for (RpoNumber pred_index : block->predecessors()) {
2505     const InstructionBlock* predecessor =
2506         data()->code()->InstructionBlockAt(pred_index);
2507     LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
2508         predecessor->last_instruction_index());
2509     last_pos = last_pos.NextStart().End();
2510     if (!range->Covers(last_pos)) return false;
2511   }
2512   return true;
2513 }
2514 
2515 bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
2516     const UseInterval* interval) const {
2517   DCHECK_NOT_NULL(interval->next());
2518   LifetimePosition end = interval->end();
2519   LifetimePosition next_start = interval->next()->start();
2520   // Since end is not covered, but the previous position is, move back one
2521   // position.
2522   end = end.IsStart() ? end.PrevStart().End() : end.Start();
2523   int last_covered_index = end.ToInstructionIndex();
2524   const InstructionBlock* block =
2525       data()->code()->GetInstructionBlock(last_covered_index);
2526   const InstructionBlock* next_block =
2527       data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
2528   return block->rpo_number() < next_block->rpo_number();
2529 }
2530 
2531 RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
2532                                      RegisterKind kind)
2533     : data_(data),
2534       mode_(kind),
2535       num_registers_(GetRegisterCount(data->config(), kind)),
2536       num_allocatable_registers_(
2537           GetAllocatableRegisterCount(data->config(), kind)),
2538       allocatable_register_codes_(
2539           GetAllocatableRegisterCodes(data->config(), kind)),
2540       check_fp_aliasing_(false) {
2541   if (!kSimpleFPAliasing && kind == FP_REGISTERS) {
2542     check_fp_aliasing_ = (data->code()->representation_mask() &
2543                           (kFloatRepBit | kSimd128RepBit)) != 0;
2544   }
2545 }
2546 
2547 LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
2548     const LiveRange* range, int instruction_index) {
2549   LifetimePosition ret = LifetimePosition::Invalid();
2550 
2551   ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
2552   if (range->Start() >= ret || ret >= range->End()) {
2553     return LifetimePosition::Invalid();
2554   }
2555   return ret;
2556 }
2557 
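// Ranges whose value is already available in a spill location at their
// definition do not need a register until their first use that benefits from
// one: if no such use exists the whole range is spilled, otherwise the range is
// split shortly before that use and only the leading part is spilled.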
2558 void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
2559   size_t initial_range_count = data()->live_ranges().size();
2560   for (size_t i = 0; i < initial_range_count; ++i) {
2561     TopLevelLiveRange* range = data()->live_ranges()[i];
2562     if (!CanProcessRange(range)) continue;
2563     if (range->HasNoSpillType() ||
2564         (range->HasSpillRange() && !range->has_slot_use())) {
2565       continue;
2566     }
2567     LifetimePosition start = range->Start();
2568     TRACE("Live range %d:%d is defined by a spill operand.\n",
2569           range->TopLevel()->vreg(), range->relative_id());
2570     LifetimePosition next_pos = start;
2571     if (next_pos.IsGapPosition()) {
2572       next_pos = next_pos.NextStart();
2573     }
2574 
2575     // With splinters, we can be more strict and skip over positions
2576     // not strictly needing registers.
2577     UsePosition* pos =
2578         range->IsSplinter()
2579             ? range->NextRegisterPosition(next_pos)
2580             : range->NextUsePositionRegisterIsBeneficial(next_pos);
2581     // If the range already has a spill operand and it doesn't need a
2582     // register immediately, split it and spill the first part of the range.
2583     if (pos == nullptr) {
2584       Spill(range);
2585     } else if (pos->pos() > range->Start().NextStart()) {
2586       // Do not spill the live range eagerly if the use position that can
2587       // benefit from the register is too close to the start of the live range.
2588       LifetimePosition split_pos = GetSplitPositionForInstruction(
2589           range, pos->pos().ToInstructionIndex());
2590       // There is no place to split, so we can't split and spill.
2591       if (!split_pos.IsValid()) continue;
2592 
2593       split_pos =
2594           FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
2595 
2596       SplitRangeAt(range, split_pos);
2597       Spill(range);
2598     }
2599   }
2600 }
2601 
2602 
2603 LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
2604                                            LifetimePosition pos) {
2605   DCHECK(!range->TopLevel()->IsFixed());
2606   TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(),
2607         range->relative_id(), pos.value());
2608 
2609   if (pos <= range->Start()) return range;
2610 
2611   // We can't properly connect live ranges if splitting occurred at the end
2612   // of a block.
2613   DCHECK(pos.IsStart() || pos.IsGapPosition() ||
2614          (GetInstructionBlock(code(), pos)->last_instruction_index() !=
2615           pos.ToInstructionIndex()));
2616 
2617   LiveRange* result = range->SplitAt(pos, allocation_zone());
2618   return result;
2619 }
2620 
2621 
2622 LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
2623                                            LifetimePosition start,
2624                                            LifetimePosition end) {
2625   DCHECK(!range->TopLevel()->IsFixed());
2626   TRACE("Splitting live range %d:%d in position between [%d, %d]\n",
2627         range->TopLevel()->vreg(), range->relative_id(), start.value(),
2628         end.value());
2629 
2630   LifetimePosition split_pos = FindOptimalSplitPos(start, end);
2631   DCHECK(split_pos >= start);
2632   return SplitRangeAt(range, split_pos);
2633 }
2634 
2635 
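// Picks a position in [start, end] to split at: as late as possible, but
// hoisted to the header of the outermost loop whose header lies after |start|,
// so that the moves introduced by the split end up outside the loop.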
2636 LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
2637                                                         LifetimePosition end) {
2638   int start_instr = start.ToInstructionIndex();
2639   int end_instr = end.ToInstructionIndex();
2640   DCHECK(start_instr <= end_instr);
2641 
2642   // We have no choice.
2643   if (start_instr == end_instr) return end;
2644 
2645   const InstructionBlock* start_block = GetInstructionBlock(code(), start);
2646   const InstructionBlock* end_block = GetInstructionBlock(code(), end);
2647 
2648   if (end_block == start_block) {
2649     // The interval is split in the same basic block. Split at the latest
2650     // possible position.
2651     return end;
2652   }
2653 
2654   const InstructionBlock* block = end_block;
2655   // Find header of outermost loop.
2656   do {
2657     const InstructionBlock* loop = GetContainingLoop(code(), block);
2658     if (loop == nullptr ||
2659         loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) {
2660       // No more loops or loop starts before the lifetime start.
2661       break;
2662     }
2663     block = loop;
2664   } while (true);
2665 
2666   // We did not find any suitable outer loop. Split at the latest possible
2667   // position unless end_block is a loop header itself.
2668   if (block == end_block && !end_block->IsLoopHeader()) return end;
2669 
2670   return LifetimePosition::GapFromInstructionIndex(
2671       block->first_instruction_index());
2672 }
2673 
2674 
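// Tries to move the spilling position for range from pos back to the start of
// an enclosing loop header that the range covers, as long as there is no
// register-beneficial use in between. This avoids repeated spill moves on loop
// back edges.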
2675 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
2676     LiveRange* range, LifetimePosition pos) {
2677   const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
2678   const InstructionBlock* loop_header =
2679       block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
2680 
2681   if (loop_header == nullptr) return pos;
2682 
2683   const UsePosition* prev_use =
2684       range->PreviousUsePositionRegisterIsBeneficial(pos);
2685 
2686   while (loop_header != nullptr) {
2687     // We are going to spill the live range inside the loop.
2688     // If possible, try to move the spilling position backwards to the loop
2689     // header; this reduces the number of memory moves on the back edge.
2690     LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
2691         loop_header->first_instruction_index());
2692 
2693     if (range->Covers(loop_start)) {
2694       if (prev_use == nullptr || prev_use->pos() < loop_start) {
2695         // No register-beneficial use inside the loop before pos.
2696         pos = loop_start;
2697       }
2698     }
2699 
2700     // Try hoisting out to an outer loop.
2701     loop_header = GetContainingLoop(code(), loop_header);
2702   }
2703 
2704   return pos;
2705 }
2706 
2707 
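// Marks range as spilled, first assigning a spill range to its top level if it
// does not yet have a spill type.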
2708 void RegisterAllocator::Spill(LiveRange* range) {
2709   DCHECK(!range->spilled());
2710   TopLevelLiveRange* first = range->TopLevel();
2711   TRACE("Spilling live range %d:%d\n", first->vreg(), range->relative_id());
2712 
2713   if (first->HasNoSpillType()) {
2714     data()->AssignSpillRangeToLiveRange(first);
2715   }
2716   range->Spill();
2717 }
2718 
2719 const char* RegisterAllocator::RegisterName(int register_code) const {
2720   if (mode() == GENERAL_REGISTERS) {
2721     return data()->config()->GetGeneralRegisterName(register_code);
2722   } else {
2723     return data()->config()->GetDoubleRegisterName(register_code);
2724   }
2725 }
2726 
2727 
2728 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
2729                                          RegisterKind kind, Zone* local_zone)
2730     : RegisterAllocator(data, kind),
2731       unhandled_live_ranges_(local_zone),
2732       active_live_ranges_(local_zone),
2733       inactive_live_ranges_(local_zone) {
2734   unhandled_live_ranges().reserve(
2735       static_cast<size_t>(code()->VirtualRegisterCount() * 2));
2736   active_live_ranges().reserve(8);
2737   inactive_live_ranges().reserve(8);
2738   // TryAllocateFreeReg and AllocateBlockedReg assume this
2739   // when allocating local arrays.
2740   DCHECK(RegisterConfiguration::kMaxFPRegisters >=
2741          this->data()->config()->num_general_registers());
2742 }
2743 
2744 
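// Main linear-scan loop: ranges defined by memory operands are split and
// spilled up front, the remaining unspilled ranges are queued as unhandled and
// sorted by start position, fixed ranges are preloaded as inactive, and then
// each unhandled range in turn retires or deactivates expired active ranges
// and activates covering inactive ranges before being allocated by
// ProcessCurrentRange.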
2745 void LinearScanAllocator::AllocateRegisters() {
2746   DCHECK(unhandled_live_ranges().empty());
2747   DCHECK(active_live_ranges().empty());
2748   DCHECK(inactive_live_ranges().empty());
2749 
2750   SplitAndSpillRangesDefinedByMemoryOperand();
2751 
2752   for (TopLevelLiveRange* range : data()->live_ranges()) {
2753     if (!CanProcessRange(range)) continue;
2754     for (LiveRange* to_add = range; to_add != nullptr;
2755          to_add = to_add->next()) {
2756       if (!to_add->spilled()) {
2757         AddToUnhandledUnsorted(to_add);
2758       }
2759     }
2760   }
2761   SortUnhandled();
2762   DCHECK(UnhandledIsSorted());
2763 
2764   if (mode() == GENERAL_REGISTERS) {
2765     for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
2766       if (current != nullptr) AddToInactive(current);
2767     }
2768   } else {
2769     for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
2770       if (current != nullptr) AddToInactive(current);
2771     }
2772     if (!kSimpleFPAliasing && check_fp_aliasing()) {
2773       for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
2774         if (current != nullptr) AddToInactive(current);
2775       }
2776       for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
2777         if (current != nullptr) AddToInactive(current);
2778       }
2779     }
2780   }
2781 
2782   while (!unhandled_live_ranges().empty()) {
2783     DCHECK(UnhandledIsSorted());
2784     LiveRange* current = unhandled_live_ranges().back();
2785     unhandled_live_ranges().pop_back();
2786     DCHECK(UnhandledIsSorted());
2787     LifetimePosition position = current->Start();
2788 #ifdef DEBUG
2789     allocation_finger_ = position;
2790 #endif
2791     TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
2792           current->relative_id(), position.value());
2793 
2794     if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
2795       continue;
2796 
2797     for (size_t i = 0; i < active_live_ranges().size(); ++i) {
2798       LiveRange* cur_active = active_live_ranges()[i];
2799       if (cur_active->End() <= position) {
2800         ActiveToHandled(cur_active);
2801         --i;  // The live range was removed from the list of active live ranges.
2802       } else if (!cur_active->Covers(position)) {
2803         ActiveToInactive(cur_active);
2804         --i;  // The live range was removed from the list of active live ranges.
2805       }
2806     }
2807 
2808     for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
2809       LiveRange* cur_inactive = inactive_live_ranges()[i];
2810       if (cur_inactive->End() <= position) {
2811         InactiveToHandled(cur_inactive);
2812         --i;  // Live range was removed from the list of inactive live ranges.
2813       } else if (cur_inactive->Covers(position)) {
2814         InactiveToActive(cur_inactive);
2815         --i;  // Live range was removed from the list of inactive live ranges.
2816       }
2817     }
2818 
2819     DCHECK(!current->HasRegisterAssigned() && !current->spilled());
2820 
2821     ProcessCurrentRange(current);
2822   }
2823 }
2824 
2825 bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
2826   DCHECK(range->TopLevel()->IsSplinter());
2827   // If we can spill the whole range, great. Otherwise, split above the
2828   // first use needing a register and spill the top part.
2829   const UsePosition* next_reg = range->NextRegisterPosition(range->Start());
2830   if (next_reg == nullptr) {
2831     Spill(range);
2832     return true;
2833   } else if (range->FirstHintPosition() == nullptr) {
2834     // If there was no hint, but we have a use position requiring a
2835     // register, apply the hot path heuristics.
2836     return false;
2837   } else if (next_reg->pos().PrevStart() > range->Start()) {
2838     LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
2839     AddToUnhandledSorted(tail);
2840     Spill(range);
2841     return true;
2842   }
2843   return false;
2844 }
2845 
2846 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
2847                                                        int reg) {
2848   data()->MarkAllocated(range->representation(), reg);
2849   range->set_assigned_register(reg);
2850   range->SetUseHints(reg);
2851   if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
2852     data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
2853   }
2854 }
2855 
2856 
2857 void LinearScanAllocator::AddToActive(LiveRange* range) {
2858   TRACE("Add live range %d:%d to active\n", range->TopLevel()->vreg(),
2859         range->relative_id());
2860   active_live_ranges().push_back(range);
2861 }
2862 
2863 
2864 void LinearScanAllocator::AddToInactive(LiveRange* range) {
2865   TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
2866         range->relative_id());
2867   inactive_live_ranges().push_back(range);
2868 }
2869 
2870 
2871 void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) {
2872   if (range == nullptr || range->IsEmpty()) return;
2873   DCHECK(!range->HasRegisterAssigned() && !range->spilled());
2874   DCHECK(allocation_finger_ <= range->Start());
2875   for (int i = static_cast<int>(unhandled_live_ranges().size() - 1); i >= 0;
2876        --i) {
2877     LiveRange* cur_range = unhandled_live_ranges().at(i);
2878     if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
2879     TRACE("Add live range %d:%d to unhandled at %d\n",
2880           range->TopLevel()->vreg(), range->relative_id(), i + 1);
2881     auto it = unhandled_live_ranges().begin() + (i + 1);
2882     unhandled_live_ranges().insert(it, range);
2883     DCHECK(UnhandledIsSorted());
2884     return;
2885   }
2886   TRACE("Add live range %d:%d to unhandled at start\n",
2887         range->TopLevel()->vreg(), range->relative_id());
2888   unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
2889   DCHECK(UnhandledIsSorted());
2890 }
2891 
2892 
2893 void LinearScanAllocator::AddToUnhandledUnsorted(LiveRange* range) {
2894   if (range == nullptr || range->IsEmpty()) return;
2895   DCHECK(!range->HasRegisterAssigned() && !range->spilled());
2896   TRACE("Add live range %d:%d to unhandled unsorted at end\n",
2897         range->TopLevel()->vreg(), range->relative_id());
2898   unhandled_live_ranges().push_back(range);
2899 }
2900 
2901 
2902 static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
2903   DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a));
2904   if (a->ShouldBeAllocatedBefore(b)) return false;
2905   if (b->ShouldBeAllocatedBefore(a)) return true;
2906   return a->TopLevel()->vreg() < b->TopLevel()->vreg();
2907 }
2908 
2909 
2910 // Sort the unhandled live ranges so that the ranges to be processed first are
2911 // at the end of the vector. This is convenient for the register allocation
2912 // algorithm because it is efficient to remove elements from the end.
2913 void LinearScanAllocator::SortUnhandled() {
2914   TRACE("Sort unhandled\n");
2915   std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
2916             &UnhandledSortHelper);
2917 }
2918 
2919 
2920 bool LinearScanAllocator::UnhandledIsSorted() {
2921   size_t len = unhandled_live_ranges().size();
2922   for (size_t i = 1; i < len; i++) {
2923     LiveRange* a = unhandled_live_ranges().at(i - 1);
2924     LiveRange* b = unhandled_live_ranges().at(i);
2925     if (a->Start() < b->Start()) return false;
2926   }
2927   return true;
2928 }
2929 
2930 
2931 void LinearScanAllocator::ActiveToHandled(LiveRange* range) {
2932   RemoveElement(&active_live_ranges(), range);
2933   TRACE("Moving live range %d:%d from active to handled\n",
2934         range->TopLevel()->vreg(), range->relative_id());
2935 }
2936 
2937 
2938 void LinearScanAllocator::ActiveToInactive(LiveRange* range) {
2939   RemoveElement(&active_live_ranges(), range);
2940   inactive_live_ranges().push_back(range);
2941   TRACE("Moving live range %d:%d from active to inactive\n",
2942         range->TopLevel()->vreg(), range->relative_id());
2943 }
2944 
2945 
2946 void LinearScanAllocator::InactiveToHandled(LiveRange* range) {
2947   RemoveElement(&inactive_live_ranges(), range);
2948   TRACE("Moving live range %d:%d from inactive to handled\n",
2949         range->TopLevel()->vreg(), range->relative_id());
2950 }
2951 
2952 
2953 void LinearScanAllocator::InactiveToActive(LiveRange* range) {
2954   RemoveElement(&inactive_live_ranges(), range);
2955   active_live_ranges().push_back(range);
2956   TRACE("Moving live range %d:%d from inactive to active\n",
2957         range->TopLevel()->vreg(), range->relative_id());
2958 }
2959 
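// For float32 and simd128 representations on architectures with complex FP
// aliasing, returns the register count, the allocatable register count and the
// allocatable register codes for that representation.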
2960 void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
2961                                            int* num_regs, int* num_codes,
2962                                            const int** codes) const {
2963   DCHECK(!kSimpleFPAliasing);
2964   if (rep == MachineRepresentation::kFloat32) {
2965     *num_regs = data()->config()->num_float_registers();
2966     *num_codes = data()->config()->num_allocatable_float_registers();
2967     *codes = data()->config()->allocatable_float_codes();
2968   } else if (rep == MachineRepresentation::kSimd128) {
2969     *num_regs = data()->config()->num_simd128_registers();
2970     *num_codes = data()->config()->num_allocatable_simd128_registers();
2971     *codes = data()->config()->allocatable_simd128_codes();
2972   } else {
2973     UNREACHABLE();
2974   }
2975 }
2976 
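// Fills positions with, for each register, the position until which it is free
// for range: registers held by active ranges are blocked from the start,
// registers held by inactive ranges are free until their first intersection
// with range (taking FP aliasing into account where needed).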
2977 void LinearScanAllocator::FindFreeRegistersForRange(
2978     LiveRange* range, Vector<LifetimePosition> positions) {
2979   int num_regs = num_registers();
2980   int num_codes = num_allocatable_registers();
2981   const int* codes = allocatable_register_codes();
2982   MachineRepresentation rep = range->representation();
2983   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
2984                              rep == MachineRepresentation::kSimd128))
2985     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
2986   DCHECK_GE(positions.length(), num_regs);
2987 
2988   for (int i = 0; i < num_regs; i++) {
2989     positions[i] = LifetimePosition::MaxPosition();
2990   }
2991 
2992   for (LiveRange* cur_active : active_live_ranges()) {
2993     int cur_reg = cur_active->assigned_register();
2994     if (kSimpleFPAliasing || !check_fp_aliasing()) {
2995       positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
2996       TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
2997             LifetimePosition::GapFromInstructionIndex(0).value());
2998     } else {
2999       int alias_base_index = -1;
3000       int aliases = data()->config()->GetAliases(
3001           cur_active->representation(), cur_reg, rep, &alias_base_index);
3002       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3003       while (aliases--) {
3004         int aliased_reg = alias_base_index + aliases;
3005         positions[aliased_reg] = LifetimePosition::GapFromInstructionIndex(0);
3006       }
3007     }
3008   }
3009 
3010   for (LiveRange* cur_inactive : inactive_live_ranges()) {
3011     DCHECK(cur_inactive->End() > range->Start());
3012     LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
3013     if (!next_intersection.IsValid()) continue;
3014     int cur_reg = cur_inactive->assigned_register();
3015     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3016       positions[cur_reg] = Min(positions[cur_reg], next_intersection);
3017       TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
3018             Min(positions[cur_reg], next_intersection).value());
3019     } else {
3020       int alias_base_index = -1;
3021       int aliases = data()->config()->GetAliases(
3022           cur_inactive->representation(), cur_reg, rep, &alias_base_index);
3023       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3024       while (aliases--) {
3025         int aliased_reg = alias_base_index + aliases;
3026         positions[aliased_reg] = Min(positions[aliased_reg], next_intersection);
3027       }
3028     }
3029   }
3030 }
3031 
3032 // High-level register allocation summary:
3033 //
3034 // For regular, or hot (i.e. not splinter) ranges, we attempt to first
3035 // allocate the preferred (hint) register. If that is not possible,
3036 // we find a register that's free, and allocate that. If that's not possible,
3037 // we search for a register to steal from a range that was allocated. The
3038 // goal is to optimize for throughput by avoiding register-to-memory
3039 // moves, which are expensive.
3040 //
3041 // For splinters, the goal is to minimize the number of moves. First we try
3042 // to allocate the preferred register (more discussion follows). Failing that,
3043 // we bail out and spill as far as we can, unless the first use is at the
3044 // start, in which case we apply the same behavior as we do for regular ranges.
3045 // If there is no hint, we apply the hot-path behavior.
3046 //
3047 // For the splinter, the hint register may come from:
3048 //
3049 // - the hot path (we set it at splintering time with SetHint). In this case, if
3050 // we cannot offer the hint register, spilling is better because it's at most
3051 // 1 move, while trying to find and offer another register is at least 1 move.
3052 //
3053 // - a constraint. If we cannot offer that register, it's because there is some
3054 // interference. So offering the hint register up to the interference would
3055 // result in a move at the interference, plus a move to satisfy the constraint.
3056 // This is also the number of moves if we spill, with the potential of the
3057 // range being already spilled and thus saving a move (the spill).
3058 //
3059 // Note that this can only be an input constraint; if it were an output one,
3060 // the range wouldn't be a splinter, because that would mean it is defined in a
3061 // deferred block, and we don't mark those as splinters (they live in deferred
3062 // blocks only).
3063 //
3064 //
3065 // - a phi. The same analysis as in the case of the input constraint applies.
3066 //
3067 void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
3068   LifetimePosition free_until_pos_buff[RegisterConfiguration::kMaxFPRegisters];
3069   Vector<LifetimePosition> free_until_pos(
3070       free_until_pos_buff, RegisterConfiguration::kMaxFPRegisters);
3071   FindFreeRegistersForRange(current, free_until_pos);
3072   if (!TryAllocatePreferredReg(current, free_until_pos)) {
3073     if (current->TopLevel()->IsSplinter()) {
3074       if (TrySplitAndSpillSplinter(current)) return;
3075     }
3076     if (!TryAllocateFreeReg(current, free_until_pos)) {
3077       AllocateBlockedReg(current);
3078     }
3079   }
3080   if (current->HasRegisterAssigned()) {
3081     AddToActive(current);
3082   }
3083 }
3084 
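// Assigns the hint register to current if that register is free until the end
// of the range; returns whether the assignment was made.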
3085 bool LinearScanAllocator::TryAllocatePreferredReg(
3086     LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
3087   int hint_register;
3088   if (current->FirstHintPosition(&hint_register) != nullptr) {
3089     TRACE(
3090         "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
3091         RegisterName(hint_register), free_until_pos[hint_register].value(),
3092         current->TopLevel()->vreg(), current->relative_id(),
3093         current->End().value());
3094 
3095     // The desired register is free until the end of the current live range.
3096     if (free_until_pos[hint_register] >= current->End()) {
3097       TRACE("Assigning preferred reg %s to live range %d:%d\n",
3098             RegisterName(hint_register), current->TopLevel()->vreg(),
3099             current->relative_id());
3100       SetLiveRangeAssignedRegister(current, hint_register);
3101       return true;
3102     }
3103   }
3104   return false;
3105 }
3106 
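// Picks the register that stays free the longest. Fails if every register is
// blocked at the start of current; otherwise, if the chosen register becomes
// blocked before the end of current, splits the range at that point and
// assigns the register to the first part.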
3107 bool LinearScanAllocator::TryAllocateFreeReg(
3108     LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
3109   int num_regs = 0;  // used only for the call to GetFPRegisterSet.
3110   int num_codes = num_allocatable_registers();
3111   const int* codes = allocatable_register_codes();
3112   MachineRepresentation rep = current->representation();
3113   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
3114                              rep == MachineRepresentation::kSimd128))
3115     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3116 
3117   DCHECK_GE(free_until_pos.length(), num_codes);
3118 
3119   // Find the register which stays free for the longest time.
3120   int reg = codes[0];
3121   for (int i = 1; i < num_codes; ++i) {
3122     int code = codes[i];
3123     if (free_until_pos[code] > free_until_pos[reg]) {
3124       reg = code;
3125     }
3126   }
3127 
3128   LifetimePosition pos = free_until_pos[reg];
3129 
3130   if (pos <= current->Start()) {
3131     // All registers are blocked.
3132     return false;
3133   }
3134 
3135   if (pos < current->End()) {
3136     // Register reg is available at the range start but becomes blocked before
3137     // the range end. Split current at position where it becomes blocked.
3138     LiveRange* tail = SplitRangeAt(current, pos);
3139     AddToUnhandledSorted(tail);
3140   }
3141 
3142   // Register reg is available at the range start and is free until the range
3143   // end.
3144   DCHECK(pos >= current->End());
3145   TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
3146         current->TopLevel()->vreg(), current->relative_id());
3147   SetLiveRangeAssignedRegister(current, reg);
3148 
3149   return true;
3150 }
3151 
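// All registers are occupied at the start of current. Chooses the register
// whose next use by its current holders is furthest away. If even that use
// comes before the first register use of current, current is spilled up to
// that use instead; otherwise the register is assigned, current is split
// before the position where the register becomes blocked, and the other
// ranges holding the register are split and spilled around current's start.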
3152 void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
3153   UsePosition* register_use = current->NextRegisterPosition(current->Start());
3154   if (register_use == nullptr) {
3155     // There is no use in the current live range that requires a register.
3156     // We can just spill it.
3157     Spill(current);
3158     return;
3159   }
3160 
3161   int num_regs = num_registers();
3162   int num_codes = num_allocatable_registers();
3163   const int* codes = allocatable_register_codes();
3164   MachineRepresentation rep = current->representation();
3165   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
3166                              rep == MachineRepresentation::kSimd128))
3167     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3168 
3169   LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
3170   LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
3171   for (int i = 0; i < num_regs; i++) {
3172     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
3173   }
3174 
3175   for (LiveRange* range : active_live_ranges()) {
3176     int cur_reg = range->assigned_register();
3177     bool is_fixed_or_cant_spill =
3178         range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
3179     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3180       if (is_fixed_or_cant_spill) {
3181         block_pos[cur_reg] = use_pos[cur_reg] =
3182             LifetimePosition::GapFromInstructionIndex(0);
3183       } else {
3184         use_pos[cur_reg] =
3185             range->NextLifetimePositionRegisterIsBeneficial(current->Start());
3186       }
3187     } else {
3188       int alias_base_index = -1;
3189       int aliases = data()->config()->GetAliases(
3190           range->representation(), cur_reg, rep, &alias_base_index);
3191       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3192       while (aliases--) {
3193         int aliased_reg = alias_base_index + aliases;
3194         if (is_fixed_or_cant_spill) {
3195           block_pos[aliased_reg] = use_pos[aliased_reg] =
3196               LifetimePosition::GapFromInstructionIndex(0);
3197         } else {
3198           use_pos[aliased_reg] =
3199               range->NextLifetimePositionRegisterIsBeneficial(current->Start());
3200         }
3201       }
3202     }
3203   }
3204 
3205   for (LiveRange* range : inactive_live_ranges()) {
3206     DCHECK(range->End() > current->Start());
3207     LifetimePosition next_intersection = range->FirstIntersection(current);
3208     if (!next_intersection.IsValid()) continue;
3209     int cur_reg = range->assigned_register();
3210     bool is_fixed = range->TopLevel()->IsFixed();
3211     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3212       if (is_fixed) {
3213         block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
3214         use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
3215       } else {
3216         use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
3217       }
3218     } else {
3219       int alias_base_index = -1;
3220       int aliases = data()->config()->GetAliases(
3221           range->representation(), cur_reg, rep, &alias_base_index);
3222       DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3223       while (aliases--) {
3224         int aliased_reg = alias_base_index + aliases;
3225         if (is_fixed) {
3226           block_pos[aliased_reg] =
3227               Min(block_pos[aliased_reg], next_intersection);
3228           use_pos[aliased_reg] =
3229               Min(block_pos[aliased_reg], use_pos[aliased_reg]);
3230         } else {
3231           use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
3232         }
3233       }
3234     }
3235   }
3236 
3237   int reg = codes[0];
3238   for (int i = 1; i < num_codes; ++i) {
3239     int code = codes[i];
3240     if (use_pos[code] > use_pos[reg]) {
3241       reg = code;
3242     }
3243   }
3244 
3245   LifetimePosition pos = use_pos[reg];
3246 
3247   if (pos < register_use->pos()) {
3248     if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
3249                                                    register_use->pos())) {
3250       SpillBetween(current, current->Start(), register_use->pos());
3251     } else {
3252       SetLiveRangeAssignedRegister(current, reg);
3253       SplitAndSpillIntersecting(current);
3254     }
3255     return;
3256   }
3257 
3258   if (block_pos[reg] < current->End()) {
3259     // Register becomes blocked before the current range end. Split before that
3260     // position.
3261     LiveRange* tail =
3262         SplitBetween(current, current->Start(), block_pos[reg].Start());
3263     AddToUnhandledSorted(tail);
3264   }
3265 
3266   // Register reg is not blocked for the whole range.
3267   DCHECK(block_pos[reg] >= current->End());
3268   TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg),
3269         current->TopLevel()->vreg(), current->relative_id());
3270   SetLiveRangeAssignedRegister(current, reg);
3271 
3272   // This register was not free. Thus we need to find and spill
3273   // parts of active and inactive live ranges that use the same register
3274   // at the same lifetime positions as current.
3275   SplitAndSpillIntersecting(current);
3276 }
3277 
3278 
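// Evicts other ranges that hold (or alias) the register just assigned to
// current: intersecting active and inactive ranges are split and spilled
// around the start of current and moved to the handled set.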
3279 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
3280   DCHECK(current->HasRegisterAssigned());
3281   int reg = current->assigned_register();
3282   LifetimePosition split_pos = current->Start();
3283   for (size_t i = 0; i < active_live_ranges().size(); ++i) {
3284     LiveRange* range = active_live_ranges()[i];
3285     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3286       if (range->assigned_register() != reg) continue;
3287     } else {
3288       if (!data()->config()->AreAliases(current->representation(), reg,
3289                                         range->representation(),
3290                                         range->assigned_register())) {
3291         continue;
3292       }
3293     }
3294 
3295     UsePosition* next_pos = range->NextRegisterPosition(current->Start());
3296     LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
3297     if (next_pos == nullptr) {
3298       SpillAfter(range, spill_pos);
3299     } else {
3300       // When spilling between spill_pos and next_pos ensure that the range
3301       // remains spilled at least until the start of the current live range.
3302       // This guarantees that we will not introduce new unhandled ranges that
3303       // start before the current range as this violates allocation invariants
3304       // and will lead to an inconsistent state of active and inactive
3305       // live-ranges: ranges are allocated in order of their start positions,
3306       // ranges are retired from active/inactive when the start of the
3307       // current live-range is larger than their end.
3308       DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
3309                                                         next_pos->pos()));
3310       SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
3311     }
3312     ActiveToHandled(range);
3313     --i;
3314   }
3315 
3316   for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
3317     LiveRange* range = inactive_live_ranges()[i];
3318     DCHECK(range->End() > current->Start());
3319     if (range->TopLevel()->IsFixed()) continue;
3320     if (kSimpleFPAliasing || !check_fp_aliasing()) {
3321       if (range->assigned_register() != reg) continue;
3322     } else {
3323       if (!data()->config()->AreAliases(current->representation(), reg,
3324                                         range->representation(),
3325                                         range->assigned_register()))
3326         continue;
3327     }
3328 
3329     LifetimePosition next_intersection = range->FirstIntersection(current);
3330     if (next_intersection.IsValid()) {
3331       UsePosition* next_pos = range->NextRegisterPosition(current->Start());
3332       if (next_pos == nullptr) {
3333         SpillAfter(range, split_pos);
3334       } else {
3335         next_intersection = Min(next_intersection, next_pos->pos());
3336         SpillBetween(range, split_pos, next_intersection);
3337       }
3338       InactiveToHandled(range);
3339       --i;
3340     }
3341   }
3342 }
3343 
3344 
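// For a phi whose inputs are mostly spilled, tries to merge the phi's spill
// range with the spill slots of its inputs so the phi can be spilled to the
// same slot. Succeeds only if more than half of the inputs are spilled, enough
// of their slots can be merged, and the phi does not need a register
// immediately.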
3345 bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
3346   if (!range->is_phi()) return false;
3347 
3348   DCHECK(!range->HasSpillOperand());
3349   RegisterAllocationData::PhiMapValue* phi_map_value =
3350       data()->GetPhiMapValueFor(range);
3351   const PhiInstruction* phi = phi_map_value->phi();
3352   const InstructionBlock* block = phi_map_value->block();
3353   // Count the number of spilled operands.
3354   size_t spilled_count = 0;
3355   LiveRange* first_op = nullptr;
3356   for (size_t i = 0; i < phi->operands().size(); i++) {
3357     int op = phi->operands()[i];
3358     LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
3359     if (!op_range->TopLevel()->HasSpillRange()) continue;
3360     const InstructionBlock* pred =
3361         code()->InstructionBlockAt(block->predecessors()[i]);
3362     LifetimePosition pred_end =
3363         LifetimePosition::InstructionFromInstructionIndex(
3364             pred->last_instruction_index());
3365     while (op_range != nullptr && !op_range->CanCover(pred_end)) {
3366       op_range = op_range->next();
3367     }
3368     if (op_range != nullptr && op_range->spilled()) {
3369       spilled_count++;
3370       if (first_op == nullptr) {
3371         first_op = op_range->TopLevel();
3372       }
3373     }
3374   }
3375 
3376   // Only continue if more than half of the operands are spilled.
3377   if (spilled_count * 2 <= phi->operands().size()) {
3378     return false;
3379   }
3380 
3381   // Try to merge the spilled operands and count the number of merged spilled
3382   // operands.
3383   DCHECK(first_op != nullptr);
3384   SpillRange* first_op_spill = first_op->TopLevel()->GetSpillRange();
3385   size_t num_merged = 1;
3386   for (size_t i = 1; i < phi->operands().size(); i++) {
3387     int op = phi->operands()[i];
3388     TopLevelLiveRange* op_range = data()->live_ranges()[op];
3389     if (!op_range->HasSpillRange()) continue;
3390     SpillRange* op_spill = op_range->GetSpillRange();
3391     if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
3392       num_merged++;
3393     }
3394   }
3395 
3396   // Only continue if enough operands could be merged to the
3397   // same spill slot.
3398   if (num_merged * 2 <= phi->operands().size() ||
3399       AreUseIntervalsIntersecting(first_op_spill->interval(),
3400                                   range->first_interval())) {
3401     return false;
3402   }
3403 
3404   // If the range does not need a register soon, spill it to the merged
3405   // spill range.
3406   LifetimePosition next_pos = range->Start();
3407   if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
3408   UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
3409   if (pos == nullptr) {
3410     SpillRange* spill_range =
3411         range->TopLevel()->HasSpillRange()
3412             ? range->TopLevel()->GetSpillRange()
3413             : data()->AssignSpillRangeToLiveRange(range->TopLevel());
3414     bool merged = first_op_spill->TryMerge(spill_range);
3415     if (!merged) return false;
3416     Spill(range);
3417     return true;
3418   } else if (pos->pos() > range->Start().NextStart()) {
3419     SpillRange* spill_range =
3420         range->TopLevel()->HasSpillRange()
3421             ? range->TopLevel()->GetSpillRange()
3422             : data()->AssignSpillRangeToLiveRange(range->TopLevel());
3423     bool merged = first_op_spill->TryMerge(spill_range);
3424     if (!merged) return false;
3425     SpillBetween(range, range->Start(), pos->pos());
3426     DCHECK(UnhandledIsSorted());
3427     return true;
3428   }
3429   return false;
3430 }
3431 
3432 
3433 void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
3434   LiveRange* second_part = SplitRangeAt(range, pos);
3435   Spill(second_part);
3436 }
3437 
3438 
3439 void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
3440                                        LifetimePosition end) {
3441   SpillBetweenUntil(range, start, start, end);
3442 }
3443 
3444 
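// Spills range from start until at least until: the part from start up to a
// split point between until and end is spilled, and the remainder is put back
// on the unhandled list to be allocated later.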
3445 void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
3446                                             LifetimePosition start,
3447                                             LifetimePosition until,
3448                                             LifetimePosition end) {
3449   CHECK(start < end);
3450   LiveRange* second_part = SplitRangeAt(range, start);
3451 
3452   if (second_part->Start() < end) {
3453     // The split result intersects with [start, end[.
3454     // Split it at a position between ]start+1, end[, spill the middle part,
3455     // and put the rest back on the unhandled list.
3456     LifetimePosition third_part_end = end.PrevStart().End();
3457     if (data()->IsBlockBoundary(end.Start())) {
3458       third_part_end = end.Start();
3459     }
3460     LiveRange* third_part = SplitBetween(
3461         second_part, Max(second_part->Start().End(), until), third_part_end);
3462 
3463     DCHECK(third_part != second_part);
3464 
3465     Spill(second_part);
3466     AddToUnhandledSorted(third_part);
3467   } else {
3468     // The split result does not intersect with [start, end[.
3469     // Nothing to spill. Just put it on the unhandled list as a whole.
3470     AddToUnhandledSorted(second_part);
3471   }
3472 }
3473 
3474 
3475 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
3476     : data_(data) {}
3477 
3478 
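// Marks every block that will receive a spill move (for ranges spilled to the
// frame) as needing a frame.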
3479 void SpillSlotLocator::LocateSpillSlots() {
3480   const InstructionSequence* code = data()->code();
3481   for (TopLevelLiveRange* range : data()->live_ranges()) {
3482     if (range == nullptr || range->IsEmpty()) continue;
3483     // We care only about ranges which spill in the frame.
3484     if (!range->HasSpillRange() || range->IsSpilledOnlyInDeferredBlocks()) {
3485       continue;
3486     }
3487     TopLevelLiveRange::SpillMoveInsertionList* spills =
3488         range->GetSpillMoveInsertionLocations();
3489     DCHECK_NOT_NULL(spills);
3490     for (; spills != nullptr; spills = spills->next) {
3491       code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
3492     }
3493   }
3494 }
3495 
3496 
3497 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
3498 
3499 
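// Merges non-conflicting spill ranges pairwise and then allocates one frame
// slot for each remaining (merged) spill range.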
3500 void OperandAssigner::AssignSpillSlots() {
3501   ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
3502   // Merge disjoint spill ranges
3503   for (size_t i = 0; i < spill_ranges.size(); ++i) {
3504     SpillRange* range = spill_ranges[i];
3505     if (range == nullptr) continue;
3506     if (range->IsEmpty()) continue;
3507     for (size_t j = i + 1; j < spill_ranges.size(); ++j) {
3508       SpillRange* other = spill_ranges[j];
3509       if (other != nullptr && !other->IsEmpty()) {
3510         range->TryMerge(other);
3511       }
3512     }
3513   }
3514   // Allocate slots for the merged spill ranges.
3515   for (SpillRange* range : spill_ranges) {
3516     if (range == nullptr || range->IsEmpty()) continue;
3517     // Allocate a new operand referring to the spill slot.
3518     if (!range->HasSlot()) {
3519       int index = data()->frame()->AllocateSpillSlot(range->byte_width());
3520       range->set_assigned_slot(index);
3521     }
3522   }
3523 }
3524 
3525 
3526 void OperandAssigner::CommitAssignment() {
3527   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
3528     if (top_range == nullptr || top_range->IsEmpty()) continue;
3529     InstructionOperand spill_operand;
3530     if (top_range->HasSpillOperand()) {
3531       spill_operand = *top_range->TopLevel()->GetSpillOperand();
3532     } else if (top_range->TopLevel()->HasSpillRange()) {
3533       spill_operand = top_range->TopLevel()->GetSpillRangeOperand();
3534     }
3535     if (top_range->is_phi()) {
3536       data()->GetPhiMapValueFor(top_range)->CommitAssignment(
3537           top_range->GetAssignedOperand());
3538     }
3539     for (LiveRange* range = top_range; range != nullptr;
3540          range = range->next()) {
3541       InstructionOperand assigned = range->GetAssignedOperand();
3542       range->ConvertUsesToOperand(assigned, spill_operand);
3543     }
3544 
3545     if (!spill_operand.IsInvalid()) {
3546       // If this top level range has a child spilled in a deferred block, we use
3547       // the range and control flow connection mechanism instead of spilling at
3548       // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
3549       // phases. Normally, when we spill at definition, we do not insert a
3550       // connecting move when a successor child range is spilled - because the
3551       // spilled range picks up its value from the slot which was assigned at
3552       // definition. For ranges that are determined to spill only in deferred
3553       // blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
3554       // where a spill operand is expected, and then finalize by inserting the
3555       // spills in the deferred blocks' dominators.
3556       if (!top_range->IsSpilledOnlyInDeferredBlocks()) {
3557         // Spill at definition if the range isn't spilled only in deferred
3558         // blocks.
3559         top_range->CommitSpillMoves(
3560             data()->code(), spill_operand,
3561             top_range->has_slot_use() || top_range->spilled());
3562       }
3563     }
3564   }
3565 }
3566 
3567 
3568 ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
3569     : data_(data) {}
3570 
3571 
3572 bool ReferenceMapPopulator::SafePointsAreInOrder() const {
3573   int safe_point = 0;
3574   for (ReferenceMap* map : *data()->code()->reference_maps()) {
3575     if (safe_point > map->instruction_position()) return false;
3576     safe_point = map->instruction_position();
3577   }
3578   return true;
3579 }
3580 
3581 
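// Records tagged-pointer locations for every safe point: delayed references
// are committed first, then for each reference-holding live range the spill
// slot is recorded once the safe point is past the spill start, and the
// assigned operand of the child range covering the safe point is recorded if
// that child is not spilled.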
3582 void ReferenceMapPopulator::PopulateReferenceMaps() {
3583   DCHECK(SafePointsAreInOrder());
3584   // Map all delayed references.
3585   for (RegisterAllocationData::DelayedReference& delayed_reference :
3586        data()->delayed_references()) {
3587     delayed_reference.map->RecordReference(
3588         AllocatedOperand::cast(*delayed_reference.operand));
3589   }
3590   // Iterate over all safe point positions and record a pointer
3591   // for all spilled live ranges at this point.
3592   int last_range_start = 0;
3593   const ReferenceMapDeque* reference_maps = data()->code()->reference_maps();
3594   ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
3595   for (TopLevelLiveRange* range : data()->live_ranges()) {
3596     if (range == nullptr) continue;
3597     // Skip non-reference values.
3598     if (!data()->IsReference(range)) continue;
3599     // Skip empty live ranges.
3600     if (range->IsEmpty()) continue;
3601     if (range->has_preassigned_slot()) continue;
3602 
3603     // Find the extent of the range and its children.
3604     int start = range->Start().ToInstructionIndex();
3605     int end = 0;
3606     for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
3607       LifetimePosition this_end = cur->End();
3608       if (this_end.ToInstructionIndex() > end)
3609         end = this_end.ToInstructionIndex();
3610       DCHECK(cur->Start().ToInstructionIndex() >= start);
3611     }
3612 
3613     // Most of the ranges are in order, but not all.  Keep an eye on when they
3614     // step backwards and reset the first_it so we don't miss any safe points.
3615     if (start < last_range_start) first_it = reference_maps->begin();
3616     last_range_start = start;
3617 
3618     // Step across all the safe points that are before the start of this range,
3619     // recording how far we step in order to save doing this for the next range.
3620     for (; first_it != reference_maps->end(); ++first_it) {
3621       ReferenceMap* map = *first_it;
3622       if (map->instruction_position() >= start) break;
3623     }
3624 
3625     InstructionOperand spill_operand;
3626     if (((range->HasSpillOperand() &&
3627           !range->GetSpillOperand()->IsConstant()) ||
3628          range->HasSpillRange())) {
3629       if (range->HasSpillOperand()) {
3630         spill_operand = *range->GetSpillOperand();
3631       } else {
3632         spill_operand = range->GetSpillRangeOperand();
3633       }
3634       DCHECK(spill_operand.IsStackSlot());
3635       DCHECK(CanBeTaggedPointer(
3636           AllocatedOperand::cast(spill_operand).representation()));
3637     }
3638 
3639     LiveRange* cur = range;
3640     // Step through the safe points to see whether they are in the range.
3641     for (auto it = first_it; it != reference_maps->end(); ++it) {
3642       ReferenceMap* map = *it;
3643       int safe_point = map->instruction_position();
3644 
3645       // The safe points are sorted so we can stop searching here.
3646       if (safe_point - 1 > end) break;
3647 
3648       // Advance to the next active range that covers the current
3649       // safe point position.
3650       LifetimePosition safe_point_pos =
3651           LifetimePosition::InstructionFromInstructionIndex(safe_point);
3652 
3653       // Search for the child range (cur) that covers safe_point_pos. If we
3654       // don't find it before the children pass safe_point_pos, keep cur at
3655       // the last child, because the next safe_point_pos may be covered by cur.
3656       // This may happen if cur has more than one interval, and the current
3657       // safe_point_pos is in between intervals.
3658       // For that reason, cur may be at most the last child.
3659       DCHECK_NOT_NULL(cur);
3660       DCHECK(safe_point_pos >= cur->Start() || range == cur);
3661       bool found = false;
3662       while (!found) {
3663         if (cur->Covers(safe_point_pos)) {
3664           found = true;
3665         } else {
3666           LiveRange* next = cur->next();
3667           if (next == nullptr || next->Start() > safe_point_pos) {
3668             break;
3669           }
3670           cur = next;
3671         }
3672       }
3673 
3674       if (!found) {
3675         continue;
3676       }
3677 
3678       // Check if the live range is spilled and the safe point is after
3679       // the spill position.
3680       int spill_index = range->IsSpilledOnlyInDeferredBlocks()
3681                             ? cur->Start().ToInstructionIndex()
3682                             : range->spill_start_index();
3683 
3684       if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
3685         TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
3686               range->vreg(), spill_index, safe_point);
3687         map->RecordReference(AllocatedOperand::cast(spill_operand));
3688       }
3689 
3690       if (!cur->spilled()) {
3691         TRACE(
3692             "Pointer in register for range %d:%d (start at %d) "
3693             "at safe point %d\n",
3694             range->vreg(), cur->relative_id(), cur->Start().value(),
3695             safe_point);
3696         InstructionOperand operand = cur->GetAssignedOperand();
3697         DCHECK(!operand.IsStackSlot());
3698         DCHECK(CanBeTaggedPointer(
3699             AllocatedOperand::cast(operand).representation()));
3700         map->RecordReference(AllocatedOperand::cast(operand));
3701       }
3702     }
3703   }
3704 }
3705 
3706 
3707 LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
3708     : data_(data) {}
3709 
3710 
3711 bool LiveRangeConnector::CanEagerlyResolveControlFlow(
3712     const InstructionBlock* block) const {
3713   if (block->PredecessorCount() != 1) return false;
3714   return block->predecessors()[0].IsNext(block->rpo_number());
3715 }
3716 
3717 
3718 void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
3719   // Lazily linearize live ranges in memory for fast lookup.
3720   LiveRangeFinder finder(data(), local_zone);
3721   ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
3722   for (const InstructionBlock* block : code()->instruction_blocks()) {
3723     if (CanEagerlyResolveControlFlow(block)) continue;
3724     BitVector* live = live_in_sets[block->rpo_number().ToInt()];
3725     BitVector::Iterator iterator(live);
3726     while (!iterator.Done()) {
3727       int vreg = iterator.Current();
3728       LiveRangeBoundArray* array = finder.ArrayFor(vreg);
3729       for (const RpoNumber& pred : block->predecessors()) {
3730         FindResult result;
3731         const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
3732         if (!array->FindConnectableSubranges(block, pred_block, &result)) {
3733           continue;
3734         }
3735         InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
3736         InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
3737         if (pred_op.Equals(cur_op)) continue;
3738         if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
3739           // We're doing a reload.
3740           // We don't need to, if:
3741           // 1) there's no register use in this block, and
3742           // 2) the range ends before the block does, and
3743           // 3) we don't have a successor, or the successor is spilled.
3744           LifetimePosition block_start =
3745               LifetimePosition::GapFromInstructionIndex(block->code_start());
3746           LifetimePosition block_end =
3747               LifetimePosition::GapFromInstructionIndex(block->code_end());
3748           const LiveRange* current = result.cur_cover_;
3749           const LiveRange* successor = current->next();
3750           if (current->End() < block_end &&
3751               (successor == nullptr || successor->spilled())) {
3752             // verify point 1: no register use. We can go to the end of the
3753             // range, since it's all within the block.
3754 
3755             bool uses_reg = false;
3756             for (const UsePosition* use = current->NextUsePosition(block_start);
3757                  use != nullptr; use = use->next()) {
3758               if (use->operand()->IsAnyRegister()) {
3759                 uses_reg = true;
3760                 break;
3761               }
3762             }
3763             if (!uses_reg) continue;
3764           }
3765           if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
3766               pred_block->IsDeferred()) {
3767             // The spill location should be defined in pred_block, so add
3768             // pred_block to the list of blocks requiring a spill operand.
3769             current->TopLevel()->GetListOfBlocksRequiringSpillOperands()->Add(
3770                 pred_block->rpo_number().ToInt());
3771           }
3772         }
3773         int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
3774         USE(move_loc);
3775         DCHECK_IMPLIES(
3776             result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
3777                 !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()),
3778             code()->GetInstructionBlock(move_loc)->IsDeferred());
3779       }
3780       iterator.Advance();
3781     }
3782   }
3783 
3784   // At this stage, we collected blocks needing a spill operand from
3785   // ConnectRanges and from ResolveControlFlow. Time to commit the spills for
3786   // deferred blocks.
3787   for (TopLevelLiveRange* top : data()->live_ranges()) {
3788     if (top == nullptr || top->IsEmpty() ||
3789         !top->IsSpilledOnlyInDeferredBlocks())
3790       continue;
3791     CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()), local_zone);
3792   }
3793 }
3794 
3795 
3796 int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
3797                                            const InstructionOperand& cur_op,
3798                                            const InstructionBlock* pred,
3799                                            const InstructionOperand& pred_op) {
3800   DCHECK(!pred_op.Equals(cur_op));
3801   int gap_index;
3802   Instruction::GapPosition position;
3803   if (block->PredecessorCount() == 1) {
3804     gap_index = block->first_instruction_index();
3805     position = Instruction::START;
3806   } else {
3807     DCHECK(pred->SuccessorCount() == 1);
3808     DCHECK(!code()
3809                 ->InstructionAt(pred->last_instruction_index())
3810                 ->HasReferenceMap());
3811     gap_index = pred->last_instruction_index();
3812     position = Instruction::END;
3813   }
3814   data()->AddGapMove(gap_index, position, pred_op, cur_op);
3815   return gap_index;
3816 }
3817 
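// Inserts gap moves between consecutive children of the same live range that
// touch at a position inside a block but were assigned different operands.
// Moves that must run after an existing move at the same position are
// collected in delayed_insertion_map and committed afterwards.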
3818 void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
3819   DelayedInsertionMap delayed_insertion_map(local_zone);
3820   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
3821     if (top_range == nullptr) continue;
3822     bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks();
3823     LiveRange* first_range = top_range;
3824     for (LiveRange *second_range = first_range->next(); second_range != nullptr;
3825          first_range = second_range, second_range = second_range->next()) {
3826       LifetimePosition pos = second_range->Start();
3827       // Add gap move if the two live ranges touch and there is no block
3828       // boundary.
3829       if (second_range->spilled()) continue;
3830       if (first_range->End() != pos) continue;
3831       if (data()->IsBlockBoundary(pos) &&
3832           !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
3833         continue;
3834       }
3835       InstructionOperand prev_operand = first_range->GetAssignedOperand();
3836       InstructionOperand cur_operand = second_range->GetAssignedOperand();
3837       if (prev_operand.Equals(cur_operand)) continue;
3838       bool delay_insertion = false;
3839       Instruction::GapPosition gap_pos;
3840       int gap_index = pos.ToInstructionIndex();
3841       if (connect_spilled && !prev_operand.IsAnyRegister() &&
3842           cur_operand.IsAnyRegister()) {
3843         const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
3844         DCHECK(block->IsDeferred());
3845         // We are performing a reload in this block, so the spill operand
3846         // must be defined here.
3847         top_range->GetListOfBlocksRequiringSpillOperands()->Add(
3848             block->rpo_number().ToInt());
3849       }
3850 
3851       if (pos.IsGapPosition()) {
3852         gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
3853       } else {
3854         if (pos.IsStart()) {
3855           delay_insertion = true;
3856         } else {
3857           gap_index++;
3858         }
3859         gap_pos = delay_insertion ? Instruction::END : Instruction::START;
3860       }
3861       // Reloads or spills for ranges that are spilled only in deferred blocks
3862       // must themselves happen only in deferred blocks.
3863       DCHECK_IMPLIES(
3864           connect_spilled &&
3865               !(prev_operand.IsAnyRegister() && cur_operand.IsAnyRegister()),
3866           code()->GetInstructionBlock(gap_index)->IsDeferred());
3867 
3868       ParallelMove* move =
3869           code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
3870               gap_pos, code_zone());
3871       if (!delay_insertion) {
3872         move->AddMove(prev_operand, cur_operand);
3873       } else {
3874         delayed_insertion_map.insert(
3875             std::make_pair(std::make_pair(move, prev_operand), cur_operand));
3876       }
3877     }
3878   }
3879   if (delayed_insertion_map.empty()) return;
3880   // Insert all the moves which should occur after the stored move.
3881   ZoneVector<MoveOperands*> to_insert(local_zone);
3882   ZoneVector<MoveOperands*> to_eliminate(local_zone);
3883   to_insert.reserve(4);
3884   to_eliminate.reserve(4);
3885   ParallelMove* moves = delayed_insertion_map.begin()->first.first;
3886   for (auto it = delayed_insertion_map.begin();; ++it) {
3887     bool done = it == delayed_insertion_map.end();
3888     if (done || it->first.first != moves) {
3889       // Commit the MoveOperands for current ParallelMove.
      for (MoveOperands* move : to_eliminate) {
        move->Eliminate();
      }
      for (MoveOperands* move : to_insert) {
        moves->push_back(move);
      }
      if (done) break;
      // Reset state.
      to_eliminate.clear();
      to_insert.clear();
      moves = it->first.first;
    }
    // Gather all MoveOperands for a single ParallelMove.
    MoveOperands* move =
        new (code_zone()) MoveOperands(it->first.second, it->second);
    moves->PrepareInsertAfter(move, &to_eliminate);
    to_insert.push_back(move);
  }
}


void LiveRangeConnector::CommitSpillsInDeferredBlocks(
    TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
  DCHECK(range->IsSpilledOnlyInDeferredBlocks());
  DCHECK(!range->spilled());

  InstructionSequence* code = data()->code();
  InstructionOperand spill_operand = range->GetSpillRangeOperand();

  TRACE("Live Range %d will be spilled only in deferred blocks.\n",
        range->vreg());
  // If we have ranges that aren't spilled but have uses requiring the operand
  // on the stack, make sure we insert the spill.
  for (const LiveRange* child = range; child != nullptr;
       child = child->next()) {
    for (const UsePosition* pos = child->first_pos(); pos != nullptr;
         pos = pos->next()) {
      if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
        continue;
      range->AddBlockRequiringSpillOperand(
          code->GetInstructionBlock(pos->pos().ToInstructionIndex())
              ->rpo_number());
    }
  }

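  // Seed the worklist with every block recorded above as needing the spill
  // operand; the loop below walks predecessor chains to decide where the
  // spill moves are actually inserted.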
  ZoneQueue<int> worklist(temp_zone);

  for (BitVector::Iterator iterator(
           range->GetListOfBlocksRequiringSpillOperands());
       !iterator.Done(); iterator.Advance()) {
    worklist.push(iterator.Current());
  }

  ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
  // Seek the deferred blocks that dominate locations requiring spill operands,
  // and spill there. We only need to spill at the start of such blocks.
  BitVector done_blocks(
      range->GetListOfBlocksRequiringSpillOperands()->length(), temp_zone);
  while (!worklist.empty()) {
    int block_id = worklist.front();
    worklist.pop();
    if (done_blocks.Contains(block_id)) continue;
    done_blocks.Add(block_id);
    InstructionBlock* spill_block =
        code->InstructionBlockAt(RpoNumber::FromInt(block_id));

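    // A deferred predecessor is pushed onto the worklist so the spill keeps
    // moving up towards the outermost deferred blocks. For a non-deferred
    // predecessor, insert a gap move (at most once per block and vreg) at
    // the start of this block that stores the operand assigned at the
    // predecessor's end into the spill slot.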
    for (const RpoNumber& pred : spill_block->predecessors()) {
      const InstructionBlock* pred_block = code->InstructionBlockAt(pred);

      if (pred_block->IsDeferred()) {
        worklist.push(pred_block->rpo_number().ToInt());
      } else {
        LifetimePosition pred_end =
            LifetimePosition::InstructionFromInstructionIndex(
                pred_block->last_instruction_index());

        LiveRangeBound* bound = array->Find(pred_end);

        InstructionOperand pred_op = bound->range_->GetAssignedOperand();

        RpoNumber spill_block_number = spill_block->rpo_number();
        if (done_moves.find(std::make_pair(
                spill_block_number, range->vreg())) == done_moves.end()) {
          data()->AddGapMove(spill_block->first_instruction_index(),
                             Instruction::GapPosition::START, pred_op,
                             spill_operand);
          done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
          spill_block->mark_needs_frame();
        }
      }
    }
  }
}


}  // namespace compiler
}  // namespace internal
}  // namespace v8