1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/base/adapters.h"
6 #include "src/compiler/linkage.h"
7 #include "src/compiler/register-allocator.h"
8 #include "src/string-stream.h"
9 
10 namespace v8 {
11 namespace internal {
12 namespace compiler {
13 
14 #define TRACE(...)                             \
15   do {                                         \
16     if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
17   } while (false)
18 
19 
20 namespace {
21 
RemoveElement(ZoneVector<LiveRange * > * v,LiveRange * range)22 void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
23   auto it = std::find(v->begin(), v->end(), range);
24   DCHECK(it != v->end());
25   v->erase(it);
26 }
27 
28 
GetRegisterCount(const RegisterConfiguration * cfg,RegisterKind kind)29 int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
30   return kind == DOUBLE_REGISTERS ? cfg->num_double_registers()
31                                   : cfg->num_general_registers();
32 }
33 
34 
GetAllocatableRegisterCount(const RegisterConfiguration * cfg,RegisterKind kind)35 int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
36                                 RegisterKind kind) {
37   return kind == DOUBLE_REGISTERS
38              ? cfg->num_allocatable_aliased_double_registers()
39              : cfg->num_allocatable_general_registers();
40 }
41 
42 
GetAllocatableRegisterCodes(const RegisterConfiguration * cfg,RegisterKind kind)43 const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
44                                        RegisterKind kind) {
45   return kind == DOUBLE_REGISTERS ? cfg->allocatable_double_codes()
46                                   : cfg->allocatable_general_codes();
47 }
48 
49 
GetContainingLoop(const InstructionSequence * sequence,const InstructionBlock * block)50 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
51                                           const InstructionBlock* block) {
52   RpoNumber index = block->loop_header();
53   if (!index.IsValid()) return nullptr;
54   return sequence->InstructionBlockAt(index);
55 }
56 
57 
GetInstructionBlock(const InstructionSequence * code,LifetimePosition pos)58 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
59                                             LifetimePosition pos) {
60   return code->GetInstructionBlock(pos.ToInstructionIndex());
61 }
62 
63 
GetLastInstruction(InstructionSequence * code,const InstructionBlock * block)64 Instruction* GetLastInstruction(InstructionSequence* code,
65                                 const InstructionBlock* block) {
66   return code->InstructionAt(block->last_instruction_index());
67 }
68 
69 
IsOutputRegisterOf(Instruction * instr,Register reg)70 bool IsOutputRegisterOf(Instruction* instr, Register reg) {
71   for (size_t i = 0; i < instr->OutputCount(); i++) {
72     InstructionOperand* output = instr->OutputAt(i);
73     if (output->IsRegister() &&
74         LocationOperand::cast(output)->GetRegister().is(reg)) {
75       return true;
76     }
77   }
78   return false;
79 }
80 
81 
IsOutputDoubleRegisterOf(Instruction * instr,DoubleRegister reg)82 bool IsOutputDoubleRegisterOf(Instruction* instr, DoubleRegister reg) {
83   for (size_t i = 0; i < instr->OutputCount(); i++) {
84     InstructionOperand* output = instr->OutputAt(i);
85     if (output->IsDoubleRegister() &&
86         LocationOperand::cast(output)->GetDoubleRegister().is(reg)) {
87       return true;
88     }
89   }
90   return false;
91 }
92 
93 
94 // TODO(dcarney): fix frame to allow frame accesses to half size location.
GetByteWidth(MachineRepresentation rep)95 int GetByteWidth(MachineRepresentation rep) {
96   switch (rep) {
97     case MachineRepresentation::kBit:
98     case MachineRepresentation::kWord8:
99     case MachineRepresentation::kWord16:
100     case MachineRepresentation::kWord32:
101     case MachineRepresentation::kTagged:
102       return kPointerSize;
103     case MachineRepresentation::kFloat32:
104     case MachineRepresentation::kWord64:
105     case MachineRepresentation::kFloat64:
106       return 8;
107     case MachineRepresentation::kNone:
108       break;
109   }
110   UNREACHABLE();
111   return 0;
112 }
113 
114 }  // namespace
115 
116 
UsePosition(LifetimePosition pos,InstructionOperand * operand,void * hint,UsePositionHintType hint_type)117 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
118                          void* hint, UsePositionHintType hint_type)
119     : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) {
120   DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone);
121   bool register_beneficial = true;
122   UsePositionType type = UsePositionType::kAny;
123   if (operand_ != nullptr && operand_->IsUnallocated()) {
124     const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
125     if (unalloc->HasRegisterPolicy()) {
126       type = UsePositionType::kRequiresRegister;
127     } else if (unalloc->HasSlotPolicy()) {
128       type = UsePositionType::kRequiresSlot;
129       register_beneficial = false;
130     } else {
131       register_beneficial = !unalloc->HasAnyPolicy();
132     }
133   }
134   flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) |
135            RegisterBeneficialField::encode(register_beneficial) |
136            AssignedRegisterField::encode(kUnassignedRegister);
137   DCHECK(pos_.IsValid());
138 }
139 
140 
HasHint() const141 bool UsePosition::HasHint() const {
142   int hint_register;
143   return HintRegister(&hint_register);
144 }
145 
146 
HintRegister(int * register_code) const147 bool UsePosition::HintRegister(int* register_code) const {
148   if (hint_ == nullptr) return false;
149   switch (HintTypeField::decode(flags_)) {
150     case UsePositionHintType::kNone:
151     case UsePositionHintType::kUnresolved:
152       return false;
153     case UsePositionHintType::kUsePos: {
154       UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
155       int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
156       if (assigned_register == kUnassignedRegister) return false;
157       *register_code = assigned_register;
158       return true;
159     }
160     case UsePositionHintType::kOperand: {
161       InstructionOperand* operand =
162           reinterpret_cast<InstructionOperand*>(hint_);
163       int assigned_register =
164           operand->IsRegister()
165               ? LocationOperand::cast(operand)->GetRegister().code()
166               : LocationOperand::cast(operand)->GetDoubleRegister().code();
167       *register_code = assigned_register;
168       return true;
169     }
170     case UsePositionHintType::kPhi: {
171       RegisterAllocationData::PhiMapValue* phi =
172           reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
173       int assigned_register = phi->assigned_register();
174       if (assigned_register == kUnassignedRegister) return false;
175       *register_code = assigned_register;
176       return true;
177     }
178   }
179   UNREACHABLE();
180   return false;
181 }
182 
183 
HintTypeForOperand(const InstructionOperand & op)184 UsePositionHintType UsePosition::HintTypeForOperand(
185     const InstructionOperand& op) {
186   switch (op.kind()) {
187     case InstructionOperand::CONSTANT:
188     case InstructionOperand::IMMEDIATE:
189     case InstructionOperand::EXPLICIT:
190       return UsePositionHintType::kNone;
191     case InstructionOperand::UNALLOCATED:
192       return UsePositionHintType::kUnresolved;
193     case InstructionOperand::ALLOCATED:
194       if (op.IsRegister() || op.IsDoubleRegister()) {
195         return UsePositionHintType::kOperand;
196       } else {
197         DCHECK(op.IsStackSlot() || op.IsDoubleStackSlot());
198         return UsePositionHintType::kNone;
199       }
200     case InstructionOperand::INVALID:
201       break;
202   }
203   UNREACHABLE();
204   return UsePositionHintType::kNone;
205 }
206 
207 
ResolveHint(UsePosition * use_pos)208 void UsePosition::ResolveHint(UsePosition* use_pos) {
209   DCHECK_NOT_NULL(use_pos);
210   if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return;
211   hint_ = use_pos;
212   flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
213 }
214 
215 
set_type(UsePositionType type,bool register_beneficial)216 void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
217   DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
218   DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
219   flags_ = TypeField::encode(type) |
220            RegisterBeneficialField::encode(register_beneficial) |
221            HintTypeField::encode(HintTypeField::decode(flags_)) |
222            AssignedRegisterField::encode(kUnassignedRegister);
223 }
224 
225 
SplitAt(LifetimePosition pos,Zone * zone)226 UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
227   DCHECK(Contains(pos) && pos != start());
228   UseInterval* after = new (zone) UseInterval(pos, end_);
229   after->next_ = next_;
230   next_ = nullptr;
231   end_ = pos;
232   return after;
233 }
234 
235 
Print() const236 void LifetimePosition::Print() const {
237   OFStream os(stdout);
238   os << *this << std::endl;
239 }
240 
241 
operator <<(std::ostream & os,const LifetimePosition pos)242 std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
243   os << '@' << pos.ToInstructionIndex();
244   if (pos.IsGapPosition()) {
245     os << 'g';
246   } else {
247     os << 'i';
248   }
249   if (pos.IsStart()) {
250     os << 's';
251   } else {
252     os << 'e';
253   }
254   return os;
255 }
256 
257 
258 const float LiveRange::kInvalidWeight = -1;
259 const float LiveRange::kMaxWeight = std::numeric_limits<float>::max();
260 
261 
LiveRange(int relative_id,MachineRepresentation rep,TopLevelLiveRange * top_level)262 LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
263                      TopLevelLiveRange* top_level)
264     : relative_id_(relative_id),
265       bits_(0),
266       last_interval_(nullptr),
267       first_interval_(nullptr),
268       first_pos_(nullptr),
269       top_level_(top_level),
270       next_(nullptr),
271       current_interval_(nullptr),
272       last_processed_use_(nullptr),
273       current_hint_position_(nullptr),
274       splitting_pointer_(nullptr),
275       size_(kInvalidSize),
276       weight_(kInvalidWeight),
277       group_(nullptr) {
278   DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
279   bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
280           RepresentationField::encode(rep);
281 }
282 
283 
VerifyPositions() const284 void LiveRange::VerifyPositions() const {
285   // Walk the positions, verifying that each is in an interval.
286   UseInterval* interval = first_interval_;
287   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
288     CHECK(Start() <= pos->pos());
289     CHECK(pos->pos() <= End());
290     CHECK_NOT_NULL(interval);
291     while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
292       interval = interval->next();
293       CHECK_NOT_NULL(interval);
294     }
295   }
296 }
297 
298 
VerifyIntervals() const299 void LiveRange::VerifyIntervals() const {
300   DCHECK(first_interval()->start() == Start());
301   LifetimePosition last_end = first_interval()->end();
302   for (UseInterval* interval = first_interval()->next(); interval != nullptr;
303        interval = interval->next()) {
304     DCHECK(last_end <= interval->start());
305     last_end = interval->end();
306   }
307   DCHECK(last_end == End());
308 }
309 
310 
set_assigned_register(int reg)311 void LiveRange::set_assigned_register(int reg) {
312   DCHECK(!HasRegisterAssigned() && !spilled());
313   bits_ = AssignedRegisterField::update(bits_, reg);
314 }
315 
316 
UnsetAssignedRegister()317 void LiveRange::UnsetAssignedRegister() {
318   DCHECK(HasRegisterAssigned() && !spilled());
319   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
320 }
321 
322 
Spill()323 void LiveRange::Spill() {
324   DCHECK(!spilled());
325   DCHECK(!TopLevel()->HasNoSpillType());
326   set_spilled(true);
327   bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
328 }
329 
330 
kind() const331 RegisterKind LiveRange::kind() const {
332   return IsFloatingPoint(representation()) ? DOUBLE_REGISTERS
333                                            : GENERAL_REGISTERS;
334 }
335 
336 
FirstHintPosition(int * register_index) const337 UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
338   for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
339     if (pos->HintRegister(register_index)) return pos;
340   }
341   return nullptr;
342 }
343 
344 
NextUsePosition(LifetimePosition start) const345 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
346   UsePosition* use_pos = last_processed_use_;
347   if (use_pos == nullptr || use_pos->pos() > start) {
348     use_pos = first_pos();
349   }
350   while (use_pos != nullptr && use_pos->pos() < start) {
351     use_pos = use_pos->next();
352   }
353   last_processed_use_ = use_pos;
354   return use_pos;
355 }
356 
357 
NextUsePositionRegisterIsBeneficial(LifetimePosition start) const358 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
359     LifetimePosition start) const {
360   UsePosition* pos = NextUsePosition(start);
361   while (pos != nullptr && !pos->RegisterIsBeneficial()) {
362     pos = pos->next();
363   }
364   return pos;
365 }
366 
367 
PreviousUsePositionRegisterIsBeneficial(LifetimePosition start) const368 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
369     LifetimePosition start) const {
370   UsePosition* pos = first_pos();
371   UsePosition* prev = nullptr;
372   while (pos != nullptr && pos->pos() < start) {
373     if (pos->RegisterIsBeneficial()) prev = pos;
374     pos = pos->next();
375   }
376   return prev;
377 }
378 
379 
NextRegisterPosition(LifetimePosition start) const380 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
381   UsePosition* pos = NextUsePosition(start);
382   while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
383     pos = pos->next();
384   }
385   return pos;
386 }
387 
388 
NextSlotPosition(LifetimePosition start) const389 UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
390   for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
391        pos = pos->next()) {
392     if (pos->type() != UsePositionType::kRequiresSlot) continue;
393     return pos;
394   }
395   return nullptr;
396 }
397 
398 
CanBeSpilled(LifetimePosition pos) const399 bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
400   // We cannot spill a live range that has a use requiring a register
401   // at the current or the immediate next position.
402   UsePosition* use_pos = NextRegisterPosition(pos);
403   if (use_pos == nullptr) return true;
404   return use_pos->pos() > pos.NextStart().End();
405 }
406 
407 
IsTopLevel() const408 bool LiveRange::IsTopLevel() const { return top_level_ == this; }
409 
410 
GetAssignedOperand() const411 InstructionOperand LiveRange::GetAssignedOperand() const {
412   if (HasRegisterAssigned()) {
413     DCHECK(!spilled());
414     return AllocatedOperand(LocationOperand::REGISTER, representation(),
415                             assigned_register());
416   }
417   DCHECK(spilled());
418   DCHECK(!HasRegisterAssigned());
419   if (TopLevel()->HasSpillOperand()) {
420     InstructionOperand* op = TopLevel()->GetSpillOperand();
421     DCHECK(!op->IsUnallocated());
422     return *op;
423   }
424   return TopLevel()->GetSpillRangeOperand();
425 }
426 
427 
FirstSearchIntervalForPosition(LifetimePosition position) const428 UseInterval* LiveRange::FirstSearchIntervalForPosition(
429     LifetimePosition position) const {
430   if (current_interval_ == nullptr) return first_interval_;
431   if (current_interval_->start() > position) {
432     current_interval_ = nullptr;
433     return first_interval_;
434   }
435   return current_interval_;
436 }
437 
438 
AdvanceLastProcessedMarker(UseInterval * to_start_of,LifetimePosition but_not_past) const439 void LiveRange::AdvanceLastProcessedMarker(
440     UseInterval* to_start_of, LifetimePosition but_not_past) const {
441   if (to_start_of == nullptr) return;
442   if (to_start_of->start() > but_not_past) return;
443   LifetimePosition start = current_interval_ == nullptr
444                                ? LifetimePosition::Invalid()
445                                : current_interval_->start();
446   if (to_start_of->start() > start) {
447     current_interval_ = to_start_of;
448   }
449 }
450 
451 
SplitAt(LifetimePosition position,Zone * zone)452 LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
453   int new_id = TopLevel()->GetNextChildId();
454   LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
455   DetachAt(position, child, zone);
456 
457   child->top_level_ = TopLevel();
458   child->next_ = next_;
459   next_ = child;
460   return child;
461 }
462 
463 
DetachAt(LifetimePosition position,LiveRange * result,Zone * zone)464 UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
465                                  Zone* zone) {
466   DCHECK(Start() < position);
467   DCHECK(End() > position);
468   DCHECK(result->IsEmpty());
469   // Find the last interval that ends before the position. If the
470   // position is contained in one of the intervals in the chain, we
471   // split that interval and use the first part.
472   UseInterval* current = FirstSearchIntervalForPosition(position);
473 
474   // If the split position coincides with the beginning of a use interval
475   // we need to split use positons in a special way.
476   bool split_at_start = false;
477 
478   if (current->start() == position) {
479     // When splitting at start we need to locate the previous use interval.
480     current = first_interval_;
481   }
482 
483   UseInterval* after = nullptr;
484   while (current != nullptr) {
485     if (current->Contains(position)) {
486       after = current->SplitAt(position, zone);
487       break;
488     }
489     UseInterval* next = current->next();
490     if (next->start() >= position) {
491       split_at_start = (next->start() == position);
492       after = next;
493       current->set_next(nullptr);
494       break;
495     }
496     current = next;
497   }
498   DCHECK(nullptr != after);
499 
500   // Partition original use intervals to the two live ranges.
501   UseInterval* before = current;
502   result->last_interval_ =
503       (last_interval_ == before)
504           ? after            // Only interval in the range after split.
505           : last_interval_;  // Last interval of the original range.
506   result->first_interval_ = after;
507   last_interval_ = before;
508 
509   // Find the last use position before the split and the first use
510   // position after it.
511   UsePosition* use_after =
512       splitting_pointer_ == nullptr || splitting_pointer_->pos() > position
513           ? first_pos()
514           : splitting_pointer_;
515   UsePosition* use_before = nullptr;
516   if (split_at_start) {
517     // The split position coincides with the beginning of a use interval (the
518     // end of a lifetime hole). Use at this position should be attributed to
519     // the split child because split child owns use interval covering it.
520     while (use_after != nullptr && use_after->pos() < position) {
521       use_before = use_after;
522       use_after = use_after->next();
523     }
524   } else {
525     while (use_after != nullptr && use_after->pos() <= position) {
526       use_before = use_after;
527       use_after = use_after->next();
528     }
529   }
530 
531   // Partition original use positions to the two live ranges.
532   if (use_before != nullptr) {
533     use_before->set_next(nullptr);
534   } else {
535     first_pos_ = nullptr;
536   }
537   result->first_pos_ = use_after;
538 
539   // Discard cached iteration state. It might be pointing
540   // to the use that no longer belongs to this live range.
541   last_processed_use_ = nullptr;
542   current_interval_ = nullptr;
543 
544   // Invalidate size and weight of this range. The child range has them
545   // invalid at construction.
546   size_ = kInvalidSize;
547   weight_ = kInvalidWeight;
548 #ifdef DEBUG
549   VerifyChildStructure();
550   result->VerifyChildStructure();
551 #endif
552   return use_before;
553 }
554 
555 
UpdateParentForAllChildren(TopLevelLiveRange * new_top_level)556 void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
557   LiveRange* child = this;
558   for (; child != nullptr; child = child->next()) {
559     child->top_level_ = new_top_level;
560   }
561 }
562 
563 
ConvertUsesToOperand(const InstructionOperand & op,const InstructionOperand & spill_op)564 void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
565                                      const InstructionOperand& spill_op) {
566   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
567     DCHECK(Start() <= pos->pos() && pos->pos() <= End());
568     if (!pos->HasOperand()) continue;
569     switch (pos->type()) {
570       case UsePositionType::kRequiresSlot:
571         DCHECK(spill_op.IsStackSlot() || spill_op.IsDoubleStackSlot());
572         InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
573         break;
574       case UsePositionType::kRequiresRegister:
575         DCHECK(op.IsRegister() || op.IsDoubleRegister());
576       // Fall through.
577       case UsePositionType::kAny:
578         InstructionOperand::ReplaceWith(pos->operand(), &op);
579         break;
580     }
581   }
582 }
583 
584 
585 // This implements an ordering on live ranges so that they are ordered by their
586 // start positions.  This is needed for the correctness of the register
587 // allocation algorithm.  If two live ranges start at the same offset then there
588 // is a tie breaker based on where the value is first used.  This part of the
589 // ordering is merely a heuristic.
ShouldBeAllocatedBefore(const LiveRange * other) const590 bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
591   LifetimePosition start = Start();
592   LifetimePosition other_start = other->Start();
593   if (start == other_start) {
594     UsePosition* pos = first_pos();
595     if (pos == nullptr) return false;
596     UsePosition* other_pos = other->first_pos();
597     if (other_pos == nullptr) return true;
598     return pos->pos() < other_pos->pos();
599   }
600   return start < other_start;
601 }
602 
603 
SetUseHints(int register_index)604 void LiveRange::SetUseHints(int register_index) {
605   for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
606     if (!pos->HasOperand()) continue;
607     switch (pos->type()) {
608       case UsePositionType::kRequiresSlot:
609         break;
610       case UsePositionType::kRequiresRegister:
611       case UsePositionType::kAny:
612         pos->set_assigned_register(register_index);
613         break;
614     }
615   }
616 }
617 
618 
CanCover(LifetimePosition position) const619 bool LiveRange::CanCover(LifetimePosition position) const {
620   if (IsEmpty()) return false;
621   return Start() <= position && position < End();
622 }
623 
624 
Covers(LifetimePosition position) const625 bool LiveRange::Covers(LifetimePosition position) const {
626   if (!CanCover(position)) return false;
627   UseInterval* start_search = FirstSearchIntervalForPosition(position);
628   for (UseInterval* interval = start_search; interval != nullptr;
629        interval = interval->next()) {
630     DCHECK(interval->next() == nullptr ||
631            interval->next()->start() >= interval->start());
632     AdvanceLastProcessedMarker(interval, position);
633     if (interval->Contains(position)) return true;
634     if (interval->start() > position) return false;
635   }
636   return false;
637 }
638 
639 
FirstIntersection(LiveRange * other) const640 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
641   UseInterval* b = other->first_interval();
642   if (b == nullptr) return LifetimePosition::Invalid();
643   LifetimePosition advance_last_processed_up_to = b->start();
644   UseInterval* a = FirstSearchIntervalForPosition(b->start());
645   while (a != nullptr && b != nullptr) {
646     if (a->start() > other->End()) break;
647     if (b->start() > End()) break;
648     LifetimePosition cur_intersection = a->Intersect(b);
649     if (cur_intersection.IsValid()) {
650       return cur_intersection;
651     }
652     if (a->start() < b->start()) {
653       a = a->next();
654       if (a == nullptr || a->start() > other->End()) break;
655       AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
656     } else {
657       b = b->next();
658     }
659   }
660   return LifetimePosition::Invalid();
661 }
662 
663 
GetSize()664 unsigned LiveRange::GetSize() {
665   if (size_ == kInvalidSize) {
666     size_ = 0;
667     for (const UseInterval* interval = first_interval(); interval != nullptr;
668          interval = interval->next()) {
669       size_ += (interval->end().value() - interval->start().value());
670     }
671   }
672 
673   return static_cast<unsigned>(size_);
674 }
675 
676 
Print(const RegisterConfiguration * config,bool with_children) const677 void LiveRange::Print(const RegisterConfiguration* config,
678                       bool with_children) const {
679   OFStream os(stdout);
680   PrintableLiveRange wrapper;
681   wrapper.register_configuration_ = config;
682   for (const LiveRange* i = this; i != nullptr; i = i->next()) {
683     wrapper.range_ = i;
684     os << wrapper << std::endl;
685     if (!with_children) break;
686   }
687 }
688 
689 
Print(bool with_children) const690 void LiveRange::Print(bool with_children) const {
691   const RegisterConfiguration* config =
692       RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
693   Print(config, with_children);
694 }
695 
696 
697 struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
SpillMoveInsertionListv8::internal::compiler::TopLevelLiveRange::SpillMoveInsertionList698   SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
699                          SpillMoveInsertionList* next)
700       : gap_index(gap_index), operand(operand), next(next) {}
701   const int gap_index;
702   InstructionOperand* const operand;
703   SpillMoveInsertionList* const next;
704 };
705 
706 
TopLevelLiveRange(int vreg,MachineRepresentation rep)707 TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
708     : LiveRange(0, rep, this),
709       vreg_(vreg),
710       last_child_id_(0),
711       splintered_from_(nullptr),
712       spill_operand_(nullptr),
713       spill_move_insertion_locations_(nullptr),
714       spilled_in_deferred_blocks_(false),
715       spill_start_index_(kMaxInt),
716       last_pos_(nullptr),
717       splinter_(nullptr),
718       has_preassigned_slot_(false) {
719   bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
720 }
721 
722 
723 #if DEBUG
debug_virt_reg() const724 int TopLevelLiveRange::debug_virt_reg() const {
725   return IsSplinter() ? splintered_from()->vreg() : vreg();
726 }
727 #endif
728 
729 
RecordSpillLocation(Zone * zone,int gap_index,InstructionOperand * operand)730 void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
731                                             InstructionOperand* operand) {
732   DCHECK(HasNoSpillType());
733   spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList(
734       gap_index, operand, spill_move_insertion_locations_);
735 }
736 
737 
TryCommitSpillInDeferredBlock(InstructionSequence * code,const InstructionOperand & spill_operand)738 bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
739     InstructionSequence* code, const InstructionOperand& spill_operand) {
740   if (!IsSpilledOnlyInDeferredBlocks()) return false;
741 
742   TRACE("Live Range %d will be spilled only in deferred blocks.\n", vreg());
743   // If we have ranges that aren't spilled but require the operand on the stack,
744   // make sure we insert the spill.
745   for (const LiveRange* child = this; child != nullptr; child = child->next()) {
746     if (!child->spilled() &&
747         child->NextSlotPosition(child->Start()) != nullptr) {
748       Instruction* instr =
749           code->InstructionAt(child->Start().ToInstructionIndex());
750       // Insert spill at the end to let live range connections happen at START.
751       ParallelMove* move =
752           instr->GetOrCreateParallelMove(Instruction::END, code->zone());
753       InstructionOperand assigned = child->GetAssignedOperand();
754       if (TopLevel()->has_slot_use()) {
755         bool found = false;
756         for (MoveOperands* move_op : *move) {
757           if (move_op->IsEliminated()) continue;
758           if (move_op->source().Equals(assigned) &&
759               move_op->destination().Equals(spill_operand)) {
760             found = true;
761             break;
762           }
763         }
764         if (found) continue;
765       }
766 
767       move->AddMove(assigned, spill_operand);
768     }
769   }
770 
771   return true;
772 }
773 
774 
CommitSpillMoves(InstructionSequence * sequence,const InstructionOperand & op,bool might_be_duplicated)775 void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
776                                          const InstructionOperand& op,
777                                          bool might_be_duplicated) {
778   DCHECK_IMPLIES(op.IsConstant(), spill_move_insertion_locations() == nullptr);
779   Zone* zone = sequence->zone();
780 
781   for (SpillMoveInsertionList* to_spill = spill_move_insertion_locations();
782        to_spill != nullptr; to_spill = to_spill->next) {
783     Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
784     ParallelMove* move =
785         instr->GetOrCreateParallelMove(Instruction::START, zone);
786     // Skip insertion if it's possible that the move exists already as a
787     // constraint move from a fixed output register to a slot.
788     if (might_be_duplicated || has_preassigned_slot()) {
789       bool found = false;
790       for (MoveOperands* move_op : *move) {
791         if (move_op->IsEliminated()) continue;
792         if (move_op->source().Equals(*to_spill->operand) &&
793             move_op->destination().Equals(op)) {
794           found = true;
795           if (has_preassigned_slot()) move_op->Eliminate();
796           break;
797         }
798       }
799       if (found) continue;
800     }
801     if (!has_preassigned_slot()) {
802       move->AddMove(*to_spill->operand, op);
803     }
804   }
805 }
806 
807 
SetSpillOperand(InstructionOperand * operand)808 void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
809   DCHECK(HasNoSpillType());
810   DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
811   set_spill_type(SpillType::kSpillOperand);
812   spill_operand_ = operand;
813 }
814 
815 
SetSpillRange(SpillRange * spill_range)816 void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
817   DCHECK(!HasSpillOperand());
818   DCHECK(spill_range);
819   spill_range_ = spill_range;
820 }
821 
822 
GetSpillRangeOperand() const823 AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
824   SpillRange* spill_range = GetSpillRange();
825   int index = spill_range->assigned_slot();
826   return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
827 }
828 
829 
Splinter(LifetimePosition start,LifetimePosition end,Zone * zone)830 void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
831                                  Zone* zone) {
832   DCHECK(start != Start() || end != End());
833   DCHECK(start < end);
834 
835   TopLevelLiveRange splinter_temp(-1, representation());
836   UsePosition* last_in_splinter = nullptr;
837   // Live ranges defined in deferred blocks stay in deferred blocks, so we
838   // don't need to splinter them. That means that start should always be
839   // after the beginning of the range.
840   DCHECK(start > Start());
841 
842   if (end >= End()) {
843     DCHECK(start > Start());
844     DetachAt(start, &splinter_temp, zone);
845     next_ = nullptr;
846   } else {
847     DCHECK(start < End() && Start() < end);
848 
849     const int kInvalidId = std::numeric_limits<int>::max();
850 
851     UsePosition* last = DetachAt(start, &splinter_temp, zone);
852 
853     LiveRange end_part(kInvalidId, this->representation(), nullptr);
854     last_in_splinter = splinter_temp.DetachAt(end, &end_part, zone);
855 
856     next_ = end_part.next_;
857     last_interval_->set_next(end_part.first_interval_);
858     // The next splinter will happen either at or after the current interval.
859     // We can optimize DetachAt by setting current_interval_ accordingly,
860     // which will then be picked up by FirstSearchIntervalForPosition.
861     current_interval_ = last_interval_;
862     last_interval_ = end_part.last_interval_;
863 
864     if (first_pos_ == nullptr) {
865       first_pos_ = end_part.first_pos_;
866     } else {
867       splitting_pointer_ = last;
868       if (last != nullptr) last->set_next(end_part.first_pos_);
869     }
870   }
871 
872   if (splinter()->IsEmpty()) {
873     splinter()->first_interval_ = splinter_temp.first_interval_;
874     splinter()->last_interval_ = splinter_temp.last_interval_;
875   } else {
876     splinter()->last_interval_->set_next(splinter_temp.first_interval_);
877     splinter()->last_interval_ = splinter_temp.last_interval_;
878   }
879   if (splinter()->first_pos() == nullptr) {
880     splinter()->first_pos_ = splinter_temp.first_pos_;
881   } else {
882     splinter()->last_pos_->set_next(splinter_temp.first_pos_);
883   }
884   if (last_in_splinter != nullptr) {
885     splinter()->last_pos_ = last_in_splinter;
886   } else {
887     if (splinter()->first_pos() != nullptr &&
888         splinter()->last_pos_ == nullptr) {
889       splinter()->last_pos_ = splinter()->first_pos();
890       for (UsePosition* pos = splinter()->first_pos(); pos != nullptr;
891            pos = pos->next()) {
892         splinter()->last_pos_ = pos;
893       }
894     }
895   }
896 #if DEBUG
897   Verify();
898   splinter()->Verify();
899 #endif
900 }
901 
902 
SetSplinteredFrom(TopLevelLiveRange * splinter_parent)903 void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
904   splintered_from_ = splinter_parent;
905   if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
906     SetSpillRange(splinter_parent->spill_range_);
907   }
908 }
909 
910 
UpdateSpillRangePostMerge(TopLevelLiveRange * merged)911 void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
912   DCHECK(merged->TopLevel() == this);
913 
914   if (HasNoSpillType() && merged->HasSpillRange()) {
915     set_spill_type(merged->spill_type());
916     DCHECK(GetSpillRange()->live_ranges().size() > 0);
917     merged->spill_range_ = nullptr;
918     merged->bits_ =
919         SpillTypeField::update(merged->bits_, SpillType::kNoSpillType);
920   }
921 }
922 
923 
Merge(TopLevelLiveRange * other,Zone * zone)924 void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
925   DCHECK(Start() < other->Start());
926   DCHECK(other->splintered_from() == this);
927 
928   LiveRange* first = this;
929   LiveRange* second = other;
930   DCHECK(first->Start() < second->Start());
931   while (first != nullptr && second != nullptr) {
932     DCHECK(first != second);
933     // Make sure the ranges are in order each time we iterate.
934     if (second->Start() < first->Start()) {
935       LiveRange* tmp = second;
936       second = first;
937       first = tmp;
938       continue;
939     }
940 
941     if (first->End() <= second->Start()) {
942       if (first->next() == nullptr ||
943           first->next()->Start() > second->Start()) {
944         // First is in order before second.
945         LiveRange* temp = first->next();
946         first->next_ = second;
947         first = temp;
948       } else {
949         // First is in order before its successor (or second), so advance first.
950         first = first->next();
951       }
952       continue;
953     }
954 
955     DCHECK(first->Start() < second->Start());
956     // If first and second intersect, split first.
957     if (first->Start() < second->End() && second->Start() < first->End()) {
958       LiveRange* temp = first->SplitAt(second->Start(), zone);
959       CHECK(temp != first);
960       temp->set_spilled(first->spilled());
961       if (!temp->spilled())
962         temp->set_assigned_register(first->assigned_register());
963 
964       first->next_ = second;
965       first = temp;
966       continue;
967     }
968     DCHECK(first->End() <= second->Start());
969   }
970 
971   TopLevel()->UpdateParentForAllChildren(TopLevel());
972   TopLevel()->UpdateSpillRangePostMerge(other);
973 
974 #if DEBUG
975   Verify();
976 #endif
977 }
978 
979 
VerifyChildrenInOrder() const980 void TopLevelLiveRange::VerifyChildrenInOrder() const {
981   LifetimePosition last_end = End();
982   for (const LiveRange* child = this->next(); child != nullptr;
983        child = child->next()) {
984     DCHECK(last_end <= child->Start());
985     last_end = child->End();
986   }
987 }
988 
989 
Verify() const990 void TopLevelLiveRange::Verify() const {
991   VerifyChildrenInOrder();
992   for (const LiveRange* child = this; child != nullptr; child = child->next()) {
993     VerifyChildStructure();
994   }
995 }
996 
997 
ShortenTo(LifetimePosition start)998 void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
999   TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
1000   DCHECK(first_interval_ != nullptr);
1001   DCHECK(first_interval_->start() <= start);
1002   DCHECK(start < first_interval_->end());
1003   first_interval_->set_start(start);
1004 }
1005 
1006 
EnsureInterval(LifetimePosition start,LifetimePosition end,Zone * zone)1007 void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
1008                                        LifetimePosition end, Zone* zone) {
1009   TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
1010         end.value());
1011   LifetimePosition new_end = end;
1012   while (first_interval_ != nullptr && first_interval_->start() <= end) {
1013     if (first_interval_->end() > end) {
1014       new_end = first_interval_->end();
1015     }
1016     first_interval_ = first_interval_->next();
1017   }
1018 
1019   UseInterval* new_interval = new (zone) UseInterval(start, new_end);
1020   new_interval->set_next(first_interval_);
1021   first_interval_ = new_interval;
1022   if (new_interval->next() == nullptr) {
1023     last_interval_ = new_interval;
1024   }
1025 }
1026 
1027 
AddUseInterval(LifetimePosition start,LifetimePosition end,Zone * zone)1028 void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
1029                                        LifetimePosition end, Zone* zone) {
1030   TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
1031         end.value());
1032   if (first_interval_ == nullptr) {
1033     UseInterval* interval = new (zone) UseInterval(start, end);
1034     first_interval_ = interval;
1035     last_interval_ = interval;
1036   } else {
1037     if (end == first_interval_->start()) {
1038       first_interval_->set_start(start);
1039     } else if (end < first_interval_->start()) {
1040       UseInterval* interval = new (zone) UseInterval(start, end);
1041       interval->set_next(first_interval_);
1042       first_interval_ = interval;
1043     } else {
1044       // Order of instruction's processing (see ProcessInstructions) guarantees
1045       // that each new use interval either precedes or intersects with
1046       // last added interval.
1047       DCHECK(start < first_interval_->end());
1048       first_interval_->set_start(Min(start, first_interval_->start()));
1049       first_interval_->set_end(Max(end, first_interval_->end()));
1050     }
1051   }
1052 }
1053 
1054 
AddUsePosition(UsePosition * use_pos)1055 void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
1056   LifetimePosition pos = use_pos->pos();
1057   TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
1058   UsePosition* prev_hint = nullptr;
1059   UsePosition* prev = nullptr;
1060   UsePosition* current = first_pos_;
1061   while (current != nullptr && current->pos() < pos) {
1062     prev_hint = current->HasHint() ? current : prev_hint;
1063     prev = current;
1064     current = current->next();
1065   }
1066 
1067   if (prev == nullptr) {
1068     use_pos->set_next(first_pos_);
1069     first_pos_ = use_pos;
1070   } else {
1071     use_pos->set_next(prev->next());
1072     prev->set_next(use_pos);
1073   }
1074 
1075   if (prev_hint == nullptr && use_pos->HasHint()) {
1076     current_hint_position_ = use_pos;
1077   }
1078 }
1079 
1080 
AreUseIntervalsIntersecting(UseInterval * interval1,UseInterval * interval2)1081 static bool AreUseIntervalsIntersecting(UseInterval* interval1,
1082                                         UseInterval* interval2) {
1083   while (interval1 != nullptr && interval2 != nullptr) {
1084     if (interval1->start() < interval2->start()) {
1085       if (interval1->end() > interval2->start()) {
1086         return true;
1087       }
1088       interval1 = interval1->next();
1089     } else {
1090       if (interval2->end() > interval1->start()) {
1091         return true;
1092       }
1093       interval2 = interval2->next();
1094     }
1095   }
1096   return false;
1097 }
1098 
1099 
operator <<(std::ostream & os,const PrintableLiveRange & printable_range)1100 std::ostream& operator<<(std::ostream& os,
1101                          const PrintableLiveRange& printable_range) {
1102   const LiveRange* range = printable_range.range_;
1103   os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
1104      << " ";
1105   if (range->TopLevel()->is_phi()) os << "phi ";
1106   if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
1107 
1108   os << "{" << std::endl;
1109   UseInterval* interval = range->first_interval();
1110   UsePosition* use_pos = range->first_pos();
1111   PrintableInstructionOperand pio;
1112   pio.register_configuration_ = printable_range.register_configuration_;
1113   while (use_pos != nullptr) {
1114     if (use_pos->HasOperand()) {
1115       pio.op_ = *use_pos->operand();
1116       os << pio << use_pos->pos() << " ";
1117     }
1118     use_pos = use_pos->next();
1119   }
1120   os << std::endl;
1121 
1122   while (interval != nullptr) {
1123     os << '[' << interval->start() << ", " << interval->end() << ')'
1124        << std::endl;
1125     interval = interval->next();
1126   }
1127   os << "}";
1128   return os;
1129 }
1130 
1131 
SpillRange(TopLevelLiveRange * parent,Zone * zone)1132 SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
1133     : live_ranges_(zone),
1134       assigned_slot_(kUnassignedSlot),
1135       byte_width_(GetByteWidth(parent->representation())),
1136       kind_(parent->kind()) {
1137   // Spill ranges are created for top level, non-splintered ranges. This is so
1138   // that, when merging decisions are made, we consider the full extent of the
1139   // virtual register, and avoid clobbering it.
1140   DCHECK(!parent->IsSplinter());
1141   UseInterval* result = nullptr;
1142   UseInterval* node = nullptr;
1143   // Copy the intervals for all ranges.
1144   for (LiveRange* range = parent; range != nullptr; range = range->next()) {
1145     UseInterval* src = range->first_interval();
1146     while (src != nullptr) {
1147       UseInterval* new_node = new (zone) UseInterval(src->start(), src->end());
1148       if (result == nullptr) {
1149         result = new_node;
1150       } else {
1151         node->set_next(new_node);
1152       }
1153       node = new_node;
1154       src = src->next();
1155     }
1156   }
1157   use_interval_ = result;
1158   live_ranges().push_back(parent);
1159   end_position_ = node->end();
1160   parent->SetSpillRange(this);
1161 }
1162 
1163 
ByteWidth() const1164 int SpillRange::ByteWidth() const {
1165   return GetByteWidth(live_ranges_[0]->representation());
1166 }
1167 
1168 
IsIntersectingWith(SpillRange * other) const1169 bool SpillRange::IsIntersectingWith(SpillRange* other) const {
1170   if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
1171       this->End() <= other->use_interval_->start() ||
1172       other->End() <= this->use_interval_->start()) {
1173     return false;
1174   }
1175   return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
1176 }
1177 
1178 
TryMerge(SpillRange * other)1179 bool SpillRange::TryMerge(SpillRange* other) {
1180   if (HasSlot() || other->HasSlot()) return false;
1181   // TODO(dcarney): byte widths should be compared here not kinds.
1182   if (live_ranges_[0]->kind() != other->live_ranges_[0]->kind() ||
1183       IsIntersectingWith(other)) {
1184     return false;
1185   }
1186 
1187   LifetimePosition max = LifetimePosition::MaxPosition();
1188   if (End() < other->End() && other->End() != max) {
1189     end_position_ = other->End();
1190   }
1191   other->end_position_ = max;
1192 
1193   MergeDisjointIntervals(other->use_interval_);
1194   other->use_interval_ = nullptr;
1195 
1196   for (TopLevelLiveRange* range : other->live_ranges()) {
1197     DCHECK(range->GetSpillRange() == other);
1198     range->SetSpillRange(this);
1199   }
1200 
1201   live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
1202                        other->live_ranges().end());
1203   other->live_ranges().clear();
1204 
1205   return true;
1206 }
1207 
1208 
MergeDisjointIntervals(UseInterval * other)1209 void SpillRange::MergeDisjointIntervals(UseInterval* other) {
1210   UseInterval* tail = nullptr;
1211   UseInterval* current = use_interval_;
1212   while (other != nullptr) {
1213     // Make sure the 'current' list starts first
1214     if (current == nullptr || current->start() > other->start()) {
1215       std::swap(current, other);
1216     }
1217     // Check disjointness
1218     DCHECK(other == nullptr || current->end() <= other->start());
1219     // Append the 'current' node to the result accumulator and move forward
1220     if (tail == nullptr) {
1221       use_interval_ = current;
1222     } else {
1223       tail->set_next(current);
1224     }
1225     tail = current;
1226     current = current->next();
1227   }
1228   // Other list is empty => we are done
1229 }
1230 
1231 
Print() const1232 void SpillRange::Print() const {
1233   OFStream os(stdout);
1234   os << "{" << std::endl;
1235   for (TopLevelLiveRange* range : live_ranges()) {
1236     os << range->vreg() << " ";
1237   }
1238   os << std::endl;
1239 
1240   for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
1241     os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
1242   }
1243   os << "}" << std::endl;
1244 }
1245 
1246 
PhiMapValue(PhiInstruction * phi,const InstructionBlock * block,Zone * zone)1247 RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
1248                                                  const InstructionBlock* block,
1249                                                  Zone* zone)
1250     : phi_(phi),
1251       block_(block),
1252       incoming_operands_(zone),
1253       assigned_register_(kUnassignedRegister) {
1254   incoming_operands_.reserve(phi->operands().size());
1255 }
1256 
1257 
AddOperand(InstructionOperand * operand)1258 void RegisterAllocationData::PhiMapValue::AddOperand(
1259     InstructionOperand* operand) {
1260   incoming_operands_.push_back(operand);
1261 }
1262 
1263 
CommitAssignment(const InstructionOperand & assigned)1264 void RegisterAllocationData::PhiMapValue::CommitAssignment(
1265     const InstructionOperand& assigned) {
1266   for (InstructionOperand* operand : incoming_operands_) {
1267     InstructionOperand::ReplaceWith(operand, &assigned);
1268   }
1269 }
1270 
1271 
RegisterAllocationData(const RegisterConfiguration * config,Zone * zone,Frame * frame,InstructionSequence * code,const char * debug_name)1272 RegisterAllocationData::RegisterAllocationData(
1273     const RegisterConfiguration* config, Zone* zone, Frame* frame,
1274     InstructionSequence* code, const char* debug_name)
1275     : allocation_zone_(zone),
1276       frame_(frame),
1277       code_(code),
1278       debug_name_(debug_name),
1279       config_(config),
1280       phi_map_(allocation_zone()),
1281       allocatable_codes_(this->config()->num_general_registers(), -1,
1282                          allocation_zone()),
1283       allocatable_double_codes_(this->config()->num_double_registers(), -1,
1284                                 allocation_zone()),
1285       live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1286       live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1287       live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
1288                    allocation_zone()),
1289       fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
1290                          allocation_zone()),
1291       fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
1292                                 allocation_zone()),
1293       spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
1294       delayed_references_(allocation_zone()),
1295       assigned_registers_(nullptr),
1296       assigned_double_registers_(nullptr),
1297       virtual_register_count_(code->VirtualRegisterCount()),
1298       preassigned_slot_ranges_(zone) {
1299   DCHECK(this->config()->num_general_registers() <=
1300          RegisterConfiguration::kMaxGeneralRegisters);
1301   DCHECK(this->config()->num_double_registers() <=
1302          RegisterConfiguration::kMaxDoubleRegisters);
1303   assigned_registers_ = new (code_zone())
1304       BitVector(this->config()->num_general_registers(), code_zone());
1305   assigned_double_registers_ = new (code_zone())
1306       BitVector(this->config()->num_double_registers(), code_zone());
1307   this->frame()->SetAllocatedRegisters(assigned_registers_);
1308   this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
1309 }
1310 
1311 
AddGapMove(int index,Instruction::GapPosition position,const InstructionOperand & from,const InstructionOperand & to)1312 MoveOperands* RegisterAllocationData::AddGapMove(
1313     int index, Instruction::GapPosition position,
1314     const InstructionOperand& from, const InstructionOperand& to) {
1315   Instruction* instr = code()->InstructionAt(index);
1316   ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
1317   return moves->AddMove(from, to);
1318 }
1319 
1320 
RepresentationFor(int virtual_register)1321 MachineRepresentation RegisterAllocationData::RepresentationFor(
1322     int virtual_register) {
1323   DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
1324   return code()->GetRepresentation(virtual_register);
1325 }
1326 
1327 
GetOrCreateLiveRangeFor(int index)1328 TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
1329   if (index >= static_cast<int>(live_ranges().size())) {
1330     live_ranges().resize(index + 1, nullptr);
1331   }
1332   TopLevelLiveRange* result = live_ranges()[index];
1333   if (result == nullptr) {
1334     result = NewLiveRange(index, RepresentationFor(index));
1335     live_ranges()[index] = result;
1336   }
1337   return result;
1338 }
1339 
1340 
NewLiveRange(int index,MachineRepresentation rep)1341 TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
1342     int index, MachineRepresentation rep) {
1343   return new (allocation_zone()) TopLevelLiveRange(index, rep);
1344 }
1345 
1346 
GetNextLiveRangeId()1347 int RegisterAllocationData::GetNextLiveRangeId() {
1348   int vreg = virtual_register_count_++;
1349   if (vreg >= static_cast<int>(live_ranges().size())) {
1350     live_ranges().resize(vreg + 1, nullptr);
1351   }
1352   return vreg;
1353 }
1354 
1355 
NextLiveRange(MachineRepresentation rep)1356 TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
1357     MachineRepresentation rep) {
1358   int vreg = GetNextLiveRangeId();
1359   TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
1360   return ret;
1361 }
1362 
1363 
InitializePhiMap(const InstructionBlock * block,PhiInstruction * phi)1364 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
1365     const InstructionBlock* block, PhiInstruction* phi) {
1366   RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone())
1367       RegisterAllocationData::PhiMapValue(phi, block, allocation_zone());
1368   auto res =
1369       phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
1370   DCHECK(res.second);
1371   USE(res);
1372   return map_value;
1373 }
1374 
1375 
GetPhiMapValueFor(int virtual_register)1376 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1377     int virtual_register) {
1378   auto it = phi_map_.find(virtual_register);
1379   DCHECK(it != phi_map_.end());
1380   return it->second;
1381 }
1382 
1383 
GetPhiMapValueFor(TopLevelLiveRange * top_range)1384 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1385     TopLevelLiveRange* top_range) {
1386   return GetPhiMapValueFor(top_range->vreg());
1387 }
1388 
1389 
ExistsUseWithoutDefinition()1390 bool RegisterAllocationData::ExistsUseWithoutDefinition() {
1391   bool found = false;
1392   BitVector::Iterator iterator(live_in_sets()[0]);
1393   while (!iterator.Done()) {
1394     found = true;
1395     int operand_index = iterator.Current();
1396     PrintF("Register allocator error: live v%d reached first block.\n",
1397            operand_index);
1398     LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
1399     PrintF("  (first use is at %d)\n", range->first_pos()->pos().value());
1400     if (debug_name() == nullptr) {
1401       PrintF("\n");
1402     } else {
1403       PrintF("  (function: %s)\n", debug_name());
1404     }
1405     iterator.Advance();
1406   }
1407   return found;
1408 }
1409 
1410 
1411 // If a range is defined in a deferred block, we can expect all the range
1412 // to only cover positions in deferred blocks. Otherwise, a block on the
1413 // hot path would be dominated by a deferred block, meaning it is unreachable
1414 // without passing through the deferred block, which is contradictory.
1415 // In particular, when such a range contributes a result back on the hot
1416 // path, it will be as one of the inputs of a phi. In that case, the value
1417 // will be transferred via a move in the Gap::END's of the last instruction
1418 // of a deferred block.
RangesDefinedInDeferredStayInDeferred()1419 bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
1420   for (const TopLevelLiveRange* range : live_ranges()) {
1421     if (range == nullptr || range->IsEmpty() ||
1422         !code()
1423              ->GetInstructionBlock(range->Start().ToInstructionIndex())
1424              ->IsDeferred()) {
1425       continue;
1426     }
1427     for (const UseInterval* i = range->first_interval(); i != nullptr;
1428          i = i->next()) {
1429       int first = i->FirstGapIndex();
1430       int last = i->LastGapIndex();
1431       for (int instr = first; instr <= last;) {
1432         const InstructionBlock* block = code()->GetInstructionBlock(instr);
1433         if (!block->IsDeferred()) return false;
1434         instr = block->last_instruction_index() + 1;
1435       }
1436     }
1437   }
1438   return true;
1439 }
1440 
1441 
AssignSpillRangeToLiveRange(TopLevelLiveRange * range)1442 SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
1443     TopLevelLiveRange* range) {
1444   DCHECK(!range->HasSpillOperand());
1445 
1446   SpillRange* spill_range = range->GetAllocatedSpillRange();
1447   if (spill_range == nullptr) {
1448     DCHECK(!range->IsSplinter());
1449     spill_range = new (allocation_zone()) SpillRange(range, allocation_zone());
1450   }
1451   range->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
1452 
1453   int spill_range_index =
1454       range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg();
1455 
1456   spill_ranges()[spill_range_index] = spill_range;
1457 
1458   return spill_range;
1459 }
1460 
1461 
1462 SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
1463     TopLevelLiveRange* range) {
1464   DCHECK(!range->HasSpillOperand());
1465   DCHECK(!range->IsSplinter());
1466   SpillRange* spill_range =
1467       new (allocation_zone()) SpillRange(range, allocation_zone());
1468   return spill_range;
1469 }
1470 
1471 
1472 void RegisterAllocationData::MarkAllocated(RegisterKind kind, int index) {
1473   if (kind == DOUBLE_REGISTERS) {
1474     assigned_double_registers_->Add(index);
1475   } else {
1476     DCHECK(kind == GENERAL_REGISTERS);
1477     assigned_registers_->Add(index);
1478   }
1479 }
1480 
1481 
1482 bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
1483   return pos.IsFullStart() &&
1484          code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
1485              pos.ToInstructionIndex();
1486 }
1487 
1488 
1489 ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
1490     : data_(data) {}
1491 
1492 
1493 InstructionOperand* ConstraintBuilder::AllocateFixed(
1494     UnallocatedOperand* operand, int pos, bool is_tagged) {
1495   TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
1496   DCHECK(operand->HasFixedPolicy());
1497   InstructionOperand allocated;
1498   MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1499   int virtual_register = operand->virtual_register();
1500   if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1501     rep = data()->RepresentationFor(virtual_register);
1502   }
1503   if (operand->HasFixedSlotPolicy()) {
1504     allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
1505                                  operand->fixed_slot_index());
1506   } else if (operand->HasFixedRegisterPolicy()) {
1507     DCHECK(!IsFloatingPoint(rep));
1508     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1509                                  operand->fixed_register_index());
1510   } else if (operand->HasFixedDoubleRegisterPolicy()) {
1511     DCHECK(IsFloatingPoint(rep));
1512     DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
1513     allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1514                                  operand->fixed_register_index());
1515   } else {
1516     UNREACHABLE();
1517   }
1518   InstructionOperand::ReplaceWith(operand, &allocated);
1519   if (is_tagged) {
1520     TRACE("Fixed reg is tagged at %d\n", pos);
1521     Instruction* instr = code()->InstructionAt(pos);
1522     if (instr->HasReferenceMap()) {
1523       instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
1524     }
1525   }
1526   return operand;
1527 }
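// Illustrative example (hypothetical operand): an UnallocatedOperand for v12
// with a FIXED_REGISTER policy and fixed_register_index() == 3 is rewritten
// in place, via InstructionOperand::ReplaceWith, into
// AllocatedOperand(AllocatedOperand::REGISTER, rep, 3), where rep is the
// representation recorded for v12. A FIXED_SLOT policy is handled the same
// way but yields a STACK_SLOT operand; if the value is tagged, the new fixed
// location is also recorded in the instruction's reference map.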
1528 
1529 
1530 void ConstraintBuilder::MeetRegisterConstraints() {
1531   for (InstructionBlock* block : code()->instruction_blocks()) {
1532     MeetRegisterConstraints(block);
1533   }
1534 }
1535 
1536 
1537 void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
1538   int start = block->first_instruction_index();
1539   int end = block->last_instruction_index();
1540   DCHECK_NE(-1, start);
1541   for (int i = start; i <= end; ++i) {
1542     MeetConstraintsBefore(i);
1543     if (i != end) MeetConstraintsAfter(i);
1544   }
1545   // Meet register constraints for the last instruction in the block.
1546   MeetRegisterConstraintsForLastInstructionInBlock(block);
1547 }
1548 
1549 
1550 void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
1551     const InstructionBlock* block) {
1552   int end = block->last_instruction_index();
1553   Instruction* last_instruction = code()->InstructionAt(end);
1554   for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
1555     InstructionOperand* output_operand = last_instruction->OutputAt(i);
1556     DCHECK(!output_operand->IsConstant());
1557     UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
1558     int output_vreg = output->virtual_register();
1559     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1560     bool assigned = false;
1561     if (output->HasFixedPolicy()) {
1562       AllocateFixed(output, -1, false);
1563       // This value is produced on the stack; we never need to spill it.
1564       if (output->IsStackSlot()) {
1565         DCHECK(LocationOperand::cast(output)->index() <
1566                data()->frame()->GetSpillSlotCount());
1567         range->SetSpillOperand(LocationOperand::cast(output));
1568         range->SetSpillStartIndex(end);
1569         assigned = true;
1570       }
1571 
1572       for (const RpoNumber& succ : block->successors()) {
1573         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1574         DCHECK(successor->PredecessorCount() == 1);
1575         int gap_index = successor->first_instruction_index();
1576         // Create an unconstrained operand for the same virtual register
1577         // and insert a gap move from the fixed output to the operand.
1578         UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
1579         data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
1580       }
1581     }
1582 
1583     if (!assigned) {
1584       for (const RpoNumber& succ : block->successors()) {
1585         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1586         DCHECK(successor->PredecessorCount() == 1);
1587         int gap_index = successor->first_instruction_index();
1588         range->RecordSpillLocation(allocation_zone(), gap_index, output);
1589         range->SetSpillStartIndex(gap_index);
1590       }
1591     }
1592   }
1593 }
1594 
1595 
1596 void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
1597   Instruction* first = code()->InstructionAt(instr_index);
1598   // Handle fixed temporaries.
1599   for (size_t i = 0; i < first->TempCount(); i++) {
1600     UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
1601     if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false);
1602   }
1603   // Handle constant/fixed output operands.
1604   for (size_t i = 0; i < first->OutputCount(); i++) {
1605     InstructionOperand* output = first->OutputAt(i);
1606     if (output->IsConstant()) {
1607       int output_vreg = ConstantOperand::cast(output)->virtual_register();
1608       TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1609       range->SetSpillStartIndex(instr_index + 1);
1610       range->SetSpillOperand(output);
1611       continue;
1612     }
1613     UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
1614     TopLevelLiveRange* range =
1615         data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
1616     bool assigned = false;
1617     if (first_output->HasFixedPolicy()) {
1618       int output_vreg = first_output->virtual_register();
1619       UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
1620       bool is_tagged = code()->IsReference(output_vreg);
1621       if (first_output->HasSecondaryStorage()) {
1622         range->MarkHasPreassignedSlot();
1623         data()->preassigned_slot_ranges().push_back(
1624             std::make_pair(range, first_output->GetSecondaryStorage()));
1625       }
1626       AllocateFixed(first_output, instr_index, is_tagged);
1627 
1628       // This value is produced on the stack; we never need to spill it.
1629       if (first_output->IsStackSlot()) {
1630         DCHECK(LocationOperand::cast(first_output)->index() <
1631                data()->frame()->GetTotalFrameSlotCount());
1632         range->SetSpillOperand(LocationOperand::cast(first_output));
1633         range->SetSpillStartIndex(instr_index + 1);
1634         assigned = true;
1635       }
1636       data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
1637                          output_copy);
1638     }
1639     // Make sure we add a gap move for spilling (if we have not done
1640     // so already).
1641     if (!assigned) {
1642       range->RecordSpillLocation(allocation_zone(), instr_index + 1,
1643                                  first_output);
1644       range->SetSpillStartIndex(instr_index + 1);
1645     }
1646   }
1647 }
1648 
1649 
1650 void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
1651   Instruction* second = code()->InstructionAt(instr_index);
1652   // Handle fixed input operands of second instruction.
1653   for (size_t i = 0; i < second->InputCount(); i++) {
1654     InstructionOperand* input = second->InputAt(i);
1655     if (input->IsImmediate() || input->IsExplicit()) {
1656       continue;  // Ignore immediates and explicitly reserved registers.
1657     }
1658     UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
1659     if (cur_input->HasFixedPolicy()) {
1660       int input_vreg = cur_input->virtual_register();
1661       UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
1662       bool is_tagged = code()->IsReference(input_vreg);
1663       AllocateFixed(cur_input, instr_index, is_tagged);
1664       data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
1665     }
1666   }
1667   // Handle "output same as input" for second instruction.
1668   for (size_t i = 0; i < second->OutputCount(); i++) {
1669     InstructionOperand* output = second->OutputAt(i);
1670     if (!output->IsUnallocated()) continue;
1671     UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
1672     if (!second_output->HasSameAsInputPolicy()) continue;
1673     DCHECK(i == 0);  // Only valid for first output.
1674     UnallocatedOperand* cur_input =
1675         UnallocatedOperand::cast(second->InputAt(0));
1676     int output_vreg = second_output->virtual_register();
1677     int input_vreg = cur_input->virtual_register();
1678     UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
1679     cur_input->set_virtual_register(second_output->virtual_register());
1680     MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
1681                                                 input_copy, *cur_input);
1682     if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
1683       if (second->HasReferenceMap()) {
1684         RegisterAllocationData::DelayedReference delayed_reference = {
1685             second->reference_map(), &gap_move->source()};
1686         data()->delayed_references().push_back(delayed_reference);
1687       }
1688     } else if (!code()->IsReference(input_vreg) &&
1689                code()->IsReference(output_vreg)) {
1690       // The input is assumed to become tagged immediately, before the
1691       // pointer map can be used. I.e. the pointer map at the instruction
1692       // will include the output operand (whose value at the beginning of
1693       // the instruction is equal to the input operand). If this is not
1694       // desired, then the pointer map at this instruction needs to be
1695       // adjusted manually.
1696     }
1697   }
1698 }
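// Illustrative example (hypothetical virtual registers) of the
// "output same as input" handling above: for an instruction whose output v9
// must reuse input operand 0 (currently v4), the input operand is rewritten
// to refer to v9, and a gap move v4 -> v9 is added in this instruction's
// Instruction::END gap slot, so that v9 already holds v4's value when the
// instruction executes while v4 keeps its own value.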
1699 
1700 
1701 void ConstraintBuilder::ResolvePhis() {
1702   // Process the blocks in reverse order.
1703   for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
1704     ResolvePhis(block);
1705   }
1706 }
1707 
1708 
1709 void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
1710   for (PhiInstruction* phi : block->phis()) {
1711     int phi_vreg = phi->virtual_register();
1712     RegisterAllocationData::PhiMapValue* map_value =
1713         data()->InitializePhiMap(block, phi);
1714     InstructionOperand& output = phi->output();
1715     // Map the destination operands, so the commitment phase can find them.
1716     for (size_t i = 0; i < phi->operands().size(); ++i) {
1717       InstructionBlock* cur_block =
1718           code()->InstructionBlockAt(block->predecessors()[i]);
1719       UnallocatedOperand input(UnallocatedOperand::ANY, phi->operands()[i]);
1720       MoveOperands* move = data()->AddGapMove(
1721           cur_block->last_instruction_index(), Instruction::END, input, output);
1722       map_value->AddOperand(&move->destination());
1723       DCHECK(!code()
1724                   ->InstructionAt(cur_block->last_instruction_index())
1725                   ->HasReferenceMap());
1726     }
1727     TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
1728     int gap_index = block->first_instruction_index();
1729     live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
1730     live_range->SetSpillStartIndex(gap_index);
1731     // We use the phi-ness of some nodes in some later heuristics.
1732     live_range->set_is_phi(true);
1733     live_range->set_is_non_loop_phi(!block->IsLoopHeader());
1734   }
1735 }
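// Illustrative example (hypothetical virtual registers): for a phi
// v9 = phi(v3, v5) in a block with predecessors B1 and B2, the loop above
// adds the gap move v3 -> v9 at the end of B1 and v5 -> v9 at the end of B2
// (Instruction::END slot of each predecessor's last instruction), and records
// the move destinations in the PhiMapValue so later phases can patch them
// once v9 has been assigned a location.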
1736 
1737 
1738 LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data,
1739                                    Zone* local_zone)
1740     : data_(data), phi_hints_(local_zone) {}
1741 
1742 
1743 BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
1744                                             RegisterAllocationData* data) {
1745   size_t block_index = block->rpo_number().ToSize();
1746   BitVector* live_out = data->live_out_sets()[block_index];
1747   if (live_out == nullptr) {
1748     // Compute live out for the given block, excluding contributions from
1749     // backward successor edges.
1750     Zone* zone = data->allocation_zone();
1751     const InstructionSequence* code = data->code();
1752 
1753     live_out = new (zone) BitVector(code->VirtualRegisterCount(), zone);
1754 
1755     // Process all successor blocks.
1756     for (const RpoNumber& succ : block->successors()) {
1757       // Add values live on entry to the successor.
1758       if (succ <= block->rpo_number()) continue;
1759       BitVector* live_in = data->live_in_sets()[succ.ToSize()];
1760       if (live_in != nullptr) live_out->Union(*live_in);
1761 
1762       // All phi input operands corresponding to this successor edge are live
1763       // out from this block.
1764       const InstructionBlock* successor = code->InstructionBlockAt(succ);
1765       size_t index = successor->PredecessorIndexOf(block->rpo_number());
1766       DCHECK(index < successor->PredecessorCount());
1767       for (PhiInstruction* phi : successor->phis()) {
1768         live_out->Add(phi->operands()[index]);
1769       }
1770     }
1771     data->live_out_sets()[block_index] = live_out;
1772   }
1773   return live_out;
1774 }
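// A sketch of the relation computed above, ignoring backward successor
// edges:
//
//   live_out(B) = union over forward successors S of B of
//                 ( live_in(S) + { the phi input flowing along the B -> S edge } )
//
// i.e. a virtual register is live out of B if some forward successor needs
// it on entry, either directly or as the corresponding phi operand.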
1775 
1776 
1777 void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
1778                                            BitVector* live_out) {
1779   // Add an interval that includes the entire block to the live range for
1780   // each live_out value.
1781   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
1782       block->first_instruction_index());
1783   LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
1784                              block->last_instruction_index())
1785                              .NextStart();
1786   BitVector::Iterator iterator(live_out);
1787   while (!iterator.Done()) {
1788     int operand_index = iterator.Current();
1789     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
1790     range->AddUseInterval(start, end, allocation_zone());
1791     iterator.Advance();
1792   }
1793 }
1794 
1795 
1796 int LiveRangeBuilder::FixedDoubleLiveRangeID(int index) {
1797   return -index - 1 - config()->num_general_registers();
1798 }
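// Illustrative example (assuming a configuration with 16 general registers):
// double register 2 gets the fixed live range ID -2 - 1 - 16 = -19. Fixed
// range IDs are always negative, so they can never collide with virtual
// register numbers, which are non-negative.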
1799 
1800 
1801 TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
1802   DCHECK(index < config()->num_general_registers());
1803   TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
1804   if (result == nullptr) {
1805     result = data()->NewLiveRange(FixedLiveRangeID(index),
1806                                   InstructionSequence::DefaultRepresentation());
1807     DCHECK(result->IsFixed());
1808     result->set_assigned_register(index);
1809     data()->MarkAllocated(GENERAL_REGISTERS, index);
1810     data()->fixed_live_ranges()[index] = result;
1811   }
1812   return result;
1813 }
1814 
1815 
1816 TopLevelLiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
1817   DCHECK(index < config()->num_double_registers());
1818   TopLevelLiveRange* result = data()->fixed_double_live_ranges()[index];
1819   if (result == nullptr) {
1820     result = data()->NewLiveRange(FixedDoubleLiveRangeID(index),
1821                                   MachineRepresentation::kFloat64);
1822     DCHECK(result->IsFixed());
1823     result->set_assigned_register(index);
1824     data()->MarkAllocated(DOUBLE_REGISTERS, index);
1825     data()->fixed_double_live_ranges()[index] = result;
1826   }
1827   return result;
1828 }
1829 
1830 
1831 TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
1832   if (operand->IsUnallocated()) {
1833     return data()->GetOrCreateLiveRangeFor(
1834         UnallocatedOperand::cast(operand)->virtual_register());
1835   } else if (operand->IsConstant()) {
1836     return data()->GetOrCreateLiveRangeFor(
1837         ConstantOperand::cast(operand)->virtual_register());
1838   } else if (operand->IsRegister()) {
1839     return FixedLiveRangeFor(
1840         LocationOperand::cast(operand)->GetRegister().code());
1841   } else if (operand->IsDoubleRegister()) {
1842     return FixedDoubleLiveRangeFor(
1843         LocationOperand::cast(operand)->GetDoubleRegister().code());
1844   } else {
1845     return nullptr;
1846   }
1847 }
1848 
1849 
1850 UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
1851                                               InstructionOperand* operand,
1852                                               void* hint,
1853                                               UsePositionHintType hint_type) {
1854   return new (allocation_zone()) UsePosition(pos, operand, hint, hint_type);
1855 }
1856 
1857 
1858 UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
1859                                       InstructionOperand* operand, void* hint,
1860                                       UsePositionHintType hint_type) {
1861   TopLevelLiveRange* range = LiveRangeFor(operand);
1862   if (range == nullptr) return nullptr;
1863 
1864   if (range->IsEmpty() || range->Start() > position) {
1865     // This can happen if there is a definition without a use.
1866     range->AddUseInterval(position, position.NextStart(), allocation_zone());
1867     range->AddUsePosition(NewUsePosition(position.NextStart()));
1868   } else {
1869     range->ShortenTo(position);
1870   }
1871   if (!operand->IsUnallocated()) return nullptr;
1872   UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
1873   UsePosition* use_pos =
1874       NewUsePosition(position, unalloc_operand, hint, hint_type);
1875   range->AddUsePosition(use_pos);
1876   return use_pos;
1877 }
1878 
1879 
1880 UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
1881                                    LifetimePosition position,
1882                                    InstructionOperand* operand, void* hint,
1883                                    UsePositionHintType hint_type) {
1884   TopLevelLiveRange* range = LiveRangeFor(operand);
1885   if (range == nullptr) return nullptr;
1886   UsePosition* use_pos = nullptr;
1887   if (operand->IsUnallocated()) {
1888     UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
1889     use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
1890     range->AddUsePosition(use_pos);
1891   }
1892   range->AddUseInterval(block_start, position, allocation_zone());
1893   return use_pos;
1894 }
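// Define() and Use() together implement the backward construction of live
// ranges: Use() conservatively extends a range from the start of the current
// block to the use position, and Define(), encountered later in the backward
// walk (i.e. earlier in the block), trims the range back to the definition
// via ShortenTo(). A rough sketch with hypothetical positions: a use at
// position 18 in a block starting at 10 first yields the interval [10, 18);
// a definition of the same value at position 14 then shortens it to [14, 18).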
1895 
1896 
1897 void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
1898                                            BitVector* live) {
1899   int block_start = block->first_instruction_index();
1900   LifetimePosition block_start_position =
1901       LifetimePosition::GapFromInstructionIndex(block_start);
1902 
1903   for (int index = block->last_instruction_index(); index >= block_start;
1904        index--) {
1905     LifetimePosition curr_position =
1906         LifetimePosition::InstructionFromInstructionIndex(index);
1907     Instruction* instr = code()->InstructionAt(index);
1908     DCHECK(instr != nullptr);
1909     DCHECK(curr_position.IsInstructionPosition());
1910     // Process output, inputs, and temps of this instruction.
1911     for (size_t i = 0; i < instr->OutputCount(); i++) {
1912       InstructionOperand* output = instr->OutputAt(i);
1913       if (output->IsUnallocated()) {
1914         // Unsupported.
1915         DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
1916         int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
1917         live->Remove(out_vreg);
1918       } else if (output->IsConstant()) {
1919         int out_vreg = ConstantOperand::cast(output)->virtual_register();
1920         live->Remove(out_vreg);
1921       }
1922       if (block->IsHandler() && index == block_start && output->IsAllocated() &&
1923           output->IsRegister() &&
1924           AllocatedOperand::cast(output)->GetRegister().is(
1925               v8::internal::kReturnRegister0)) {
1926         // The register defined here is blocked from gap start - it is the
1927         // exception value.
1928         // TODO(mtrofin): should we explore an explicit opcode for
1929         // the first instruction in the handler?
1930         Define(LifetimePosition::GapFromInstructionIndex(index), output);
1931       } else {
1932         Define(curr_position, output);
1933       }
1934     }
1935 
1936     if (instr->ClobbersRegisters()) {
1937       for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
1938         int code = config()->GetAllocatableGeneralCode(i);
1939         if (!IsOutputRegisterOf(instr, Register::from_code(code))) {
1940           TopLevelLiveRange* range = FixedLiveRangeFor(code);
1941           range->AddUseInterval(curr_position, curr_position.End(),
1942                                 allocation_zone());
1943         }
1944       }
1945     }
1946 
1947     if (instr->ClobbersDoubleRegisters()) {
1948       for (int i = 0; i < config()->num_allocatable_aliased_double_registers();
1949            ++i) {
1950         int code = config()->GetAllocatableDoubleCode(i);
1951         if (!IsOutputDoubleRegisterOf(instr, DoubleRegister::from_code(code))) {
1952           TopLevelLiveRange* range = FixedDoubleLiveRangeFor(code);
1953           range->AddUseInterval(curr_position, curr_position.End(),
1954                                 allocation_zone());
1955         }
1956       }
1957     }
1958 
1959     for (size_t i = 0; i < instr->InputCount(); i++) {
1960       InstructionOperand* input = instr->InputAt(i);
1961       if (input->IsImmediate() || input->IsExplicit()) {
1962         continue;  // Ignore immediates and explicitly reserved registers.
1963       }
1964       LifetimePosition use_pos;
1965       if (input->IsUnallocated() &&
1966           UnallocatedOperand::cast(input)->IsUsedAtStart()) {
1967         use_pos = curr_position;
1968       } else {
1969         use_pos = curr_position.End();
1970       }
1971 
1972       if (input->IsUnallocated()) {
1973         UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
1974         int vreg = unalloc->virtual_register();
1975         live->Add(vreg);
1976         if (unalloc->HasSlotPolicy()) {
1977           data()->GetOrCreateLiveRangeFor(vreg)->set_has_slot_use(true);
1978         }
1979       }
1980       Use(block_start_position, use_pos, input);
1981     }
1982 
1983     for (size_t i = 0; i < instr->TempCount(); i++) {
1984       InstructionOperand* temp = instr->TempAt(i);
1985       // Unsupported.
1986       DCHECK_IMPLIES(temp->IsUnallocated(),
1987                      !UnallocatedOperand::cast(temp)->HasSlotPolicy());
1988       if (instr->ClobbersTemps()) {
1989         if (temp->IsRegister()) continue;
1990         if (temp->IsUnallocated()) {
1991           UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
1992           if (temp_unalloc->HasFixedPolicy()) {
1993             continue;
1994           }
1995         }
1996       }
1997       Use(block_start_position, curr_position.End(), temp);
1998       Define(curr_position, temp);
1999     }
2000 
2001     // Process the moves of the instruction's gaps, making their sources live.
2002     const Instruction::GapPosition kPositions[] = {Instruction::END,
2003                                                    Instruction::START};
2004     curr_position = curr_position.PrevStart();
2005     DCHECK(curr_position.IsGapPosition());
2006     for (const Instruction::GapPosition& position : kPositions) {
2007       ParallelMove* move = instr->GetParallelMove(position);
2008       if (move == nullptr) continue;
2009       if (position == Instruction::END) {
2010         curr_position = curr_position.End();
2011       } else {
2012         curr_position = curr_position.Start();
2013       }
2014       for (MoveOperands* cur : *move) {
2015         InstructionOperand& from = cur->source();
2016         InstructionOperand& to = cur->destination();
2017         void* hint = &to;
2018         UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
2019         UsePosition* to_use = nullptr;
2020         int phi_vreg = -1;
2021         if (to.IsUnallocated()) {
2022           int to_vreg = UnallocatedOperand::cast(to).virtual_register();
2023           TopLevelLiveRange* to_range =
2024               data()->GetOrCreateLiveRangeFor(to_vreg);
2025           if (to_range->is_phi()) {
2026             phi_vreg = to_vreg;
2027             if (to_range->is_non_loop_phi()) {
2028               hint = to_range->current_hint_position();
2029               hint_type = hint == nullptr ? UsePositionHintType::kNone
2030                                           : UsePositionHintType::kUsePos;
2031             } else {
2032               hint_type = UsePositionHintType::kPhi;
2033               hint = data()->GetPhiMapValueFor(to_vreg);
2034             }
2035           } else {
2036             if (live->Contains(to_vreg)) {
2037               to_use = Define(curr_position, &to, &from,
2038                               UsePosition::HintTypeForOperand(from));
2039               live->Remove(to_vreg);
2040             } else {
2041               cur->Eliminate();
2042               continue;
2043             }
2044           }
2045         } else {
2046           Define(curr_position, &to);
2047         }
2048         UsePosition* from_use =
2049             Use(block_start_position, curr_position, &from, hint, hint_type);
2050         // Mark range live.
2051         if (from.IsUnallocated()) {
2052           live->Add(UnallocatedOperand::cast(from).virtual_register());
2053         }
2054         // Resolve use position hints just created.
2055         if (to_use != nullptr && from_use != nullptr) {
2056           to_use->ResolveHint(from_use);
2057           from_use->ResolveHint(to_use);
2058         }
2059         DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved());
2060         DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved());
2061         // Potentially resolve phi hint.
2062         if (phi_vreg != -1) ResolvePhiHint(&from, from_use);
2063       }
2064     }
2065   }
2066 }
2067 
2068 
2069 void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
2070                                    BitVector* live) {
2071   for (PhiInstruction* phi : block->phis()) {
2072     // The live range interval already ends at the first instruction of the
2073     // block.
2074     int phi_vreg = phi->virtual_register();
2075     live->Remove(phi_vreg);
2076     InstructionOperand* hint = nullptr;
2077     Instruction* instr = GetLastInstruction(
2078         code(), code()->InstructionBlockAt(block->predecessors()[0]));
2079     for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
2080       InstructionOperand& to = move->destination();
2081       if (to.IsUnallocated() &&
2082           UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
2083         hint = &move->source();
2084         break;
2085       }
2086     }
2087     DCHECK(hint != nullptr);
2088     LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
2089         block->first_instruction_index());
2090     UsePosition* use_pos = Define(block_start, &phi->output(), hint,
2091                                   UsePosition::HintTypeForOperand(*hint));
2092     MapPhiHint(hint, use_pos);
2093   }
2094 }
2095 
2096 
2097 void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
2098                                          BitVector* live) {
2099   DCHECK(block->IsLoopHeader());
2100   // Add a live range stretching from the first loop instruction to the last
2101   // for each value live on entry to the header.
2102   BitVector::Iterator iterator(live);
2103   LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
2104       block->first_instruction_index());
2105   LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
2106                              code()->LastLoopInstructionIndex(block))
2107                              .NextFullStart();
2108   while (!iterator.Done()) {
2109     int operand_index = iterator.Current();
2110     TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
2111     range->EnsureInterval(start, end, allocation_zone());
2112     iterator.Advance();
2113   }
2114   // Insert all values into the live in sets of all blocks in the loop.
2115   for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
2116        ++i) {
2117     live_in_sets()[i]->Union(*live);
2118   }
2119 }
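// Illustrative consequence of the code above, with hypothetical indices: if
// v6 is live on entry to a loop whose body spans instructions [30, 45], its
// range is widened to cover the whole loop, even if its only uses are near
// instruction 31, and v6 is added to the live-in set of every block inside
// the loop. This keeps the value available across the back edge.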
2120 
2121 
2122 void LiveRangeBuilder::BuildLiveRanges() {
2123   // Process the blocks in reverse order.
2124   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2125        --block_id) {
2126     InstructionBlock* block =
2127         code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2128     BitVector* live = ComputeLiveOut(block, data());
2129     // Initially consider all live_out values live for the entire block. We
2130     // will shorten these intervals if necessary.
2131     AddInitialIntervals(block, live);
2132     // Process the instructions in reverse order, generating and killing
2133     // live values.
2134     ProcessInstructions(block, live);
2135     // All phi output operands are killed by this block.
2136     ProcessPhis(block, live);
2137     // Now live is live_in for this block, except that it does not include
2138     // values live out on backward successor edges.
2139     if (block->IsLoopHeader()) ProcessLoopHeader(block, live);
2140     live_in_sets()[block_id] = live;
2141   }
2142   // Postprocess the ranges.
2143   for (TopLevelLiveRange* range : data()->live_ranges()) {
2144     if (range == nullptr) continue;
2145     // Give slots to all ranges with a non-fixed slot use.
2146     if (range->has_slot_use() && range->HasNoSpillType()) {
2147       data()->AssignSpillRangeToLiveRange(range);
2148     }
2149     // TODO(bmeurer): This is a horrible hack to make sure that for constant
2150     // live ranges, every use requires the constant to be in a register.
2151     // Without this hack, all uses with "any" policy would get the constant
2152     // operand assigned.
2153     if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
2154       for (UsePosition* pos = range->first_pos(); pos != nullptr;
2155            pos = pos->next()) {
2156         if (pos->type() == UsePositionType::kRequiresSlot) continue;
2157         UsePositionType new_type = UsePositionType::kAny;
2158         // Can't mark phis as needing a register.
2159         if (!pos->pos().IsGapPosition()) {
2160           new_type = UsePositionType::kRequiresRegister;
2161         }
2162         pos->set_type(new_type, true);
2163       }
2164     }
2165   }
2166   for (auto preassigned : data()->preassigned_slot_ranges()) {
2167     TopLevelLiveRange* range = preassigned.first;
2168     int slot_id = preassigned.second;
2169     SpillRange* spill = range->HasSpillRange()
2170                             ? range->GetSpillRange()
2171                             : data()->AssignSpillRangeToLiveRange(range);
2172     spill->set_assigned_slot(slot_id);
2173   }
2174 #ifdef DEBUG
2175   Verify();
2176 #endif
2177 }
2178 
2179 
2180 void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
2181                                   UsePosition* use_pos) {
2182   DCHECK(!use_pos->IsResolved());
2183   auto res = phi_hints_.insert(std::make_pair(operand, use_pos));
2184   DCHECK(res.second);
2185   USE(res);
2186 }
2187 
2188 
2189 void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
2190                                       UsePosition* use_pos) {
2191   auto it = phi_hints_.find(operand);
2192   if (it == phi_hints_.end()) return;
2193   DCHECK(!it->second->IsResolved());
2194   it->second->ResolveHint(use_pos);
2195 }
2196 
2197 
2198 void LiveRangeBuilder::Verify() const {
2199   for (auto& hint : phi_hints_) {
2200     CHECK(hint.second->IsResolved());
2201   }
2202   for (TopLevelLiveRange* current : data()->live_ranges()) {
2203     if (current != nullptr && !current->IsEmpty()) current->Verify();
2204   }
2205 }
2206 
2207 
2208 RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
2209                                      RegisterKind kind)
2210     : data_(data),
2211       mode_(kind),
2212       num_registers_(GetRegisterCount(data->config(), kind)),
2213       num_allocatable_registers_(
2214           GetAllocatableRegisterCount(data->config(), kind)),
2215       allocatable_register_codes_(
2216           GetAllocatableRegisterCodes(data->config(), kind)) {}
2217 
2218 
2219 LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
2220     const LiveRange* range, int instruction_index) {
2221   LifetimePosition ret = LifetimePosition::Invalid();
2222 
2223   ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
2224   if (range->Start() >= ret || ret >= range->End()) {
2225     return LifetimePosition::Invalid();
2226   }
2227   return ret;
2228 }
2229 
2230 
2231 void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand(
2232     bool operands_only) {
2233   size_t initial_range_count = data()->live_ranges().size();
2234   for (size_t i = 0; i < initial_range_count; ++i) {
2235     TopLevelLiveRange* range = data()->live_ranges()[i];
2236     if (!CanProcessRange(range)) continue;
2237     if (range->HasNoSpillType() || (operands_only && range->HasSpillRange())) {
2238       continue;
2239     }
2240 
2241     LifetimePosition start = range->Start();
2242     TRACE("Live range %d:%d is defined by a spill operand.\n",
2243           range->TopLevel()->vreg(), range->relative_id());
2244     LifetimePosition next_pos = start;
2245     if (next_pos.IsGapPosition()) {
2246       next_pos = next_pos.NextStart();
2247     }
2248     UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
2249     // If the range already has a spill operand and it doesn't need a
2250     // register immediately, split it and spill the first part of the range.
2251     if (pos == nullptr) {
2252       Spill(range);
2253     } else if (pos->pos() > range->Start().NextStart()) {
2254       // Do not spill the live range eagerly if the use position that can
2255       // benefit from the register is too close to the start of the live range.
2256       LifetimePosition split_pos = GetSplitPositionForInstruction(
2257           range, pos->pos().ToInstructionIndex());
2258       // There is no place to split, so we can't split and spill.
2259       if (!split_pos.IsValid()) continue;
2260 
2261       split_pos =
2262           FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
2263 
2264       SplitRangeAt(range, split_pos);
2265       Spill(range);
2266     }
2267   }
2268 }
2269 
2270 
2271 LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
2272                                            LifetimePosition pos) {
2273   DCHECK(!range->TopLevel()->IsFixed());
2274   TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(),
2275         range->relative_id(), pos.value());
2276 
2277   if (pos <= range->Start()) return range;
2278 
2279   // We can't properly connect live ranges if splitting occurred at the end
2280   // of a block.
2281   DCHECK(pos.IsStart() || pos.IsGapPosition() ||
2282          (GetInstructionBlock(code(), pos)->last_instruction_index() !=
2283           pos.ToInstructionIndex()));
2284 
2285   LiveRange* result = range->SplitAt(pos, allocation_zone());
2286   return result;
2287 }
2288 
2289 
2290 LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
2291                                            LifetimePosition start,
2292                                            LifetimePosition end) {
2293   DCHECK(!range->TopLevel()->IsFixed());
2294   TRACE("Splitting live range %d:%d in position between [%d, %d]\n",
2295         range->TopLevel()->vreg(), range->relative_id(), start.value(),
2296         end.value());
2297 
2298   LifetimePosition split_pos = FindOptimalSplitPos(start, end);
2299   DCHECK(split_pos >= start);
2300   return SplitRangeAt(range, split_pos);
2301 }
2302 
2303 
2304 LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
2305                                                         LifetimePosition end) {
2306   int start_instr = start.ToInstructionIndex();
2307   int end_instr = end.ToInstructionIndex();
2308   DCHECK(start_instr <= end_instr);
2309 
2310   // We have no choice.
2311   if (start_instr == end_instr) return end;
2312 
2313   const InstructionBlock* start_block = GetInstructionBlock(code(), start);
2314   const InstructionBlock* end_block = GetInstructionBlock(code(), end);
2315 
2316   if (end_block == start_block) {
2317     // The interval is split in the same basic block. Split at the latest
2318     // possible position.
2319     return end;
2320   }
2321 
2322   const InstructionBlock* block = end_block;
2323   // Find header of outermost loop.
2324   // TODO(titzer): fix redundancy below.
2325   while (GetContainingLoop(code(), block) != nullptr &&
2326          GetContainingLoop(code(), block)->rpo_number().ToInt() >
2327              start_block->rpo_number().ToInt()) {
2328     block = GetContainingLoop(code(), block);
2329   }
2330 
2331   // We did not find any suitable outer loop. Split at the latest possible
2332   // position unless end_block is a loop header itself.
2333   if (block == end_block && !end_block->IsLoopHeader()) return end;
2334 
2335   return LifetimePosition::GapFromInstructionIndex(
2336       block->first_instruction_index());
2337 }
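// Illustrative example (hypothetical blocks): if start lies in block B2 and
// end lies in block B7, which is nested inside a loop whose header B4 comes
// after B2, the split position is hoisted to the first gap of B4 rather than
// placed at end. Splitting at the loop header keeps the resulting connecting
// move out of the loop body, so it is not executed on every iteration.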
2338 
2339 
2340 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
2341     LiveRange* range, LifetimePosition pos) {
2342   const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
2343   const InstructionBlock* loop_header =
2344       block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
2345 
2346   if (loop_header == nullptr) return pos;
2347 
2348   const UsePosition* prev_use =
2349       range->PreviousUsePositionRegisterIsBeneficial(pos);
2350 
2351   while (loop_header != nullptr) {
2352     // We are going to spill the live range inside the loop.
2353     // If possible, try to move the spilling position backwards to the loop
2354     // header. This will reduce the number of memory moves on the back edge.
2355     LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
2356         loop_header->first_instruction_index());
2357 
2358     if (range->Covers(loop_start)) {
2359       if (prev_use == nullptr || prev_use->pos() < loop_start) {
2360         // No register-beneficial use inside the loop before pos.
2361         pos = loop_start;
2362       }
2363     }
2364 
2365     // Try hoisting out to an outer loop.
2366     loop_header = GetContainingLoop(code(), loop_header);
2367   }
2368 
2369   return pos;
2370 }
2371 
2372 
2373 void RegisterAllocator::Spill(LiveRange* range) {
2374   DCHECK(!range->spilled());
2375   TopLevelLiveRange* first = range->TopLevel();
2376   TRACE("Spilling live range %d:%d\n", first->vreg(), range->relative_id());
2377 
2378   if (first->HasNoSpillType()) {
2379     data()->AssignSpillRangeToLiveRange(first);
2380   }
2381   range->Spill();
2382 }
2383 
2384 
2385 const ZoneVector<TopLevelLiveRange*>& RegisterAllocator::GetFixedRegisters()
2386     const {
2387   return mode() == DOUBLE_REGISTERS ? data()->fixed_double_live_ranges()
2388                                     : data()->fixed_live_ranges();
2389 }
2390 
2391 
2392 const char* RegisterAllocator::RegisterName(int register_code) const {
2393   if (mode() == GENERAL_REGISTERS) {
2394     return data()->config()->GetGeneralRegisterName(register_code);
2395   } else {
2396     return data()->config()->GetDoubleRegisterName(register_code);
2397   }
2398 }
2399 
2400 
2401 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
2402                                          RegisterKind kind, Zone* local_zone)
2403     : RegisterAllocator(data, kind),
2404       unhandled_live_ranges_(local_zone),
2405       active_live_ranges_(local_zone),
2406       inactive_live_ranges_(local_zone) {
2407   unhandled_live_ranges().reserve(
2408       static_cast<size_t>(code()->VirtualRegisterCount() * 2));
2409   active_live_ranges().reserve(8);
2410   inactive_live_ranges().reserve(8);
2411   // TryAllocateFreeReg and AllocateBlockedReg assume this
2412   // when allocating local arrays.
2413   DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
2414          this->data()->config()->num_general_registers());
2415 }
2416 
2417 
2418 void LinearScanAllocator::AllocateRegisters() {
2419   DCHECK(unhandled_live_ranges().empty());
2420   DCHECK(active_live_ranges().empty());
2421   DCHECK(inactive_live_ranges().empty());
2422 
2423   SplitAndSpillRangesDefinedByMemoryOperand(code()->VirtualRegisterCount() <=
2424                                             num_allocatable_registers());
2425 
2426   for (TopLevelLiveRange* range : data()->live_ranges()) {
2427     if (!CanProcessRange(range)) continue;
2428     for (LiveRange* to_add = range; to_add != nullptr;
2429          to_add = to_add->next()) {
2430       if (!to_add->spilled()) {
2431         AddToUnhandledUnsorted(to_add);
2432       }
2433     }
2434   }
2435   SortUnhandled();
2436   DCHECK(UnhandledIsSorted());
2437 
2438   auto& fixed_ranges = GetFixedRegisters();
2439   for (TopLevelLiveRange* current : fixed_ranges) {
2440     if (current != nullptr) {
2441       DCHECK_EQ(mode(), current->kind());
2442       AddToInactive(current);
2443     }
2444   }
2445 
2446   while (!unhandled_live_ranges().empty()) {
2447     DCHECK(UnhandledIsSorted());
2448     LiveRange* current = unhandled_live_ranges().back();
2449     unhandled_live_ranges().pop_back();
2450     DCHECK(UnhandledIsSorted());
2451     LifetimePosition position = current->Start();
2452 #ifdef DEBUG
2453     allocation_finger_ = position;
2454 #endif
2455     TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
2456           current->relative_id(), position.value());
2457 
2458     if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
2459       continue;
2460 
2461     for (size_t i = 0; i < active_live_ranges().size(); ++i) {
2462       LiveRange* cur_active = active_live_ranges()[i];
2463       if (cur_active->End() <= position) {
2464         ActiveToHandled(cur_active);
2465         --i;  // The live range was removed from the list of active live ranges.
2466       } else if (!cur_active->Covers(position)) {
2467         ActiveToInactive(cur_active);
2468         --i;  // The live range was removed from the list of active live ranges.
2469       }
2470     }
2471 
2472     for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
2473       LiveRange* cur_inactive = inactive_live_ranges()[i];
2474       if (cur_inactive->End() <= position) {
2475         InactiveToHandled(cur_inactive);
2476         --i;  // Live range was removed from the list of inactive live ranges.
2477       } else if (cur_inactive->Covers(position)) {
2478         InactiveToActive(cur_inactive);
2479         --i;  // Live range was removed from the list of inactive live ranges.
2480       }
2481     }
2482 
2483     DCHECK(!current->HasRegisterAssigned() && !current->spilled());
2484 
2485     bool result = TryAllocateFreeReg(current);
2486     if (!result) AllocateBlockedReg(current);
2487     if (current->HasRegisterAssigned()) {
2488       AddToActive(current);
2489     }
2490   }
2491 }
2492 
2493 
2494 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
2495                                                        int reg) {
2496   data()->MarkAllocated(range->kind(), reg);
2497   range->set_assigned_register(reg);
2498   range->SetUseHints(reg);
2499   if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
2500     data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
2501   }
2502 }
2503 
2504 
2505 void LinearScanAllocator::AddToActive(LiveRange* range) {
2506   TRACE("Add live range %d:%d to active\n", range->TopLevel()->vreg(),
2507         range->relative_id());
2508   active_live_ranges().push_back(range);
2509 }
2510 
2511 
2512 void LinearScanAllocator::AddToInactive(LiveRange* range) {
2513   TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
2514         range->relative_id());
2515   inactive_live_ranges().push_back(range);
2516 }
2517 
2518 
2519 void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) {
2520   if (range == nullptr || range->IsEmpty()) return;
2521   DCHECK(!range->HasRegisterAssigned() && !range->spilled());
2522   DCHECK(allocation_finger_ <= range->Start());
2523   for (int i = static_cast<int>(unhandled_live_ranges().size() - 1); i >= 0;
2524        --i) {
2525     LiveRange* cur_range = unhandled_live_ranges().at(i);
2526     if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
2527     TRACE("Add live range %d:%d to unhandled at %d\n",
2528           range->TopLevel()->vreg(), range->relative_id(), i + 1);
2529     auto it = unhandled_live_ranges().begin() + (i + 1);
2530     unhandled_live_ranges().insert(it, range);
2531     DCHECK(UnhandledIsSorted());
2532     return;
2533   }
2534   TRACE("Add live range %d:%d to unhandled at start\n",
2535         range->TopLevel()->vreg(), range->relative_id());
2536   unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
2537   DCHECK(UnhandledIsSorted());
2538 }
2539 
2540 
2541 void LinearScanAllocator::AddToUnhandledUnsorted(LiveRange* range) {
2542   if (range == nullptr || range->IsEmpty()) return;
2543   DCHECK(!range->HasRegisterAssigned() && !range->spilled());
2544   TRACE("Add live range %d:%d to unhandled unsorted at end\n",
2545         range->TopLevel()->vreg(), range->relative_id());
2546   unhandled_live_ranges().push_back(range);
2547 }
2548 
2549 
2550 static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
2551   DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a));
2552   if (a->ShouldBeAllocatedBefore(b)) return false;
2553   if (b->ShouldBeAllocatedBefore(a)) return true;
2554   return a->TopLevel()->vreg() < b->TopLevel()->vreg();
2555 }
2556 
2557 
2558 // Sort the unhandled live ranges so that the ranges to be processed first are
2559 // at the end of the vector. This is convenient for the register allocation
2560 // algorithm because it is efficient to remove elements from the end.
2561 void LinearScanAllocator::SortUnhandled() {
2562   TRACE("Sort unhandled\n");
2563   std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
2564             &UnhandledSortHelper);
2565 }
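// Illustrative ordering (hypothetical start positions): ranges starting at
// positions 4, 10 and 12 end up stored as [12, 10, 4], so back() always
// yields the range with the smallest start, which AllocateRegisters() pops
// and processes first. UnhandledIsSorted() below checks exactly this
// non-increasing order of start positions.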
2566 
2567 
2568 bool LinearScanAllocator::UnhandledIsSorted() {
2569   size_t len = unhandled_live_ranges().size();
2570   for (size_t i = 1; i < len; i++) {
2571     LiveRange* a = unhandled_live_ranges().at(i - 1);
2572     LiveRange* b = unhandled_live_ranges().at(i);
2573     if (a->Start() < b->Start()) return false;
2574   }
2575   return true;
2576 }
2577 
2578 
2579 void LinearScanAllocator::ActiveToHandled(LiveRange* range) {
2580   RemoveElement(&active_live_ranges(), range);
2581   TRACE("Moving live range %d:%d from active to handled\n",
2582         range->TopLevel()->vreg(), range->relative_id());
2583 }
2584 
2585 
2586 void LinearScanAllocator::ActiveToInactive(LiveRange* range) {
2587   RemoveElement(&active_live_ranges(), range);
2588   inactive_live_ranges().push_back(range);
2589   TRACE("Moving live range %d:%d from active to inactive\n",
2590         range->TopLevel()->vreg(), range->relative_id());
2591 }
2592 
2593 
2594 void LinearScanAllocator::InactiveToHandled(LiveRange* range) {
2595   RemoveElement(&inactive_live_ranges(), range);
2596   TRACE("Moving live range %d:%d from inactive to handled\n",
2597         range->TopLevel()->vreg(), range->relative_id());
2598 }
2599 
2600 
2601 void LinearScanAllocator::InactiveToActive(LiveRange* range) {
2602   RemoveElement(&inactive_live_ranges(), range);
2603   active_live_ranges().push_back(range);
2604   TRACE("Moving live range %d:%d from inactive to active\n",
2605         range->TopLevel()->vreg(), range->relative_id());
2606 }
2607 
2608 
2609 bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
2610   LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters];
2611 
2612   for (int i = 0; i < num_registers(); i++) {
2613     free_until_pos[i] = LifetimePosition::MaxPosition();
2614   }
2615 
2616   for (LiveRange* cur_active : active_live_ranges()) {
2617     free_until_pos[cur_active->assigned_register()] =
2618         LifetimePosition::GapFromInstructionIndex(0);
2619     TRACE("Register %s is free until pos %d (1)\n",
2620           RegisterName(cur_active->assigned_register()),
2621           LifetimePosition::GapFromInstructionIndex(0).value());
2622   }
2623 
2624   for (LiveRange* cur_inactive : inactive_live_ranges()) {
2625     DCHECK(cur_inactive->End() > current->Start());
2626     LifetimePosition next_intersection =
2627         cur_inactive->FirstIntersection(current);
2628     if (!next_intersection.IsValid()) continue;
2629     int cur_reg = cur_inactive->assigned_register();
2630     free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
2631     TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
2632           Min(free_until_pos[cur_reg], next_intersection).value());
2633   }
2634 
2635   int hint_register;
2636   if (current->FirstHintPosition(&hint_register) != nullptr) {
2637     TRACE(
2638         "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
2639         RegisterName(hint_register), free_until_pos[hint_register].value(),
2640         current->TopLevel()->vreg(), current->relative_id(),
2641         current->End().value());
2642 
2643     // The desired register is free until the end of the current live range.
2644     if (free_until_pos[hint_register] >= current->End()) {
2645       TRACE("Assigning preferred reg %s to live range %d:%d\n",
2646             RegisterName(hint_register), current->TopLevel()->vreg(),
2647             current->relative_id());
2648       SetLiveRangeAssignedRegister(current, hint_register);
2649       return true;
2650     }
2651   }
2652 
2653   // Find the register which stays free for the longest time.
2654   int reg = allocatable_register_code(0);
2655   for (int i = 1; i < num_allocatable_registers(); ++i) {
2656     int code = allocatable_register_code(i);
2657     if (free_until_pos[code] > free_until_pos[reg]) {
2658       reg = code;
2659     }
2660   }
2661 
2662   LifetimePosition pos = free_until_pos[reg];
2663 
2664   if (pos <= current->Start()) {
2665     // All registers are blocked.
2666     return false;
2667   }
2668 
2669   if (pos < current->End()) {
2670     // Register reg is available at the range start but becomes blocked before
2671     // the range end. Split current at the position where it becomes blocked.
2672     LiveRange* tail = SplitRangeAt(current, pos);
2673     AddToUnhandledSorted(tail);
2674   }
2675 
2676   // Register reg is available at the range start and is free until
2677   // the range end.
2678   DCHECK(pos >= current->End());
2679   TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
2680         current->TopLevel()->vreg(), current->relative_id());
2681   SetLiveRangeAssignedRegister(current, reg);
2682 
2683   return true;
2684 }
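// Worked example for the routine above (hypothetical positions): let current
// cover [10, 30). If the register with the largest free_until_pos is free
// until 40, it is assigned outright. If it is only free until 22, current is
// split at 22, the head [10, 22) gets the register, and the tail is pushed
// back onto the unhandled list. If no register stays free beyond current's
// start, the routine returns false and AllocateBlockedReg() takes over.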
2685 
2686 
2687 void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
2688   UsePosition* register_use = current->NextRegisterPosition(current->Start());
2689   if (register_use == nullptr) {
2690     // There is no use in the current live range that requires a register.
2691     // We can just spill it.
2692     Spill(current);
2693     return;
2694   }
2695 
2696   LifetimePosition use_pos[RegisterConfiguration::kMaxDoubleRegisters];
2697   LifetimePosition block_pos[RegisterConfiguration::kMaxDoubleRegisters];
2698 
2699   for (int i = 0; i < num_registers(); i++) {
2700     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
2701   }
2702 
2703   for (LiveRange* range : active_live_ranges()) {
2704     int cur_reg = range->assigned_register();
2705     if (range->TopLevel()->IsFixed() ||
2706         !range->CanBeSpilled(current->Start())) {
2707       block_pos[cur_reg] = use_pos[cur_reg] =
2708           LifetimePosition::GapFromInstructionIndex(0);
2709     } else {
2710       UsePosition* next_use =
2711           range->NextUsePositionRegisterIsBeneficial(current->Start());
2712       if (next_use == nullptr) {
2713         use_pos[cur_reg] = range->End();
2714       } else {
2715         use_pos[cur_reg] = next_use->pos();
2716       }
2717     }
2718   }
2719 
2720   for (LiveRange* range : inactive_live_ranges()) {
2721     DCHECK(range->End() > current->Start());
2722     LifetimePosition next_intersection = range->FirstIntersection(current);
2723     if (!next_intersection.IsValid()) continue;
2724     int cur_reg = range->assigned_register();
2725     if (range->TopLevel()->IsFixed()) {
2726       block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
2727       use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
2728     } else {
2729       use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
2730     }
2731   }
2732 
2733   int reg = allocatable_register_code(0);
2734   for (int i = 1; i < num_allocatable_registers(); ++i) {
2735     int code = allocatable_register_code(i);
2736     if (use_pos[code] > use_pos[reg]) {
2737       reg = code;
2738     }
2739   }
2740 
2741   LifetimePosition pos = use_pos[reg];
2742 
2743   if (pos < register_use->pos()) {
2744     // All registers are blocked before the first use that requires a register.
2745     // Spill the starting part of the live range up to that use.
2746     SpillBetween(current, current->Start(), register_use->pos());
2747     return;
2748   }
2749 
2750   if (block_pos[reg] < current->End()) {
2751     // Register becomes blocked before the current range end. Split before that
2752     // position.
2753     LiveRange* tail =
2754         SplitBetween(current, current->Start(), block_pos[reg].Start());
2755     AddToUnhandledSorted(tail);
2756   }
2757 
2758   // Register reg is not blocked for the whole range.
2759   DCHECK(block_pos[reg] >= current->End());
2760   TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg),
2761         current->TopLevel()->vreg(), current->relative_id());
2762   SetLiveRangeAssignedRegister(current, reg);
2763 
2764   // This register was not free. Thus we need to find and spill
2765   // parts of active and inactive live ranges that use the same register
2766   // at the same lifetime positions as current.
2767   SplitAndSpillIntersecting(current);
2768 }
2769 
2770 
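// After a blocked register has been assigned to current, evict the conflicts:
// every active range, and every non-fixed inactive range that intersects
// current, using the same register is split, its overlapping part is spilled,
// and the range is retired to the handled set.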
2771 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
2772   DCHECK(current->HasRegisterAssigned());
2773   int reg = current->assigned_register();
2774   LifetimePosition split_pos = current->Start();
2775   for (size_t i = 0; i < active_live_ranges().size(); ++i) {
2776     LiveRange* range = active_live_ranges()[i];
2777     if (range->assigned_register() == reg) {
2778       UsePosition* next_pos = range->NextRegisterPosition(current->Start());
2779       LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
2780       if (next_pos == nullptr) {
2781         SpillAfter(range, spill_pos);
2782       } else {
2783         // When spilling between spill_pos and next_pos, ensure that the range
2784         // remains spilled at least until the start of the current live range.
2785         // This guarantees that we will not introduce new unhandled ranges that
2786         // start before the current range, as that would violate the allocation
2787         // invariant and lead to an inconsistent state of the active and
2788         // inactive live ranges: ranges are allocated in order of their start
2789         // positions, and they are retired from active/inactive once the start
2790         // of the current live range is larger than their end.
2791         SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
2792       }
2793       ActiveToHandled(range);
2794       --i;
2795     }
2796   }
2797 
2798   for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
2799     LiveRange* range = inactive_live_ranges()[i];
2800     DCHECK(range->End() > current->Start());
2801     if (range->assigned_register() == reg && !range->TopLevel()->IsFixed()) {
2802       LifetimePosition next_intersection = range->FirstIntersection(current);
2803       if (next_intersection.IsValid()) {
2804         UsePosition* next_pos = range->NextRegisterPosition(current->Start());
2805         if (next_pos == nullptr) {
2806           SpillAfter(range, split_pos);
2807         } else {
2808           next_intersection = Min(next_intersection, next_pos->pos());
2809           SpillBetween(range, split_pos, next_intersection);
2810         }
2811         InactiveToHandled(range);
2812         --i;
2813       }
2814     }
2815   }
2816 }
2817 
2818 
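// For a phi whose inputs are mostly spilled, try to merge the inputs' spill
// ranges into a single slot and spill the phi into that slot as well. Bails
// out unless more than half of the operands are spilled, enough of them could
// be merged into the same slot, and the phi has no use that benefits from a
// register right after its definition.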
2819 bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
2820   if (!range->is_phi()) return false;
2821 
2822   DCHECK(!range->HasSpillOperand());
2823   RegisterAllocationData::PhiMapValue* phi_map_value =
2824       data()->GetPhiMapValueFor(range);
2825   const PhiInstruction* phi = phi_map_value->phi();
2826   const InstructionBlock* block = phi_map_value->block();
2827   // Count the number of spilled operands.
2828   size_t spilled_count = 0;
2829   LiveRange* first_op = nullptr;
2830   for (size_t i = 0; i < phi->operands().size(); i++) {
2831     int op = phi->operands()[i];
2832     LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
2833     if (!op_range->TopLevel()->HasSpillRange()) continue;
2834     const InstructionBlock* pred =
2835         code()->InstructionBlockAt(block->predecessors()[i]);
2836     LifetimePosition pred_end =
2837         LifetimePosition::InstructionFromInstructionIndex(
2838             pred->last_instruction_index());
2839     while (op_range != nullptr && !op_range->CanCover(pred_end)) {
2840       op_range = op_range->next();
2841     }
2842     if (op_range != nullptr && op_range->spilled()) {
2843       spilled_count++;
2844       if (first_op == nullptr) {
2845         first_op = op_range->TopLevel();
2846       }
2847     }
2848   }
2849 
2850   // Only continue if more than half of the operands are spilled.
2851   if (spilled_count * 2 <= phi->operands().size()) {
2852     return false;
2853   }
2854 
2855   // Try to merge the spilled operands and count the number of merged spilled
2856   // operands.
2857   DCHECK(first_op != nullptr);
2858   SpillRange* first_op_spill = first_op->TopLevel()->GetSpillRange();
2859   size_t num_merged = 1;
2860   for (size_t i = 1; i < phi->operands().size(); i++) {
2861     int op = phi->operands()[i];
2862     TopLevelLiveRange* op_range = data()->live_ranges()[op];
2863     if (!op_range->HasSpillRange()) continue;
2864     SpillRange* op_spill = op_range->GetSpillRange();
2865     if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
2866       num_merged++;
2867     }
2868   }
2869 
2870   // Only continue if enough operands could be merged to the
2871   // same spill slot.
2872   if (num_merged * 2 <= phi->operands().size() ||
2873       AreUseIntervalsIntersecting(first_op_spill->interval(),
2874                                   range->first_interval())) {
2875     return false;
2876   }
2877 
2878   // If the range does not need a register soon, spill it to the merged
2879   // spill range.
2880   LifetimePosition next_pos = range->Start();
2881   if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
2882   UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
2883   if (pos == nullptr) {
2884     SpillRange* spill_range =
2885         range->TopLevel()->HasSpillRange()
2886             ? range->TopLevel()->GetSpillRange()
2887             : data()->AssignSpillRangeToLiveRange(range->TopLevel());
2888     bool merged = first_op_spill->TryMerge(spill_range);
2889     CHECK(merged);
2890     Spill(range);
2891     return true;
2892   } else if (pos->pos() > range->Start().NextStart()) {
2893     SpillRange* spill_range =
2894         range->TopLevel()->HasSpillRange()
2895             ? range->TopLevel()->GetSpillRange()
2896             : data()->AssignSpillRangeToLiveRange(range->TopLevel());
2897     bool merged = first_op_spill->TryMerge(spill_range);
2898     CHECK(merged);
2899     SpillBetween(range, range->Start(), pos->pos());
2900     DCHECK(UnhandledIsSorted());
2901     return true;
2902   }
2903   return false;
2904 }
2905 
2906 
2907 void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
2908   LiveRange* second_part = SplitRangeAt(range, pos);
2909   Spill(second_part);
2910 }
2911 
2912 
2913 void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
2914                                        LifetimePosition end) {
2915   SpillBetweenUntil(range, start, start, end);
2916 }
2917 
2918 
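// Splits range at start and, if the tail reaches into [start, end[, spills it
// up to a position no earlier than until and re-queues the remainder as
// unhandled. SpillBetween above is the special case until == start.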
2919 void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
2920                                             LifetimePosition start,
2921                                             LifetimePosition until,
2922                                             LifetimePosition end) {
2923   CHECK(start < end);
2924   LiveRange* second_part = SplitRangeAt(range, start);
2925 
2926   if (second_part->Start() < end) {
2927     // The split result intersects with [start, end[.
2928     // Split it at a position between ]start+1, end[, spill the middle part,
2929     // and put the rest back into the unhandled set.
2930     LifetimePosition third_part_end = end.PrevStart().End();
2931     if (data()->IsBlockBoundary(end.Start())) {
2932       third_part_end = end.Start();
2933     }
2934     LiveRange* third_part = SplitBetween(
2935         second_part, Max(second_part->Start().End(), until), third_part_end);
2936 
2937     DCHECK(third_part != second_part);
2938 
2939     Spill(second_part);
2940     AddToUnhandledSorted(third_part);
2941   } else {
2942     // The split result does not intersect with [start, end[.
2943     // Nothing to spill. Just put it back into the unhandled set as a whole.
2944     AddToUnhandledSorted(second_part);
2945   }
2946 }
2947 
2948 
2949 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
2950     : data_(data) {}
2951 
2952 
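// Marks the instruction blocks that will need a frame because a live range is
// spilled there: for ranges spilled only in deferred blocks this is the block
// of each spilled child, otherwise the blocks holding the spill moves inserted
// at the definition.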
2953 void SpillSlotLocator::LocateSpillSlots() {
2954   const InstructionSequence* code = data()->code();
2955   for (TopLevelLiveRange* range : data()->live_ranges()) {
2956     if (range == nullptr || range->IsEmpty()) continue;
2957     // We care only about ranges which spill in the frame.
2958     if (!range->HasSpillRange()) continue;
2959     if (range->IsSpilledOnlyInDeferredBlocks()) {
2960       for (LiveRange* child = range; child != nullptr; child = child->next()) {
2961         if (child->spilled()) {
2962           code->GetInstructionBlock(child->Start().ToInstructionIndex())
2963               ->mark_needs_frame();
2964         }
2965       }
2966     } else {
2967       TopLevelLiveRange::SpillMoveInsertionList* spills =
2968           range->spill_move_insertion_locations();
2969       DCHECK_NOT_NULL(spills);
2970       for (; spills != nullptr; spills = spills->next) {
2971         code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
2972       }
2973     }
2974   }
2975 }
2976 
2977 
2978 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
2979 
2980 
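// First tries to merge spill ranges whose use intervals do not overlap, so
// that they can share a slot, then allocates one stack slot per surviving
// merged range.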
2981 void OperandAssigner::AssignSpillSlots() {
2982   ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
2983   // Merge disjoint spill ranges.
2984   for (size_t i = 0; i < spill_ranges.size(); ++i) {
2985     SpillRange* range = spill_ranges[i];
2986     if (range == nullptr) continue;
2987     if (range->IsEmpty()) continue;
2988     for (size_t j = i + 1; j < spill_ranges.size(); ++j) {
2989       SpillRange* other = spill_ranges[j];
2990       if (other != nullptr && !other->IsEmpty()) {
2991         range->TryMerge(other);
2992       }
2993     }
2994   }
2995   // Allocate slots for the merged spill ranges.
2996   for (SpillRange* range : spill_ranges) {
2997     if (range == nullptr || range->IsEmpty()) continue;
2998     // Allocate a new operand referring to the spill slot.
2999     if (!range->HasSlot()) {
3000       int byte_width = range->ByteWidth();
3001       int index = data()->frame()->AllocateSpillSlot(byte_width);
3002       range->set_assigned_slot(index);
3003     }
3004   }
3005 }
3006 
3007 
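// Rewrites all use positions with the operands that were finally assigned
// (register or spill slot), records phi assignments, and commits the spill
// moves, either at the definition or, for ranges spilled only in deferred
// blocks, via the control-flow connection phases described below.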
3008 void OperandAssigner::CommitAssignment() {
3009   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
3010     if (top_range == nullptr || top_range->IsEmpty()) continue;
3011     InstructionOperand spill_operand;
3012     if (top_range->HasSpillOperand()) {
3013       spill_operand = *top_range->TopLevel()->GetSpillOperand();
3014     } else if (top_range->TopLevel()->HasSpillRange()) {
3015       spill_operand = top_range->TopLevel()->GetSpillRangeOperand();
3016     }
3017     if (top_range->is_phi()) {
3018       data()->GetPhiMapValueFor(top_range)->CommitAssignment(
3019           top_range->GetAssignedOperand());
3020     }
3021     for (LiveRange* range = top_range; range != nullptr;
3022          range = range->next()) {
3023       InstructionOperand assigned = range->GetAssignedOperand();
3024       range->ConvertUsesToOperand(assigned, spill_operand);
3025     }
3026 
3027     if (!spill_operand.IsInvalid()) {
3028       // If this top level range has a child spilled in a deferred block, we use
3029       // the range and control flow connection mechanism instead of spilling at
3030       // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
3031       // phases. Normally, when we spill at definition, we do not insert a
3032       // connecting move when a successor child range is spilled - because the
3033       // spilled range picks up its value from the slot which was assigned at
3034       // definition. For ranges that are determined to spill only in deferred
3035       // blocks, we let ConnectLiveRanges and ResolveControlFlow insert such
3036       // moves between ranges. Because of how the ranges are split around
3037       // deferred blocks, this amounts to spilling and filling inside such
3038       // blocks.
3039       if (!top_range->TryCommitSpillInDeferredBlock(data()->code(),
3040                                                     spill_operand)) {
3041         // Spill at definition if the range isn't spilled only in deferred
3042         // blocks.
3043         top_range->CommitSpillMoves(
3044             data()->code(), spill_operand,
3045             top_range->has_slot_use() || top_range->spilled());
3046       }
3047     }
3048   }
3049 }
3050 
3051 
3052 ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
3053     : data_(data) {}
3054 
3055 
3056 bool ReferenceMapPopulator::SafePointsAreInOrder() const {
3057   int safe_point = 0;
3058   for (ReferenceMap* map : *data()->code()->reference_maps()) {
3059     if (safe_point > map->instruction_position()) return false;
3060     safe_point = map->instruction_position();
3061   }
3062   return true;
3063 }
3064 
3065 
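// For every safe point (reference map) covered by a tagged live range, records
// where the reference lives at that point: the spill slot once the range has
// been spilled there, and/or the assigned register while the covering child is
// not spilled.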
3066 void ReferenceMapPopulator::PopulateReferenceMaps() {
3067   DCHECK(SafePointsAreInOrder());
3068   // Map all delayed references.
3069   for (RegisterAllocationData::DelayedReference& delayed_reference :
3070        data()->delayed_references()) {
3071     delayed_reference.map->RecordReference(
3072         AllocatedOperand::cast(*delayed_reference.operand));
3073   }
3074   // Iterate over all safe point positions and record a pointer
3075   // for all spilled live ranges at this point.
3076   int last_range_start = 0;
3077   const ReferenceMapDeque* reference_maps = data()->code()->reference_maps();
3078   ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
3079   for (TopLevelLiveRange* range : data()->live_ranges()) {
3080     if (range == nullptr) continue;
3081     // Skip non-reference values.
3082     if (!data()->IsReference(range)) continue;
3083     // Skip empty live ranges.
3084     if (range->IsEmpty()) continue;
3085     if (range->has_preassigned_slot()) continue;
3086 
3087     // Find the extent of the range and its children.
3088     int start = range->Start().ToInstructionIndex();
3089     int end = 0;
3090     for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
3091       LifetimePosition this_end = cur->End();
3092       if (this_end.ToInstructionIndex() > end)
3093         end = this_end.ToInstructionIndex();
3094       DCHECK(cur->Start().ToInstructionIndex() >= start);
3095     }
3096 
3097     // Most of the ranges are in order, but not all. Keep an eye on when they
3098     // step backwards and reset first_it so we don't miss any safe points.
3099     if (start < last_range_start) first_it = reference_maps->begin();
3100     last_range_start = start;
3101 
3102     // Step across all the safe points that are before the start of this range,
3103     // recording how far we step in order to save doing this for the next range.
3104     for (; first_it != reference_maps->end(); ++first_it) {
3105       ReferenceMap* map = *first_it;
3106       if (map->instruction_position() >= start) break;
3107     }
3108 
3109     InstructionOperand spill_operand;
3110     if (((range->HasSpillOperand() &&
3111           !range->GetSpillOperand()->IsConstant()) ||
3112          range->HasSpillRange())) {
3113       if (range->HasSpillOperand()) {
3114         spill_operand = *range->GetSpillOperand();
3115       } else {
3116         spill_operand = range->GetSpillRangeOperand();
3117       }
3118       DCHECK(spill_operand.IsStackSlot());
3119       DCHECK_EQ(MachineRepresentation::kTagged,
3120                 AllocatedOperand::cast(spill_operand).representation());
3121     }
3122 
3123     LiveRange* cur = range;
3124     // Step through the safe points to see whether they are in the range.
3125     for (auto it = first_it; it != reference_maps->end(); ++it) {
3126       ReferenceMap* map = *it;
3127       int safe_point = map->instruction_position();
3128 
3129       // The safe points are sorted so we can stop searching here.
3130       if (safe_point - 1 > end) break;
3131 
3132       // Advance to the next active range that covers the current
3133       // safe point position.
3134       LifetimePosition safe_point_pos =
3135           LifetimePosition::InstructionFromInstructionIndex(safe_point);
3136 
3137       // Search for the child range (cur) that covers safe_point_pos. If we
3138       // don't find it before the children pass safe_point_pos, keep cur at
3139       // the last child, because the next safe_point_pos may be covered by cur.
3140       // This may happen if cur has more than one interval, and the current
3141       // safe_point_pos is in between intervals.
3142       // For that reason, cur may be at most the last child.
3143       DCHECK_NOT_NULL(cur);
3144       DCHECK(safe_point_pos >= cur->Start() || range == cur);
3145       bool found = false;
3146       while (!found) {
3147         if (cur->Covers(safe_point_pos)) {
3148           found = true;
3149         } else {
3150           LiveRange* next = cur->next();
3151           if (next == nullptr || next->Start() > safe_point_pos) {
3152             break;
3153           }
3154           cur = next;
3155         }
3156       }
3157 
3158       if (!found) {
3159         continue;
3160       }
3161 
3162       // Check if the live range is spilled and the safe point is after
3163       // the spill position.
3164       int spill_index = range->IsSpilledOnlyInDeferredBlocks()
3165                             ? cur->Start().ToInstructionIndex()
3166                             : range->spill_start_index();
3167 
3168       if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
3169         TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
3170               range->vreg(), spill_index, safe_point);
3171         map->RecordReference(AllocatedOperand::cast(spill_operand));
3172       }
3173 
3174       if (!cur->spilled()) {
3175         TRACE(
3176             "Pointer in register for range %d:%d (start at %d) "
3177             "at safe point %d\n",
3178             range->vreg(), cur->relative_id(), cur->Start().value(),
3179             safe_point);
3180         InstructionOperand operand = cur->GetAssignedOperand();
3181         DCHECK(!operand.IsStackSlot());
3182         DCHECK_EQ(MachineRepresentation::kTagged,
3183                   AllocatedOperand::cast(operand).representation());
3184         map->RecordReference(AllocatedOperand::cast(operand));
3185       }
3186     }
3187   }
3188 }
3189 
3190 
3191 namespace {
3192 
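// Helper for control-flow resolution: caches the [start, end[ interval of one
// live-range child so that the child covering a given position can be found
// with a binary search (see LiveRangeBoundArray::Find below).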
3193 class LiveRangeBound {
3194  public:
3195   explicit LiveRangeBound(const LiveRange* range, bool skip)
3196       : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
3197     DCHECK(!range->IsEmpty());
3198   }
3199 
3200   bool CanCover(LifetimePosition position) {
3201     return start_ <= position && position < end_;
3202   }
3203 
3204   const LiveRange* const range_;
3205   const LifetimePosition start_;
3206   const LifetimePosition end_;
3207   const bool skip_;
3208 
3209  private:
3210   DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
3211 };
3212 
3213 
3214 struct FindResult {
3215   const LiveRange* cur_cover_;
3216   const LiveRange* pred_cover_;
3217 };
3218 
3219 
3220 class LiveRangeBoundArray {
3221  public:
3222   LiveRangeBoundArray() : length_(0), start_(nullptr) {}
3223 
3224   bool ShouldInitialize() { return start_ == nullptr; }
3225 
3226   void Initialize(Zone* zone, const TopLevelLiveRange* const range) {
3227     length_ = range->GetChildCount();
3228 
3229     start_ = zone->NewArray<LiveRangeBound>(length_);
3230     LiveRangeBound* curr = start_;
3231     // Normally, spilled ranges do not need connecting moves, because the spill
3232     // location has been assigned at definition. For ranges spilled in deferred
3233     // blocks, that is not the case, so we need to connect the spilled children.
3234     bool spilled_in_blocks = range->IsSpilledOnlyInDeferredBlocks();
3235     for (const LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
3236       new (curr) LiveRangeBound(i, !spilled_in_blocks && i->spilled());
3237     }
3238   }
3239 
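  // Binary search over the children, which are laid out in order of their
  // start positions; returns the bound whose [start_, end_[ interval covers
  // the given position.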
3240   LiveRangeBound* Find(const LifetimePosition position) const {
3241     size_t left_index = 0;
3242     size_t right_index = length_;
3243     while (true) {
3244       size_t current_index = left_index + (right_index - left_index) / 2;
3245       DCHECK(right_index > current_index);
3246       LiveRangeBound* bound = &start_[current_index];
3247       if (bound->start_ <= position) {
3248         if (position < bound->end_) return bound;
3249         DCHECK(left_index < current_index);
3250         left_index = current_index;
3251       } else {
3252         right_index = current_index;
3253       }
3254     }
3255   }
3256 
3257   LiveRangeBound* FindPred(const InstructionBlock* pred) {
3258     LifetimePosition pred_end =
3259         LifetimePosition::InstructionFromInstructionIndex(
3260             pred->last_instruction_index());
3261     return Find(pred_end);
3262   }
3263 
3264   LiveRangeBound* FindSucc(const InstructionBlock* succ) {
3265     LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
3266         succ->first_instruction_index());
3267     return Find(succ_start);
3268   }
3269 
3270   bool FindConnectableSubranges(const InstructionBlock* block,
3271                                 const InstructionBlock* pred,
3272                                 FindResult* result) const {
3273     LifetimePosition pred_end =
3274         LifetimePosition::InstructionFromInstructionIndex(
3275             pred->last_instruction_index());
3276     LiveRangeBound* bound = Find(pred_end);
3277     result->pred_cover_ = bound->range_;
3278     LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
3279         block->first_instruction_index());
3280 
3281     if (bound->CanCover(cur_start)) {
3282       // Both blocks are covered by the same range, so there is nothing to
3283       // connect.
3284       return false;
3285     }
3286     bound = Find(cur_start);
3287     if (bound->skip_) {
3288       return false;
3289     }
3290     result->cur_cover_ = bound->range_;
3291     DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
3292     return (result->cur_cover_ != result->pred_cover_);
3293   }
3294 
3295  private:
3296   size_t length_;
3297   LiveRangeBound* start_;
3298 
3299   DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
3300 };
3301 
3302 
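// Lazily builds one LiveRangeBoundArray per virtual register, so that repeated
// control-flow resolution queries for the same vreg reuse the cached bounds.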
3303 class LiveRangeFinder {
3304  public:
3305   explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
3306       : data_(data),
3307         bounds_length_(static_cast<int>(data_->live_ranges().size())),
3308         bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
3309         zone_(zone) {
3310     for (int i = 0; i < bounds_length_; ++i) {
3311       new (&bounds_[i]) LiveRangeBoundArray();
3312     }
3313   }
3314 
3315   LiveRangeBoundArray* ArrayFor(int operand_index) {
3316     DCHECK(operand_index < bounds_length_);
3317     TopLevelLiveRange* range = data_->live_ranges()[operand_index];
3318     DCHECK(range != nullptr && !range->IsEmpty());
3319     LiveRangeBoundArray* array = &bounds_[operand_index];
3320     if (array->ShouldInitialize()) {
3321       array->Initialize(zone_, range);
3322     }
3323     return array;
3324   }
3325 
3326  private:
3327   const RegisterAllocationData* const data_;
3328   const int bounds_length_;
3329   LiveRangeBoundArray* const bounds_;
3330   Zone* const zone_;
3331 
3332   DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
3333 };
3334 
3335 
3336 typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
3337 
3338 
3339 struct DelayedInsertionMapCompare {
3340   bool operator()(const DelayedInsertionMapKey& a,
3341                   const DelayedInsertionMapKey& b) const {
3342     if (a.first == b.first) {
3343       return a.second.Compare(b.second);
3344     }
3345     return a.first < b.first;
3346   }
3347 };
3348 
3349 
3350 typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
3351                 DelayedInsertionMapCompare> DelayedInsertionMap;
3352 
3353 }  // namespace
3354 
3355 
3356 LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
3357     : data_(data) {}
3358 
3359 
3360 bool LiveRangeConnector::CanEagerlyResolveControlFlow(
3361     const InstructionBlock* block) const {
3362   if (block->PredecessorCount() != 1) return false;
3363   return block->predecessors()[0].IsNext(block->rpo_number());
3364 }
3365 
3366 
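// For every block whose live-in values may arrive in different locations from
// different predecessors, inserts the moves needed to reconcile them: for each
// live-in vreg and each predecessor, if the covering child ranges were
// assigned different operands, a gap move is added at the block or predecessor
// boundary (see the ResolveControlFlow overload below).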
3367 void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
3368   // Lazily linearize live ranges in memory for fast lookup.
3369   LiveRangeFinder finder(data(), local_zone);
3370   ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
3371   for (const InstructionBlock* block : code()->instruction_blocks()) {
3372     if (CanEagerlyResolveControlFlow(block)) continue;
3373     BitVector* live = live_in_sets[block->rpo_number().ToInt()];
3374     BitVector::Iterator iterator(live);
3375     while (!iterator.Done()) {
3376       LiveRangeBoundArray* array = finder.ArrayFor(iterator.Current());
3377       for (const RpoNumber& pred : block->predecessors()) {
3378         FindResult result;
3379         const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
3380         if (!array->FindConnectableSubranges(block, pred_block, &result)) {
3381           continue;
3382         }
3383         InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
3384         InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
3385         if (pred_op.Equals(cur_op)) continue;
3386         int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
3387         USE(move_loc);
3388         DCHECK_IMPLIES(
3389             result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
3390                 !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()),
3391             code()->GetInstructionBlock(move_loc)->IsDeferred());
3392       }
3393       iterator.Advance();
3394     }
3395   }
3396 }
3397 
3398 
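// Inserts a single reconciling gap move: at the START gap of block when the
// block has a single predecessor, otherwise at the END gap of the single-
// successor predecessor, and returns the instruction index that was used.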
3399 int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
3400                                            const InstructionOperand& cur_op,
3401                                            const InstructionBlock* pred,
3402                                            const InstructionOperand& pred_op) {
3403   DCHECK(!pred_op.Equals(cur_op));
3404   int gap_index;
3405   Instruction::GapPosition position;
3406   if (block->PredecessorCount() == 1) {
3407     gap_index = block->first_instruction_index();
3408     position = Instruction::START;
3409   } else {
3410     DCHECK(pred->SuccessorCount() == 1);
3411     DCHECK(!code()
3412                 ->InstructionAt(pred->last_instruction_index())
3413                 ->HasReferenceMap());
3414     gap_index = pred->last_instruction_index();
3415     position = Instruction::END;
3416   }
3417   data()->AddGapMove(gap_index, position, pred_op, cur_op);
3418   return gap_index;
3419 }
3420 
3421 
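// Connects adjacent children of the same top-level range: wherever one child
// ends exactly where the next one starts (and the position is not a block
// boundary handled by control-flow resolution), a gap move from the previous
// operand to the new one is inserted. Moves that cannot be inserted while
// iterating are buffered in delayed_insertion_map and committed per
// ParallelMove at the end.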
3422 void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
3423   DelayedInsertionMap delayed_insertion_map(local_zone);
3424   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
3425     if (top_range == nullptr) continue;
3426     bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks();
3427     LiveRange* first_range = top_range;
3428     for (LiveRange *second_range = first_range->next(); second_range != nullptr;
3429          first_range = second_range, second_range = second_range->next()) {
3430       LifetimePosition pos = second_range->Start();
3431       // Add gap move if the two live ranges touch and there is no block
3432       // boundary.
3433       if (!connect_spilled && second_range->spilled()) continue;
3434       if (first_range->End() != pos) continue;
3435       if (data()->IsBlockBoundary(pos) &&
3436           !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
3437         continue;
3438       }
3439       InstructionOperand prev_operand = first_range->GetAssignedOperand();
3440       InstructionOperand cur_operand = second_range->GetAssignedOperand();
3441       if (prev_operand.Equals(cur_operand)) continue;
3442       bool delay_insertion = false;
3443       Instruction::GapPosition gap_pos;
3444       int gap_index = pos.ToInstructionIndex();
3445       if (pos.IsGapPosition()) {
3446         gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
3447       } else {
3448         if (pos.IsStart()) {
3449           delay_insertion = true;
3450         } else {
3451           gap_index++;
3452         }
3453         gap_pos = delay_insertion ? Instruction::END : Instruction::START;
3454       }
3455       // Fills or spills for ranges that are spilled only in deferred blocks
3456       // must themselves happen only in deferred blocks.
3457       DCHECK_IMPLIES(
3458           connect_spilled &&
3459               !(prev_operand.IsAnyRegister() && cur_operand.IsAnyRegister()),
3460           code()->GetInstructionBlock(gap_index)->IsDeferred());
3461 
3462       ParallelMove* move =
3463           code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
3464               gap_pos, code_zone());
3465       if (!delay_insertion) {
3466         move->AddMove(prev_operand, cur_operand);
3467       } else {
3468         delayed_insertion_map.insert(
3469             std::make_pair(std::make_pair(move, prev_operand), cur_operand));
3470       }
3471     }
3472   }
3473   if (delayed_insertion_map.empty()) return;
3474   // Insert all the moves which should occur after the stored move.
3475   ZoneVector<MoveOperands*> to_insert(local_zone);
3476   ZoneVector<MoveOperands*> to_eliminate(local_zone);
3477   to_insert.reserve(4);
3478   to_eliminate.reserve(4);
3479   ParallelMove* moves = delayed_insertion_map.begin()->first.first;
3480   for (auto it = delayed_insertion_map.begin();; ++it) {
3481     bool done = it == delayed_insertion_map.end();
3482     if (done || it->first.first != moves) {
3483       // Commit the MoveOperands for the current ParallelMove.
3484       for (MoveOperands* move : to_eliminate) {
3485         move->Eliminate();
3486       }
3487       for (MoveOperands* move : to_insert) {
3488         moves->push_back(move);
3489       }
3490       if (done) break;
3491       // Reset state.
3492       to_eliminate.clear();
3493       to_insert.clear();
3494       moves = it->first.first;
3495     }
3496     // Gather all MoveOperands for a single ParallelMove.
3497     MoveOperands* move =
3498         new (code_zone()) MoveOperands(it->first.second, it->second);
3499     MoveOperands* eliminate = moves->PrepareInsertAfter(move);
3500     to_insert.push_back(move);
3501     if (eliminate != nullptr) to_eliminate.push_back(eliminate);
3502   }
3503 }
3504 
3505 
3506 }  // namespace compiler
3507 }  // namespace internal
3508 }  // namespace v8
3509