// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/bit-vector.h"
#include "src/compiler/instruction.h"
#include "src/compiler/register-allocator-verifier.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

size_t OperandCount(const Instruction* instr) {
  return instr->InputCount() + instr->OutputCount() + instr->TempCount();
}


void VerifyEmptyGaps(const Instruction* instr) {
  for (int i = Instruction::FIRST_GAP_POSITION;
       i <= Instruction::LAST_GAP_POSITION; i++) {
    Instruction::GapPosition inner_pos =
        static_cast<Instruction::GapPosition>(i);
    CHECK(instr->GetParallelMove(inner_pos) == nullptr);
  }
}


void VerifyAllocatedGaps(const Instruction* instr) {
  for (int i = Instruction::FIRST_GAP_POSITION;
       i <= Instruction::LAST_GAP_POSITION; i++) {
    Instruction::GapPosition inner_pos =
        static_cast<Instruction::GapPosition>(i);
    const ParallelMove* moves = instr->GetParallelMove(inner_pos);
    if (moves == nullptr) continue;
    for (const MoveOperands* move : *moves) {
      if (move->IsRedundant()) continue;
      CHECK(move->source().IsAllocated() || move->source().IsConstant());
      CHECK(move->destination().IsAllocated());
    }
  }
}
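
// Illustrative note: a non-redundant gap move surviving allocation must
// connect concrete locations only, e.g. (in schematic notation)
//   [stack:4] <- r1;  r2 <- constant(42)
// Any remaining UnallocatedOperand in a gap at this point would indicate a
// register allocator bug.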

}  // namespace

RegisterAllocatorVerifier::RegisterAllocatorVerifier(
    Zone* zone, const RegisterConfiguration* config,
    const InstructionSequence* sequence)
    : zone_(zone),
      config_(config),
      sequence_(sequence),
      constraints_(zone),
      assessments_(zone),
      outstanding_assessments_(zone) {
  constraints_.reserve(sequence->instructions().size());
  // TODO(dcarney): model unique constraints.
  // Construct OperandConstraints for all InstructionOperands, eliminating
  // kSameAsFirst along the way.
  for (const Instruction* instr : sequence->instructions()) {
    // All gaps should be totally unallocated at this point.
    VerifyEmptyGaps(instr);
    const size_t operand_count = OperandCount(instr);
    OperandConstraint* op_constraints =
        zone->NewArray<OperandConstraint>(operand_count);
    size_t count = 0;
    for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
      BuildConstraint(instr->InputAt(i), &op_constraints[count]);
      VerifyInput(op_constraints[count]);
    }
    for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
      BuildConstraint(instr->TempAt(i), &op_constraints[count]);
      VerifyTemp(op_constraints[count]);
    }
    for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
      BuildConstraint(instr->OutputAt(i), &op_constraints[count]);
      if (op_constraints[count].type_ == kSameAsFirst) {
        CHECK(instr->InputCount() > 0);
        op_constraints[count].type_ = op_constraints[0].type_;
        op_constraints[count].value_ = op_constraints[0].value_;
      }
      VerifyOutput(op_constraints[count]);
    }
    InstructionConstraint instr_constraint = {instr, operand_count,
                                              op_constraints};
    constraints()->push_back(instr_constraint);
  }
}
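
// Layout note, derived from the loops above: the constraint array for an
// instruction stores inputs first, then temps, then outputs. E.g. an
// instruction with inputs {i0, i1}, temp {t0} and output {o0} yields
//   op_constraints = [c(i0), c(i1), c(t0), c(o0)]
// and a kSameAsFirst constraint on o0 is rewritten to a copy of c(i0).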

void RegisterAllocatorVerifier::VerifyInput(
    const OperandConstraint& constraint) {
  CHECK_NE(kSameAsFirst, constraint.type_);
  if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
    CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
             constraint.virtual_register_);
  }
}

void RegisterAllocatorVerifier::VerifyTemp(
    const OperandConstraint& constraint) {
  CHECK_NE(kSameAsFirst, constraint.type_);
  CHECK_NE(kImmediate, constraint.type_);
  CHECK_NE(kExplicit, constraint.type_);
  CHECK_NE(kConstant, constraint.type_);
}

void RegisterAllocatorVerifier::VerifyOutput(
    const OperandConstraint& constraint) {
  CHECK_NE(kImmediate, constraint.type_);
  CHECK_NE(kExplicit, constraint.type_);
  CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
           constraint.virtual_register_);
}

void RegisterAllocatorVerifier::VerifyAssignment() {
  CHECK(sequence()->instructions().size() == constraints()->size());
  auto instr_it = sequence()->begin();
  for (const auto& instr_constraint : *constraints()) {
    const Instruction* instr = instr_constraint.instruction_;
    // All gaps should be totally allocated at this point.
    VerifyAllocatedGaps(instr);
    const size_t operand_count = instr_constraint.operand_constaints_size_;
    const OperandConstraint* op_constraints =
        instr_constraint.operand_constraints_;
    CHECK_EQ(instr, *instr_it);
    CHECK(operand_count == OperandCount(instr));
    size_t count = 0;
    for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
      CheckConstraint(instr->InputAt(i), &op_constraints[count]);
    }
    for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
      CheckConstraint(instr->TempAt(i), &op_constraints[count]);
    }
    for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
      CheckConstraint(instr->OutputAt(i), &op_constraints[count]);
    }
    ++instr_it;
  }
}

void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
                                                OperandConstraint* constraint) {
  constraint->value_ = kMinInt;
  constraint->virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
  if (op->IsConstant()) {
    constraint->type_ = kConstant;
    constraint->value_ = ConstantOperand::cast(op)->virtual_register();
    constraint->virtual_register_ = constraint->value_;
  } else if (op->IsExplicit()) {
    constraint->type_ = kExplicit;
  } else if (op->IsImmediate()) {
    const ImmediateOperand* imm = ImmediateOperand::cast(op);
    int value = imm->type() == ImmediateOperand::INLINE ? imm->inline_value()
                                                        : imm->indexed_value();
    constraint->type_ = kImmediate;
    constraint->value_ = value;
  } else {
    CHECK(op->IsUnallocated());
    const UnallocatedOperand* unallocated = UnallocatedOperand::cast(op);
    int vreg = unallocated->virtual_register();
    constraint->virtual_register_ = vreg;
    if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
      constraint->type_ = kFixedSlot;
      constraint->value_ = unallocated->fixed_slot_index();
    } else {
      switch (unallocated->extended_policy()) {
        case UnallocatedOperand::ANY:
        case UnallocatedOperand::NONE:
          if (sequence()->IsFP(vreg)) {
            constraint->type_ = kNoneFP;
          } else {
            constraint->type_ = kNone;
          }
          break;
        case UnallocatedOperand::FIXED_REGISTER:
          if (unallocated->HasSecondaryStorage()) {
            constraint->type_ = kRegisterAndSlot;
            constraint->spilled_slot_ = unallocated->GetSecondaryStorage();
          } else {
            constraint->type_ = kFixedRegister;
          }
          constraint->value_ = unallocated->fixed_register_index();
          break;
        case UnallocatedOperand::FIXED_FP_REGISTER:
          constraint->type_ = kFixedFPRegister;
          constraint->value_ = unallocated->fixed_register_index();
          break;
        case UnallocatedOperand::MUST_HAVE_REGISTER:
          if (sequence()->IsFP(vreg)) {
            constraint->type_ = kFPRegister;
          } else {
            constraint->type_ = kRegister;
          }
          break;
        case UnallocatedOperand::MUST_HAVE_SLOT:
          constraint->type_ = kSlot;
          constraint->value_ =
              ElementSizeLog2Of(sequence()->GetRepresentation(vreg));
          break;
        case UnallocatedOperand::SAME_AS_FIRST_INPUT:
          constraint->type_ = kSameAsFirst;
          break;
      }
    }
  }
}
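
// Example mapping (schematic; see instruction.h for the operand encodings):
// an UnallocatedOperand with policy FIXED_REGISTER and register index 3 for
// virtual register 7 becomes
//   {type_: kFixedRegister, value_: 3, virtual_register_: 7}
// while MUST_HAVE_SLOT on a float64 vreg becomes kSlot with value_
// ElementSizeLog2Of(kFloat64), i.e. 3.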

void RegisterAllocatorVerifier::CheckConstraint(
    const InstructionOperand* op, const OperandConstraint* constraint) {
  switch (constraint->type_) {
    case kConstant:
      CHECK(op->IsConstant());
      CHECK_EQ(ConstantOperand::cast(op)->virtual_register(),
               constraint->value_);
      return;
    case kImmediate: {
      CHECK(op->IsImmediate());
      const ImmediateOperand* imm = ImmediateOperand::cast(op);
      int value = imm->type() == ImmediateOperand::INLINE
                      ? imm->inline_value()
                      : imm->indexed_value();
      CHECK_EQ(value, constraint->value_);
      return;
    }
    case kRegister:
      CHECK(op->IsRegister());
      return;
    case kFPRegister:
      CHECK(op->IsFPRegister());
      return;
    case kExplicit:
      CHECK(op->IsExplicit());
      return;
    case kFixedRegister:
    case kRegisterAndSlot:
      CHECK(op->IsRegister());
      CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
      return;
    case kFixedFPRegister:
      CHECK(op->IsFPRegister());
      CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
      return;
    case kFixedSlot:
      CHECK(op->IsStackSlot() || op->IsFPStackSlot());
      CHECK_EQ(LocationOperand::cast(op)->index(), constraint->value_);
      return;
    case kSlot:
      CHECK(op->IsStackSlot() || op->IsFPStackSlot());
      CHECK_EQ(ElementSizeLog2Of(LocationOperand::cast(op)->representation()),
               constraint->value_);
      return;
    case kNone:
      CHECK(op->IsRegister() || op->IsStackSlot());
      return;
    case kNoneFP:
      CHECK(op->IsFPRegister() || op->IsFPStackSlot());
      return;
    case kSameAsFirst:
      // kSameAsFirst constraints are rewritten in the constructor, so none
      // should survive to this point.
      CHECK(false);
      return;
  }
}

void BlockAssessments::PerformMoves(const Instruction* instruction) {
  const ParallelMove* first =
      instruction->GetParallelMove(Instruction::GapPosition::START);
  PerformParallelMoves(first);
  const ParallelMove* last =
      instruction->GetParallelMove(Instruction::GapPosition::END);
  PerformParallelMoves(last);
}

void BlockAssessments::PerformParallelMoves(const ParallelMove* moves) {
  if (moves == nullptr) return;

  CHECK(map_for_moves_.empty());
  for (MoveOperands* move : *moves) {
    if (move->IsEliminated() || move->IsRedundant()) continue;
    auto it = map_.find(move->source());
    // The RHS of a parallel move should have been already assessed.
    CHECK(it != map_.end());
    // The LHS of a parallel move should not have been assigned in this
    // parallel move.
    CHECK(map_for_moves_.find(move->destination()) == map_for_moves_.end());
    // Copy the assessment to the destination.
    map_for_moves_[move->destination()] = it->second;
  }
  for (auto pair : map_for_moves_) {
    map_[pair.first] = pair.second;
  }
  map_for_moves_.clear();
}
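
// The two-phase update above gives the moves parallel-assignment semantics:
// for a swap {r1 -> r2, r2 -> r1}, both reads hit the pre-move map_, so the
// two assessments are correctly exchanged. Updating map_ in place while
// iterating would instead propagate one assessment to both destinations.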

void BlockAssessments::DropRegisters() {
  for (auto iterator = map().begin(), end = map().end(); iterator != end;) {
    auto current = iterator;
    ++iterator;
    InstructionOperand op = current->first;
    if (op.IsAnyRegister()) map().erase(current);
  }
}

BlockAssessments* RegisterAllocatorVerifier::CreateForBlock(
    const InstructionBlock* block) {
  RpoNumber current_block_id = block->rpo_number();

  BlockAssessments* ret = new (zone()) BlockAssessments(zone());
  if (block->PredecessorCount() == 0) {
    // TODO(mtrofin): the following check should hold; however, in certain
    // unit tests it is invalidated by the last block. Investigate and
    // normalize the CFG.
    // CHECK(current_block_id.ToInt() == 0);
    // The phi size test below exists because we can, technically, have phi
    // instructions with one argument. Some tests expose that, too.
  } else if (block->PredecessorCount() == 1 && block->phis().size() == 0) {
    const BlockAssessments* prev_block =
        assessments_[block->predecessors()[0]];
    ret->CopyFrom(prev_block);
  } else {
    for (RpoNumber pred_id : block->predecessors()) {
      // For every operand coming from any of the predecessors, create an
      // unfinalized assessment.
      auto iterator = assessments_.find(pred_id);
      if (iterator == assessments_.end()) {
        // This block is the head of a loop, and this predecessor is the
        // loopback arc. Validate that this is indeed a loop case; otherwise
        // the CFG is malformed.
        CHECK(pred_id >= current_block_id);
        CHECK(block->IsLoopHeader());
        continue;
      }
      const BlockAssessments* pred_assessments = iterator->second;
      CHECK_NOT_NULL(pred_assessments);
      for (auto pair : pred_assessments->map()) {
        InstructionOperand operand = pair.first;
        if (ret->map().find(operand) == ret->map().end()) {
          ret->map().insert(std::make_pair(
              operand, new (zone()) PendingAssessment(block, operand)));
        }
      }
    }
  }
  return ret;
}
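
// Merge sketch: for a diamond
//        B0
//       /  \
//      B1  B2
//       \  /
//        B3
// every operand live out of B1 or B2 enters B3 as a PendingAssessment. It is
// only resolved against all predecessors when first used, in
// ValidatePendingAssessment below.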

void RegisterAllocatorVerifier::ValidatePendingAssessment(
    RpoNumber block_id, InstructionOperand op,
    BlockAssessments* current_assessments, const PendingAssessment* assessment,
    int virtual_register) {
  // When validating a pending assessment, it is possible that some of the
  // assessments for the original operand (the one the assessment was first
  // created for) are also pending. To avoid recursion, we use a worklist. To
  // deal with cycles, we keep a set of seen nodes.
  ZoneQueue<std::pair<const PendingAssessment*, int>> worklist(zone());
  ZoneSet<RpoNumber> seen(zone());
  worklist.push(std::make_pair(assessment, virtual_register));
  seen.insert(block_id);

  while (!worklist.empty()) {
    auto work = worklist.front();
    const PendingAssessment* current_assessment = work.first;
    int current_virtual_register = work.second;
    InstructionOperand current_operand = current_assessment->operand();
    worklist.pop();

    const InstructionBlock* origin = current_assessment->origin();
    CHECK(origin->PredecessorCount() > 1 || origin->phis().size() > 0);

    // Check if the virtual register is a phi first, instead of relying on
    // the incoming assessments. In particular, this handles the case
    // v1 = phi v0 v0, which structurally is identical to v0 having been
    // defined at the top of a diamond, and arriving at the node joining the
    // diamond's branches.
    const PhiInstruction* phi = nullptr;
    for (const PhiInstruction* candidate : origin->phis()) {
      if (candidate->virtual_register() == current_virtual_register) {
        phi = candidate;
        break;
      }
    }

    int op_index = 0;
    for (RpoNumber pred : origin->predecessors()) {
      int expected =
          phi != nullptr ? phi->operands()[op_index] : current_virtual_register;

      ++op_index;
      auto pred_assignment = assessments_.find(pred);
      if (pred_assignment == assessments_.end()) {
        // The predecessor has not been processed yet, so this must be the
        // loopback arc of a loop header. Record a delayed assessment to be
        // checked once the predecessor's assessments are committed.
        CHECK(origin->IsLoopHeader());
        auto todo_iter = outstanding_assessments_.find(pred);
        DelayedAssessments* set = nullptr;
        if (todo_iter == outstanding_assessments_.end()) {
          set = new (zone()) DelayedAssessments(zone());
          outstanding_assessments_.insert(std::make_pair(pred, set));
        } else {
          set = todo_iter->second;
        }
        set->AddDelayedAssessment(current_operand, expected);
        continue;
      }

      const BlockAssessments* pred_assessments = pred_assignment->second;
      auto found_contribution = pred_assessments->map().find(current_operand);
      CHECK(found_contribution != pred_assessments->map().end());
      Assessment* contribution = found_contribution->second;

      switch (contribution->kind()) {
        case Final:
          ValidateFinalAssessment(
              block_id, current_operand, current_assessments,
              FinalAssessment::cast(contribution), expected);
          break;
        case Pending: {
          // This happens when a diamond feeds into another one, and the
          // inner one is never used except to carry the value.
          const PendingAssessment* next = PendingAssessment::cast(contribution);
          if (seen.find(pred) == seen.end()) {
            worklist.push({next, expected});
            seen.insert(pred);
          }
          // Note that we do not want to finalize pending assessments at the
          // beginning of a block, which is the information we'd have
          // available here. This is because this operand may be reused to
          // define duplicate phis.
          break;
        }
      }
    }
  }
  // If everything checks out, we may make the assessment.
  current_assessments->map()[op] =
      new (zone()) FinalAssessment(virtual_register, assessment);
}
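
// Worklist example (hypothetical CFG): if the join block of one diamond
// flows straight into a second diamond, a use after the second join finds a
// PendingAssessment whose predecessor contribution is itself pending. That
// contribution is pushed onto the worklist rather than recursed into, and
// the seen set keeps already-visited blocks (e.g. via loopback arcs) from
// being requeued.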

void RegisterAllocatorVerifier::ValidateFinalAssessment(
    RpoNumber block_id, InstructionOperand op,
    BlockAssessments* current_assessments, const FinalAssessment* assessment,
    int virtual_register) {
  if (assessment->virtual_register() == virtual_register) return;
  // If we have two phis with the exact same operand list, the first phi is
  // used before the second one (via the operand incoming to the block), and
  // the second one's operand is defined (via a parallel move) after the use,
  // then the original operand will be assigned to the first phi. We then
  // look at the original pending assessment to ascertain whether op is
  // virtual_register.
  const PendingAssessment* old = assessment->original_pending_assessment();
  CHECK_NOT_NULL(old);
  ValidatePendingAssessment(block_id, op, current_assessments, old,
                            virtual_register);
}
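
// Duplicate-phi example (hypothetical vregs): given
//   v2 = phi(v0, v1)
//   v3 = phi(v0, v1)
// the shared incoming operand may already carry a FinalAssessment for v2
// when the use of v3 is checked; rather than failing on the vreg mismatch,
// we revalidate the original pending assessment for v3.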

void RegisterAllocatorVerifier::ValidateUse(
    RpoNumber block_id, BlockAssessments* current_assessments,
    InstructionOperand op, int virtual_register) {
  auto iterator = current_assessments->map().find(op);
  // We should have seen this operand before.
  CHECK(iterator != current_assessments->map().end());
  Assessment* assessment = iterator->second;

  switch (assessment->kind()) {
    case Final:
      ValidateFinalAssessment(block_id, op, current_assessments,
                              FinalAssessment::cast(assessment),
                              virtual_register);
      break;
    case Pending: {
      const PendingAssessment* pending = PendingAssessment::cast(assessment);
      ValidatePendingAssessment(block_id, op, current_assessments, pending,
                                virtual_register);
      break;
    }
  }
}

void RegisterAllocatorVerifier::VerifyGapMoves() {
  CHECK(assessments_.empty());
  CHECK(outstanding_assessments_.empty());
  const size_t block_count = sequence()->instruction_blocks().size();
  for (size_t block_index = 0; block_index < block_count; ++block_index) {
    const InstructionBlock* block =
        sequence()->instruction_blocks()[block_index];
    BlockAssessments* block_assessments = CreateForBlock(block);

    for (int instr_index = block->code_start(); instr_index < block->code_end();
         ++instr_index) {
      const InstructionConstraint& instr_constraint = constraints_[instr_index];
      const Instruction* instr = instr_constraint.instruction_;
      block_assessments->PerformMoves(instr);

      const OperandConstraint* op_constraints =
          instr_constraint.operand_constraints_;
      size_t count = 0;
      for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
        if (op_constraints[count].type_ == kImmediate ||
            op_constraints[count].type_ == kExplicit) {
          continue;
        }
        int virtual_register = op_constraints[count].virtual_register_;
        InstructionOperand op = *instr->InputAt(i);
        ValidateUse(block->rpo_number(), block_assessments, op,
                    virtual_register);
      }
      for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
        block_assessments->Drop(*instr->TempAt(i));
      }
      if (instr->IsCall()) {
        block_assessments->DropRegisters();
      }
      for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
        int virtual_register = op_constraints[count].virtual_register_;
        block_assessments->AddDefinition(*instr->OutputAt(i), virtual_register);
        if (op_constraints[count].type_ == kRegisterAndSlot) {
          const AllocatedOperand* reg_op =
              AllocatedOperand::cast(instr->OutputAt(i));
          MachineRepresentation rep = reg_op->representation();
          // The constraint for output i lives at op_constraints[count], past
          // the input and temp constraints, not at op_constraints[i].
          const AllocatedOperand* stack_op = AllocatedOperand::New(
              zone(), LocationOperand::LocationKind::STACK_SLOT, rep,
              op_constraints[count].spilled_slot_);
          block_assessments->AddDefinition(*stack_op, virtual_register);
        }
      }
    }
    // Now commit the assessments for this block. If there are any delayed
    // assessments, ValidatePendingAssessment should see this block, too.
    assessments_[block->rpo_number()] = block_assessments;

    auto todo_iter = outstanding_assessments_.find(block->rpo_number());
    if (todo_iter == outstanding_assessments_.end()) continue;
    DelayedAssessments* todo = todo_iter->second;
    for (auto pair : todo->map()) {
      InstructionOperand op = pair.first;
      int vreg = pair.second;
      auto found_op = block_assessments->map().find(op);
      CHECK(found_op != block_assessments->map().end());
      switch (found_op->second->kind()) {
        case Final:
          ValidateFinalAssessment(block->rpo_number(), op, block_assessments,
                                  FinalAssessment::cast(found_op->second),
                                  vreg);
          break;
        case Pending: {
          const PendingAssessment* pending =
              PendingAssessment::cast(found_op->second);
          ValidatePendingAssessment(block->rpo_number(), op, block_assessments,
                                    pending, vreg);
          block_assessments->map()[op] =
              new (zone()) FinalAssessment(vreg, pending);
          break;
        }
      }
    }
  }
}
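
// Typical driver, as a sketch (the actual calls live in the pipeline, not in
// this file): construct the verifier on the unallocated code, which records
// the constraints and checks that all gaps are empty; after register
// allocation, call VerifyAssignment() to check every operand against its
// constraint, then VerifyGapMoves() to check the data flow through the
// inserted parallel moves.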

}  // namespace compiler
}  // namespace internal
}  // namespace v8