// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"

#include "src/crankshaft/s390/lithium-codegen-s390.h"

namespace v8 {
namespace internal {

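// Scratch register used to carry a spilled value while a cycle in the move
// graph is being broken (see BreakCycle and RestoreValue below).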
static const Register kSavedValueRegister = {1};

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      root_index_(0),
      in_cycle_(false),
      saved_destination_(NULL) {}

void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) {
        RestoreValue();
      }
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);  // Discard the processed moves so the list can be reused.
}

void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}

void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.

  // When doing a depth-first traversal of moves, a cycle can only be found
  // by encountering the starting move again.  So by spilling the source of
  // the starting move, we break the cycle.  All moves are then unblocked,
  // and the starting move is completed by writing the spilled value to
  // its destination.  All other moves from the spilled source have been
  // completed prior to breaking the cycle.
  // An additional complication is that moves to MemOperands with large
  // offsets (more than 1K or 4K) may require us to spill the saved value to
  // the stack, to free up the register.
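  //
  // For example, the swap {r2 <- r3, r3 <- r2} forms a cycle: BreakCycle
  // spills the old value of r2 into kSavedValueRegister, the move r2 <- r3
  // is emitted, and RestoreValue then writes the spilled value into r3.
  // (r2/r3 are purely illustrative register names.)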
  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack allocated local.  Multiple moves can
  // be pending because this function is recursive.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting
  // move.  In this case, we have a cycle, and we save the source of this
  // move to a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    DCHECK(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}

void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

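// All assembly below is emitted through the code generator's MacroAssembler.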
#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::BreakCycle(int index) {
  // We save in a register the value that should end up in the source of
  // moves_[root_index_].  After performing all moves in the tree rooted
  // in that move, we save the value to that source.
  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
  DCHECK(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    __ LoadRR(kSavedValueRegister, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ LoadP(kSavedValueRegister, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ ldr(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ LoadDouble(kScratchDoubleReg, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}

void LGapResolver::RestoreValue() {
  DCHECK(in_cycle_);
  DCHECK(saved_destination_ != NULL);

  // Spilled value is in kSavedValueRegister or kScratchDoubleReg.
  if (saved_destination_->IsRegister()) {
    __ LoadRR(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
  } else if (saved_destination_->IsStackSlot()) {
    __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ ldr(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ StoreDouble(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}

void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ LoadRR(cgen_->ToRegister(destination), source_register);
    } else {
      DCHECK(destination->IsStackSlot());
      __ StoreP(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ LoadP(cgen_->ToRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
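      // While a cycle is being broken, kSavedValueRegister may already hold
      // the spilled value, so route the copy through ip instead.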
      if (in_cycle_) {
        __ LoadP(ip, source_operand);
        __ StoreP(ip, destination_operand);
      } else {
        __ LoadP(kSavedValueRegister, source_operand);
        __ StoreP(kSavedValueRegister, destination_operand);
      }
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsInteger32(constant_source)) {
        cgen_->EmitLoadIntegerConstant(constant_source, dst);
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DoubleRegister result = cgen_->ToDoubleRegister(destination);
      double v = cgen_->ToDouble(constant_source);
      __ LoadDoubleLiteral(result, v, ip);
    } else {
      DCHECK(destination->IsStackSlot());
      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
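      // Since all cycles have been resolved by now, kSavedValueRegister is
      // free to serve as the temporary for the store below.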
      if (cgen_->IsInteger32(constant_source)) {
        cgen_->EmitLoadIntegerConstant(constant_source, kSavedValueRegister);
      } else {
        __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
      }
      __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleRegister()) {
    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ ldr(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ StoreDouble(source_register, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ LoadDouble(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kScratchDoubleReg was used to break the cycle,
        // but kSavedValueRegister is free.
#if V8_TARGET_ARCH_S390X
        __ lg(kSavedValueRegister, source_operand);
        __ stg(kSavedValueRegister, destination_operand);
#else
        // On 31-bit targets, copy the 64-bit double as two 32-bit words.
        MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
        __ LoadlW(kSavedValueRegister, source_operand);
        __ StoreW(kSavedValueRegister, destination_operand);
        __ LoadlW(kSavedValueRegister, source_high_operand);
        __ StoreW(kSavedValueRegister, destination_high_operand);
#endif
      } else {
        __ LoadDouble(kScratchDoubleReg, source_operand);
        __ StoreDouble(kScratchDoubleReg, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}

#undef __
}  // namespace internal
}  // namespace v8