// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_INSTRUCTION_SELECTOR_H_

#include <map>

#include "src/compiler/common-operator.h"
#include "src/compiler/instruction-scheduler.h"
#include "src/compiler/instruction.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/globals.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
class BasicBlock;
struct CallBuffer;  // TODO(bmeurer): Remove this.
class FlagsContinuation;
class Linkage;
class OperandGenerator;
struct SwitchInfo;

// This class connects nodes of parameters which are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee.
class PushParameter {
 public:
  PushParameter() : node_(nullptr), type_(MachineType::None()) {}
  PushParameter(Node* node, MachineType type) : node_(node), type_(type) {}

  Node* node() const { return node_; }
  MachineType type() const { return type_; }

 private:
  Node* node_;
  MachineType type_;
};
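
// Example (illustrative only; {value_node} is a hypothetical local): pairing
// a value node with the machine type the callee expects for it:
//
//   PushParameter arg(value_node, MachineType::Int32());
//   arg.node();  // the value to be pushed on the stack
//   arg.type();  // its representation in the callee's call descriptor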

// Instruction selection generates an InstructionSequence for a given Schedule.
class V8_EXPORT_PRIVATE InstructionSelector final {
 public:
  // Forward declarations.
  class Features;

  enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
  enum EnableScheduling { kDisableScheduling, kEnableScheduling };
  enum EnableSerialization { kDisableSerialization, kEnableSerialization };

  InstructionSelector(
      Zone* zone, size_t node_count, Linkage* linkage,
      InstructionSequence* sequence, Schedule* schedule,
      SourcePositionTable* source_positions, Frame* frame,
      SourcePositionMode source_position_mode = kCallSourcePositions,
      Features features = SupportedFeatures(),
      EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
                                               ? kEnableScheduling
                                               : kDisableScheduling,
      EnableSerialization enable_serialization = kDisableSerialization);

  // Visit code for the entire graph with the included schedule.
  bool SelectInstructions();
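
  // Illustrative usage, sketching how a compiler pipeline drives this class
  // (local names below are hypothetical):
  //
  //   InstructionSelector selector(temp_zone, graph->NodeCount(), linkage,
  //                                sequence, schedule, source_positions,
  //                                frame);
  //   if (!selector.SelectInstructions()) {
  //     // bail out; see instruction_selection_failed() below.
  //   }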

  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);
  void AddInstruction(Instruction* instr);

  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, InstructionOperand f,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand* outputs, size_t input_count,
                    InstructionOperand* inputs, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(Instruction* instr);
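
  // Illustrative use from an architecture backend (the opcode and operand
  // choices below are an assumption, not a fixed pattern):
  //
  //   X64OperandGenerator g(this);
  //   Emit(kX64Add32, g.DefineSameAsFirst(node),
  //        g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));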

  // ===========================================================================
  // ===== Architecture-independent deoptimization exit emission methods. ======
  // ===========================================================================

  Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
                              InstructionOperand a, DeoptimizeReason reason,
                              Node* frame_state);
  Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
                              InstructionOperand a, InstructionOperand b,
                              DeoptimizeReason reason, Node* frame_state);
  Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
                              InstructionOperand* outputs, size_t input_count,
                              InstructionOperand* inputs,
                              DeoptimizeReason reason, Node* frame_state);

  // ===========================================================================
  // ============== Architecture-independent CPU feature methods. ==============
  // ===========================================================================

  class Features final {
   public:
    Features() : bits_(0) {}
    explicit Features(unsigned bits) : bits_(bits) {}
    explicit Features(CpuFeature f) : bits_(1u << f) {}
    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}

    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)) != 0; }

   private:
    unsigned bits_;
  };

  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);
  }

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());
  }
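
  // Example (illustrative): guarding an instruction pattern on a CPU feature.
  //
  //   if (IsSupported(POPCNT)) {
  //     // ... select the hardware popcount instruction ...
  //   } else {
  //     // ... fall back to a bit-twiddling sequence ...
  //   }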

  // TODO(sigurds) This should take a CpuFeatures argument.
  static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();

  static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();

  // ===========================================================================
  // ============ Architecture-independent graph covering methods. =============
  // ===========================================================================

  // Used in pattern matching during code generation.
  // Check if {node} can be covered while generating code for the current
  // instruction. A node can be covered if {user} holds the only edge to
  // {node} and the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;
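
  // Example (illustrative): on x64, a Load whose only edge goes to an
  // Int32Add in the same block can be covered by the add, so that
  //   n = Load(base, index)
  //   a = Int32Add(m, n)
  // selects to a single "add reg, [base + index]" instruction.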

  // Used in pattern matching during code generation.
  // This function checks that {node} and {user} are in the same basic block,
  // and that {user} is the only user of {node} in this basic block.  This
  // check guarantees that there are no users of {node} scheduled between
  // {node} and {user}, and thus we can select a single instruction for both
  // nodes, if such an instruction exists. This check can be used for example
  // when selecting instructions for:
  //   n = Int32Add(a, b)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // Here we can generate a flag-setting add instruction, even if the add has
  // uses in other basic blocks, since the flag-setting add instruction will
  // still generate the result of the addition and not just set the flags.
  // However, if we had uses of the add in the same basic block, we could have:
  //   n = Int32Add(a, b)
  //   o = OtherOp(n, ...)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // where we cannot select the add and the compare together.  If we were to
  // select a flag-setting add instruction for Word32Compare and Int32Add while
  // visiting Word32Compare, we would then have to select an instruction for
  // OtherOp *afterwards*, which means we would attempt to use the result of
  // the add before we have defined it.
  bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;

  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Checks if {node} is currently live.
  bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }

  // Gets the effect level of {node}.
  int GetEffectLevel(Node* node) const;

  int GetVirtualRegister(const Node* node);
  const std::map<NodeId, int> GetVirtualRegistersForTesting() const;

  // Check if we can generate loads and stores of ExternalConstants relative
  // to the roots register, i.e. if both a root register is available for this
  // compilation unit and the serializer is disabled.
  bool CanAddressRelativeToRootsRegister() const;
  // Check if we can use the roots register to access GC roots.
  bool CanUseRootsRegister() const;
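
  // Example (illustrative): with an addressable roots register, a load of an
  // ExternalReference can be emitted as one root-relative memory access,
  // e.g. "movq rax, [kRootRegister + <offset>]" on x64, instead of first
  // materializing the external address in a register.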

  Isolate* isolate() const { return sequence()->isolate(); }

 private:
  friend class OperandGenerator;

  bool UseInstructionScheduling() const {
    return (enable_scheduling_ == kEnableScheduling) &&
           InstructionScheduler::SchedulerSupported();
  }

  void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
  void EmitLookupSwitch(const SwitchInfo& sw,
                        InstructionOperand& value_operand);

  void TryRename(InstructionOperand* op);
  int GetRename(int virtual_register);
  void SetRename(const Node* node, const Node* rename);
  void UpdateRenames(Instruction* instruction);
  void UpdateRenamesInPhi(PhiInstruction* phi);

  // Inform the instruction selection that {node} was just defined.
  void MarkAsDefined(Node* node);

  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Sets the effect level of {node}.
  void SetEffectLevel(Node* node, int effect_level);

  // Inform the register allocation of the representation of the value produced
  // by {node}.
  void MarkAsRepresentation(MachineRepresentation rep, Node* node);
  void MarkAsWord32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord32, node);
  }
  void MarkAsWord64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord64, node);
  }
  void MarkAsFloat32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat32, node);
  }
  void MarkAsFloat64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat64, node);
  }
  void MarkAsSimd128(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kSimd128, node);
  }
  void MarkAsReference(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kTagged, node);
  }
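
  // Illustrative sketch of how VisitNode's dispatch uses these helpers: the
  // result representation is tagged before the node is visited, e.g.
  //
  //   case IrOpcode::kFloat64Add:
  //     MarkAsFloat64(node);
  //     return VisitFloat64Add(node);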

  // Inform the register allocation of the representation of the unallocated
  // operand {op}.
  void MarkAsRepresentation(MachineRepresentation rep,
                            const InstructionOperand& op);

  enum CallBufferFlag {
    kCallCodeImmediate = 1u << 0,
    kCallAddressImmediate = 1u << 1,
    kCallTail = 1u << 2
  };
  typedef base::Flags<CallBufferFlag> CallBufferFlags;

  // Initialize the call buffer with the InstructionOperands, nodes, etc.,
  // corresponding to the inputs and outputs of the call.
  // {kCallCodeImmediate} requests immediate operands for calls to code
  // objects; {kCallAddressImmediate} requests immediate operands for calls to
  // addresses.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            CallBufferFlags flags, int stack_slot_delta = 0);
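
  // Illustrative call site (this flag combination is an example, not the
  // rule):
  //
  //   CallBufferFlags flags(kCallCodeImmediate | kCallAddressImmediate);
  //   InitializeCallBuffer(call, &buffer, flags);
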
  bool IsTailCallAddressImmediate();
  int GetTempsCountForTailCallFromJSFunction();

  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);

  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ===============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

  // Visit the node and generate code for IEEE 754 functions.
  void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
  void VisitFloat64Ieee754Unop(Node*, InstructionCode code);

#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
  MACHINE_SIMD_RETURN_NUM_OP_LIST(DECLARE_GENERATOR)
  MACHINE_SIMD_RETURN_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
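
  // DECLARE_GENERATOR expands to one visitor declaration per machine
  // operator, e.g.:
  //
  //   void VisitInt32Add(Node* node);
  //   void VisitWord32And(Node* node);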

  void VisitFinishRegion(Node* node);
  void VisitParameter(Node* node);
  void VisitIfException(Node* node);
  void VisitOsrValue(Node* node);
  void VisitPhi(Node* node);
  void VisitProjection(Node* node);
  void VisitConstant(Node* node);
  void VisitCall(Node* call, BasicBlock* handler = nullptr);
  void VisitDeoptimizeIf(Node* node);
  void VisitDeoptimizeUnless(Node* node);
  void VisitTailCall(Node* call);
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
  void VisitSwitch(Node* node, const SwitchInfo& sw);
  void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
                       Node* value);
  void VisitReturn(Node* ret);
  void VisitThrow(Node* value);
  void VisitRetain(Node* node);

  void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
                            const CallDescriptor* descriptor, Node* node);

  void EmitIdentity(Node* node);
  bool CanProduceSignalingNaN(Node* node);

  // ===========================================================================

  Schedule* schedule() const { return schedule_; }
  Linkage* linkage() const { return linkage_; }
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
  Zone* zone() const { return zone_; }

  void set_instruction_selection_failed() {
    instruction_selection_failed_ = true;
  }
  bool instruction_selection_failed() { return instruction_selection_failed_; }

  void MarkPairProjectionsAsWord32(Node* node);

  // ===========================================================================

  Zone* const zone_;
  Linkage* const linkage_;
  InstructionSequence* const sequence_;
  SourcePositionTable* const source_positions_;
  SourcePositionMode const source_position_mode_;
  Features features_;
  Schedule* const schedule_;
  BasicBlock* current_block_;
  ZoneVector<Instruction*> instructions_;
  BoolVector defined_;
  BoolVector used_;
  IntVector effect_level_;
  IntVector virtual_registers_;
  IntVector virtual_register_rename_;
  InstructionScheduler* scheduler_;
  EnableScheduling enable_scheduling_;
  EnableSerialization enable_serialization_;
  Frame* frame_;
  bool instruction_selection_failed_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_H_