// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-scheduler.h"

namespace v8 {
namespace internal {
namespace compiler {

SchedulerSupported()11 bool InstructionScheduler::SchedulerSupported() { return true; }


GetTargetInstructionFlags(const Instruction * instr) const14 int InstructionScheduler::GetTargetInstructionFlags(
15     const Instruction* instr) const {
16   switch (instr->arch_opcode()) {
17     case kX64Add:
18     case kX64Add32:
19     case kX64And:
20     case kX64And32:
21     case kX64Cmp:
22     case kX64Cmp32:
23     case kX64Cmp16:
24     case kX64Cmp8:
25     case kX64Test:
26     case kX64Test32:
27     case kX64Test16:
28     case kX64Test8:
29     case kX64Or:
30     case kX64Or32:
31     case kX64Xor:
32     case kX64Xor32:
33     case kX64Sub:
34     case kX64Sub32:
35     case kX64Imul:
36     case kX64Imul32:
37     case kX64ImulHigh32:
38     case kX64UmulHigh32:
39     case kX64Not:
40     case kX64Not32:
41     case kX64Neg:
42     case kX64Neg32:
43     case kX64Shl:
44     case kX64Shl32:
45     case kX64Shr:
46     case kX64Shr32:
47     case kX64Sar:
48     case kX64Sar32:
49     case kX64Ror:
50     case kX64Ror32:
51     case kX64Lzcnt:
52     case kX64Lzcnt32:
53     case kX64Tzcnt:
54     case kX64Tzcnt32:
55     case kX64Popcnt:
56     case kX64Popcnt32:
57     case kSSEFloat32Cmp:
58     case kSSEFloat32Add:
59     case kSSEFloat32Sub:
60     case kSSEFloat32Mul:
61     case kSSEFloat32Div:
62     case kSSEFloat32Abs:
63     case kSSEFloat32Neg:
64     case kSSEFloat32Sqrt:
65     case kSSEFloat32Round:
66     case kSSEFloat32ToFloat64:
67     case kSSEFloat64Cmp:
68     case kSSEFloat64Add:
69     case kSSEFloat64Sub:
70     case kSSEFloat64Mul:
71     case kSSEFloat64Div:
72     case kSSEFloat64Mod:
73     case kSSEFloat64Abs:
74     case kSSEFloat64Neg:
75     case kSSEFloat64Sqrt:
76     case kSSEFloat64Round:
77     case kSSEFloat32Max:
78     case kSSEFloat64Max:
79     case kSSEFloat32Min:
80     case kSSEFloat64Min:
81     case kSSEFloat64ToFloat32:
82     case kSSEFloat32ToInt32:
83     case kSSEFloat32ToUint32:
84     case kSSEFloat64ToInt32:
85     case kSSEFloat64ToUint32:
86     case kSSEFloat64ToInt64:
87     case kSSEFloat32ToInt64:
88     case kSSEFloat64ToUint64:
89     case kSSEFloat32ToUint64:
90     case kSSEInt32ToFloat64:
91     case kSSEInt32ToFloat32:
92     case kSSEInt64ToFloat32:
93     case kSSEInt64ToFloat64:
94     case kSSEUint64ToFloat32:
95     case kSSEUint64ToFloat64:
96     case kSSEUint32ToFloat64:
97     case kSSEUint32ToFloat32:
98     case kSSEFloat64ExtractLowWord32:
99     case kSSEFloat64ExtractHighWord32:
100     case kSSEFloat64InsertLowWord32:
101     case kSSEFloat64InsertHighWord32:
102     case kSSEFloat64LoadLowWord32:
103     case kSSEFloat64SilenceNaN:
104     case kAVXFloat32Cmp:
105     case kAVXFloat32Add:
106     case kAVXFloat32Sub:
107     case kAVXFloat32Mul:
108     case kAVXFloat32Div:
109     case kAVXFloat64Cmp:
110     case kAVXFloat64Add:
111     case kAVXFloat64Sub:
112     case kAVXFloat64Mul:
113     case kAVXFloat64Div:
114     case kAVXFloat64Abs:
115     case kAVXFloat64Neg:
116     case kAVXFloat32Abs:
117     case kAVXFloat32Neg:
118     case kX64BitcastFI:
119     case kX64BitcastDL:
120     case kX64BitcastIF:
121     case kX64BitcastLD:
122     case kX64Lea32:
123     case kX64Lea:
124     case kX64Dec32:
125     case kX64Inc32:
126     case kX64Int32x4Create:
127     case kX64Int32x4ExtractLane:
128       return (instr->addressing_mode() == kMode_None)
129           ? kNoOpcodeFlags
130           : kIsLoadOperation | kHasSideEffect;
131 
132     case kX64Idiv:
133     case kX64Idiv32:
134     case kX64Udiv:
135     case kX64Udiv32:
136       return (instr->addressing_mode() == kMode_None)
137                  ? kMayNeedDeoptCheck
138                  : kMayNeedDeoptCheck | kIsLoadOperation | kHasSideEffect;
139 
140     case kX64Movsxbl:
141     case kX64Movzxbl:
142     case kX64Movsxbq:
143     case kX64Movzxbq:
144     case kX64Movsxwl:
145     case kX64Movzxwl:
146     case kX64Movsxwq:
147     case kX64Movzxwq:
148     case kX64Movsxlq:
149       DCHECK(instr->InputCount() >= 1);
150       return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
151                                              : kIsLoadOperation;
152 
153     case kX64Movb:
154     case kX64Movw:
155       return kHasSideEffect;
156 
157     case kX64Movl:
158     case kX64TrapMovl:
159       if (instr->HasOutput()) {
160         DCHECK(instr->InputCount() >= 1);
161         return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
162                                                : kIsLoadOperation;
163       } else {
164         return kHasSideEffect;
165       }
166 
167     case kX64Movq:
168     case kX64Movsd:
169     case kX64Movss:
170       return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
171 
172     case kX64StackCheck:
173       return kIsLoadOperation;
174 
175     case kX64Push:
176     case kX64Poke:
177       return kHasSideEffect;
178 
179     case kX64Xchgb:
180     case kX64Xchgw:
181     case kX64Xchgl:
182       return kIsLoadOperation | kHasSideEffect;
183 
184 #define CASE(Name) case k##Name:
185     COMMON_ARCH_OPCODE_LIST(CASE)
186 #undef CASE
187       // Already covered in architecture independent code.
188       UNREACHABLE();
189   }
190 
191   UNREACHABLE();
192   return kNoOpcodeFlags;
193 }


GetInstructionLatency(const Instruction * instr)196 int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
197   // Basic latency modeling for x64 instructions. They have been determined
198   // in an empirical way.
199   switch (instr->arch_opcode()) {
200     case kCheckedLoadInt8:
201     case kCheckedLoadUint8:
202     case kCheckedLoadInt16:
203     case kCheckedLoadUint16:
204     case kCheckedLoadWord32:
205     case kCheckedLoadWord64:
206     case kCheckedLoadFloat32:
207     case kCheckedLoadFloat64:
208     case kCheckedStoreWord8:
209     case kCheckedStoreWord16:
210     case kCheckedStoreWord32:
211     case kCheckedStoreWord64:
212     case kCheckedStoreFloat32:
213     case kCheckedStoreFloat64:
214     case kSSEFloat64Mul:
215       return 5;
216     case kX64Imul:
217     case kX64Imul32:
218     case kX64ImulHigh32:
219     case kX64UmulHigh32:
220     case kSSEFloat32Cmp:
221     case kSSEFloat32Add:
222     case kSSEFloat32Sub:
223     case kSSEFloat32Abs:
224     case kSSEFloat32Neg:
225     case kSSEFloat64Cmp:
226     case kSSEFloat64Add:
227     case kSSEFloat64Sub:
228     case kSSEFloat64Max:
229     case kSSEFloat64Min:
230     case kSSEFloat64Abs:
231     case kSSEFloat64Neg:
232       return 3;
233     case kSSEFloat32Mul:
234     case kSSEFloat32ToFloat64:
235     case kSSEFloat64ToFloat32:
236     case kSSEFloat32Round:
237     case kSSEFloat64Round:
238     case kSSEFloat32ToInt32:
239     case kSSEFloat32ToUint32:
240     case kSSEFloat64ToInt32:
241     case kSSEFloat64ToUint32:
242       return 4;
243     case kX64Idiv:
244       return 49;
245     case kX64Idiv32:
246       return 35;
247     case kX64Udiv:
248       return 38;
249     case kX64Udiv32:
250       return 26;
251     case kSSEFloat32Div:
252     case kSSEFloat64Div:
253     case kSSEFloat32Sqrt:
254     case kSSEFloat64Sqrt:
255       return 13;
256     case kSSEFloat32ToInt64:
257     case kSSEFloat64ToInt64:
258     case kSSEFloat32ToUint64:
259     case kSSEFloat64ToUint64:
260       return 10;
261     case kSSEFloat64Mod:
262       return 50;
263     case kArchTruncateDoubleToI:
264       return 6;
265     default:
266       return 1;
267   }
268 }

}  // namespace compiler
}  // namespace internal
}  // namespace v8
