// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter-assembler.h"

#include <limits>
#include <ostream>

#include "src/code-factory.h"
#include "src/frames.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::Node;

InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
                                           Bytecode bytecode,
                                           OperandScale operand_scale)
    : CodeStubAssembler(isolate, zone, InterpreterDispatchDescriptor(isolate),
                        Code::ComputeFlags(Code::BYTECODE_HANDLER),
                        Bytecodes::ToString(bytecode),
                        Bytecodes::ReturnCount(bytecode)),
      bytecode_(bytecode),
      operand_scale_(operand_scale),
      bytecode_offset_(this, MachineType::PointerRepresentation()),
      interpreted_frame_pointer_(this, MachineType::PointerRepresentation()),
      accumulator_(this, MachineRepresentation::kTagged),
      accumulator_use_(AccumulatorUse::kNone),
      made_call_(false),
      disable_stack_check_across_call_(false),
      stack_pointer_before_call_(nullptr) {
  accumulator_.Bind(Parameter(InterpreterDispatchDescriptor::kAccumulator));
  bytecode_offset_.Bind(
      Parameter(InterpreterDispatchDescriptor::kBytecodeOffset));
  if (FLAG_trace_ignition) {
    TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
  }
}

InterpreterAssembler::~InterpreterAssembler() {
  // If the following check fails, the handler does not use the
  // accumulator in the way described in the bytecode definitions in
  // bytecodes.h.
  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
}

Node* InterpreterAssembler::GetInterpretedFramePointer() {
  if (!interpreted_frame_pointer_.IsBound()) {
    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
  }
  return interpreted_frame_pointer_.value();
}

Node* InterpreterAssembler::GetAccumulatorUnchecked() {
  return accumulator_.value();
}

Node* InterpreterAssembler::GetAccumulator() {
  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
  return GetAccumulatorUnchecked();
}

void InterpreterAssembler::SetAccumulator(Node* value) {
  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
  accumulator_.Bind(value);
}

Node* InterpreterAssembler::GetContext() {
  return LoadRegister(Register::current_context());
}

void InterpreterAssembler::SetContext(Node* value) {
  StoreRegister(value, Register::current_context());
}

Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
  cur_context.Bind(context);

  Variable cur_depth(this, MachineRepresentation::kWord32);
  cur_depth.Bind(depth);

  Label context_found(this);

  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
  Label context_search(this, 2, context_search_loop_variables);

  // Fast path if the depth is 0.
  Branch(Word32Equal(depth, Int32Constant(0)), &context_found,
         &context_search);

  // Loop until the depth is 0.
  Bind(&context_search);
  {
    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context.Bind(
        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
           &context_search);
  }

  Bind(&context_found);
  return cur_context.value();
}

void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
                                                              Node* depth,
                                                              Label* target) {
  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
  cur_context.Bind(context);

  Variable cur_depth(this, MachineRepresentation::kWord32);
  cur_depth.Bind(depth);

  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
  Label context_search(this, 2, context_search_loop_variables);

  // Loop until the depth is 0.
  Goto(&context_search);
  Bind(&context_search);
  {
    // TODO(leszeks): We only need to do this check if the context had a sloppy
    // eval, we could pass in a context chain bitmask to figure out which
    // contexts actually need to be checked.

    Node* extension_slot =
        LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);

    // Jump to the target if the extension slot is not a hole.
    GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);

    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context.Bind(
        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
           &context_search);
  }
}

Node* InterpreterAssembler::BytecodeOffset() {
  return bytecode_offset_.value();
}

Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
  if (made_call_) {
    // If we have made a call, restore bytecode array from stack frame in case
    // the debugger has swapped us to the patched debugger bytecode array.
    return LoadRegister(Register::bytecode_array());
  } else {
    return Parameter(InterpreterDispatchDescriptor::kBytecodeArray);
  }
}

Node* InterpreterAssembler::DispatchTableRawPointer() {
  return Parameter(InterpreterDispatchDescriptor::kDispatchTable);
}

Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
  return IntPtrAdd(GetInterpretedFramePointer(),
                   RegisterFrameOffset(reg_index));
}

Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
  return WordShl(index, kPointerSizeLog2);
}

Node* InterpreterAssembler::LoadRegister(Register reg) {
  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
              IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
}

Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
              RegisterFrameOffset(reg_index));
}

Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
  return StoreNoWriteBarrier(
      MachineRepresentation::kTagged, GetInterpretedFramePointer(),
      IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
}

Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
  return StoreNoWriteBarrier(MachineRepresentation::kTagged,
                             GetInterpretedFramePointer(),
                             RegisterFrameOffset(reg_index), value);
}

Node* InterpreterAssembler::NextRegister(Node* reg_index) {
  // Register indexes are negative, so the next index is minus one.
  return IntPtrAdd(reg_index, IntPtrConstant(-1));
}

Node* InterpreterAssembler::OperandOffset(int operand_index) {
  return IntPtrConstant(
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}

Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  Node* operand_offset = OperandOffset(operand_index);
  return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
              IntPtrAdd(BytecodeOffset(), operand_offset));
}

Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  Node* operand_offset = OperandOffset(operand_index);
  Node* load = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
                    IntPtrAdd(BytecodeOffset(), operand_offset));

  // Ensure that we sign extend to full pointer size
  if (kPointerSize == 8) {
    load = ChangeInt32ToInt64(load);
  }
  return load;
}

compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
    int relative_offset, MachineType result_type) {
  static const int kMaxCount = 4;
  DCHECK(!TargetSupportsUnalignedAccess());

  int count;
  switch (result_type.representation()) {
    case MachineRepresentation::kWord16:
      count = 2;
      break;
    case MachineRepresentation::kWord32:
      count = 4;
      break;
    default:
      UNREACHABLE();
      break;
  }
  MachineType msb_type =
      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();

#if V8_TARGET_LITTLE_ENDIAN
  const int kStep = -1;
  int msb_offset = count - 1;
#elif V8_TARGET_BIG_ENDIAN
  const int kStep = 1;
  int msb_offset = 0;
#else
#error "Unknown Architecture"
#endif

  // Read the most significant byte into bytes[0] and then in order
  // down to least significant in bytes[count - 1].
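  // For example, on a little-endian target reading a 16-bit operand,
  // count == 2, msb_offset == 1 and kStep == -1, so the loop below first
  // loads the byte at relative_offset + 1 (the most significant byte) and
  // then the byte at relative_offset.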
  DCHECK(count <= kMaxCount);
  compiler::Node* bytes[kMaxCount];
  for (int i = 0; i < count; i++) {
    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
    Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
    Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
    bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset);
  }

  // Pack LSB to MSB.
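  // For a 16-bit operand this computes (bytes[0] << 8) | bytes[1].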
  Node* result = bytes[--count];
  for (int i = 1; --count >= 0; i++) {
    Node* shift = Int32Constant(i * kBitsPerByte);
    Node* value = Word32Shl(bytes[count], shift);
    result = Word32Or(value, result);
  }
  return result;
}

Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16());
  }
}

Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  Node* load;
  if (TargetSupportsUnalignedAccess()) {
    load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
  }

  // Ensure that we sign extend to full pointer size
  if (kPointerSize == 8) {
    load = ChangeInt32ToInt64(load);
  }
  return load;
}

Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32());
  }
}

Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  Node* load;
  if (TargetSupportsUnalignedAccess()) {
    load = Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
  }

  // Ensure that we sign extend to full pointer size
  if (kPointerSize == 8) {
    load = ChangeInt32ToInt64(load);
  }
  return load;
}

Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
                                                  OperandSize operand_size) {
  DCHECK(!Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandSignedByte(operand_index);
    case OperandSize::kShort:
      return BytecodeOperandSignedShort(operand_index);
    case OperandSize::kQuad:
      return BytecodeOperandSignedQuad(operand_index);
    case OperandSize::kNone:
      UNREACHABLE();
  }
  return nullptr;
}

Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index,
                                                    OperandSize operand_size) {
  DCHECK(Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandUnsignedByte(operand_index);
    case OperandSize::kShort:
      return BytecodeOperandUnsignedShort(operand_index);
    case OperandSize::kQuad:
      return BytecodeOperandUnsignedQuad(operand_index);
    case OperandSize::kNone:
      UNREACHABLE();
  }
  return nullptr;
}

Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
  DCHECK_EQ(OperandType::kFlag8,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
  DCHECK_EQ(OperandType::kUImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
  DCHECK_EQ(OperandType::kImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeSignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
  DCHECK(OperandType::kIdx ==
         Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
  DCHECK(Bytecodes::IsRegisterOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeSignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
  DCHECK(OperandType::kRuntimeId ==
         Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kShort);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
  DCHECK(OperandType::kIntrinsicId ==
         Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
  Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
                                        BytecodeArray::kConstantPoolOffset);
  Node* entry_offset =
      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
                WordShl(index, kPointerSizeLog2));
  return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
}

Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
  Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
                                        BytecodeArray::kConstantPoolOffset);
  int offset = FixedArray::kHeaderSize - kHeapObjectTag;
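  // On 64-bit little-endian targets a Smi keeps its value in the upper
  // 32 bits of the tagged word, so bias the offset by half a pointer and
  // load only that half as an Int32 below.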
#if V8_TARGET_LITTLE_ENDIAN
  if (Is64()) {
    offset += kPointerSize / 2;
  }
#endif
  Node* entry_offset =
      IntPtrAdd(IntPtrConstant(offset), WordShl(index, kPointerSizeLog2));
  if (Is64()) {
    return ChangeInt32ToInt64(
        Load(MachineType::Int32(), constant_pool, entry_offset));
  } else {
    return SmiUntag(
        Load(MachineType::AnyTagged(), constant_pool, entry_offset));
  }
}

Node* InterpreterAssembler::LoadTypeFeedbackVector() {
  Node* function = LoadRegister(Register::function_closure());
  Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
  Node* vector =
      LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
  return vector;
}

void InterpreterAssembler::CallPrologue() {
  StoreRegister(SmiTag(BytecodeOffset()), Register::bytecode_offset());

  if (FLAG_debug_code && !disable_stack_check_across_call_) {
    DCHECK(stack_pointer_before_call_ == nullptr);
    stack_pointer_before_call_ = LoadStackPointer();
  }
  made_call_ = true;
}

void InterpreterAssembler::CallEpilogue() {
  if (FLAG_debug_code && !disable_stack_check_across_call_) {
    Node* stack_pointer_after_call = LoadStackPointer();
    Node* stack_pointer_before_call = stack_pointer_before_call_;
    stack_pointer_before_call_ = nullptr;
    AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
                        kUnexpectedStackPointer);
  }
}

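// The call count for a call IC is kept in the feedback vector slot
// immediately following the call's feedback slot, i.e. at slot_id + 1.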
Node* InterpreterAssembler::IncrementCallCount(Node* type_feedback_vector,
                                               Node* slot_id) {
  Comment("increment call count");
  Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
  Node* call_count =
      LoadFixedArrayElement(type_feedback_vector, call_count_slot);
  Node* new_count = SmiAdd(call_count, SmiTag(Int32Constant(1)));
  // Count is Smi, so we don't need a write barrier.
  return StoreFixedArrayElement(type_feedback_vector, call_count_slot,
                                new_count, SKIP_WRITE_BARRIER);
}

Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
                                               Node* first_arg, Node* arg_count,
                                               Node* slot_id,
                                               Node* type_feedback_vector,
                                               TailCallMode tail_call_mode) {
  // Static checks to assert it is safe to examine the type feedback element.
  // We don't know that we have a weak cell. We might have a private symbol
  // or an AllocationSite, but the memory is safe to examine.
  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
  // FixedArray.
  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
  // computed, meaning that it can't appear to be a pointer. If the low bit is
  // 0, then hash is computed, but the 0 bit prevents the field from appearing
  // to be a pointer.
  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
                    WeakCell::kValueOffset &&
                WeakCell::kValueOffset == Symbol::kHashFieldSlot);

  Variable return_value(this, MachineRepresentation::kTagged);
  Label call_function(this), extra_checks(this, Label::kDeferred), call(this),
      end(this);

  // The checks. First, does function match the recorded monomorphic target?
  Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
  Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
  Node* is_monomorphic = WordEqual(function, feedback_value);
  GotoUnless(is_monomorphic, &extra_checks);

  // The compare above could have been a SMI/SMI comparison. Guard against
  // this convincing us that we have a monomorphic JSFunction.
  Node* is_smi = TaggedIsSmi(function);
  Branch(is_smi, &extra_checks, &call_function);

  Bind(&call_function);
  {
    // Increment the call count.
    IncrementCallCount(type_feedback_vector, slot_id);

    // Call using call function builtin.
    Callable callable = CodeFactory::InterpreterPushArgsAndCall(
        isolate(), tail_call_mode, CallableType::kJSFunction);
    Node* code_target = HeapConstant(callable.code());
    Node* ret_value = CallStub(callable.descriptor(), code_target, context,
                               arg_count, first_arg, function);
    return_value.Bind(ret_value);
    Goto(&end);
  }

  Bind(&extra_checks);
  {
    Label check_initialized(this), mark_megamorphic(this),
        create_allocation_site(this);

    Comment("check if megamorphic");
    // Check if it is a megamorphic target.
    Node* is_megamorphic = WordEqual(
        feedback_element,
        HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &call);

    Comment("check if it is an allocation site");
    Node* is_allocation_site = WordEqual(
        LoadMap(feedback_element), LoadRoot(Heap::kAllocationSiteMapRootIndex));
    GotoUnless(is_allocation_site, &check_initialized);

    // If it is not the Array() function, mark megamorphic.
    Node* context_slot =
        LoadFixedArrayElement(LoadNativeContext(context),
                              Int32Constant(Context::ARRAY_FUNCTION_INDEX));
    Node* is_array_function = WordEqual(context_slot, function);
    GotoUnless(is_array_function, &mark_megamorphic);

    // It is a monomorphic Array function. Increment the call count.
    IncrementCallCount(type_feedback_vector, slot_id);

    // Call ArrayConstructorStub.
    Callable callable_call =
        CodeFactory::InterpreterPushArgsAndConstructArray(isolate());
    Node* code_target_call = HeapConstant(callable_call.code());
    Node* ret_value =
        CallStub(callable_call.descriptor(), code_target_call, context,
                 arg_count, function, feedback_element, first_arg);
    return_value.Bind(ret_value);
    Goto(&end);

    Bind(&check_initialized);
    {
      Comment("check if uninitialized");
      // Check if it is uninitialized target first.
      Node* is_uninitialized = WordEqual(
          feedback_element,
          HeapConstant(TypeFeedbackVector::UninitializedSentinel(isolate())));
      GotoUnless(is_uninitialized, &mark_megamorphic);

      Comment("handle_uninitialized");
      // If it is not a JSFunction mark it as megamorphic.
      Node* is_smi = TaggedIsSmi(function);
      GotoIf(is_smi, &mark_megamorphic);

      // Check if function is an object of JSFunction type.
      Node* instance_type = LoadInstanceType(function);
      Node* is_js_function =
          WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
      GotoUnless(is_js_function, &mark_megamorphic);

      // Check if it is the Array() function.
      Node* context_slot =
          LoadFixedArrayElement(LoadNativeContext(context),
                                Int32Constant(Context::ARRAY_FUNCTION_INDEX));
      Node* is_array_function = WordEqual(context_slot, function);
      GotoIf(is_array_function, &create_allocation_site);

      // Check if the function belongs to the same native context.
      Node* native_context = LoadNativeContext(
          LoadObjectField(function, JSFunction::kContextOffset));
      Node* is_same_native_context =
          WordEqual(native_context, LoadNativeContext(context));
      GotoUnless(is_same_native_context, &mark_megamorphic);

      CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
                                     function);

      // Call using call function builtin.
      Goto(&call_function);
    }

    Bind(&create_allocation_site);
    {
      CreateAllocationSiteInFeedbackVector(type_feedback_vector,
                                           SmiTag(slot_id));

      // Call using CallFunction builtin. CallICs have a PREMONOMORPHIC state.
      // They start collecting feedback only when a call is executed the second
      // time. So, do not pass any feedback here.
      Goto(&call_function);
    }

    Bind(&mark_megamorphic);
    {
      // Mark it as megamorphic.
      // MegamorphicSentinel is created as a part of Heap::InitialObjects
      // and will not move during a GC. So it is safe to skip write barrier.
      DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
      StoreFixedArrayElement(
          type_feedback_vector, slot_id,
          HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      Goto(&call);
    }
  }

  Bind(&call);
  {
    Comment("Increment call count and call using Call builtin");
    // Increment the call count.
    IncrementCallCount(type_feedback_vector, slot_id);

    // Call using call builtin.
    Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
        isolate(), tail_call_mode, CallableType::kAny);
    Node* code_target_call = HeapConstant(callable_call.code());
    Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
                               context, arg_count, first_arg, function);
    return_value.Bind(ret_value);
    Goto(&end);
  }

  Bind(&end);
  return return_value.value();
}

Node* InterpreterAssembler::CallJS(Node* function, Node* context,
                                   Node* first_arg, Node* arg_count,
                                   TailCallMode tail_call_mode) {
  Callable callable = CodeFactory::InterpreterPushArgsAndCall(
      isolate(), tail_call_mode, CallableType::kAny);
  Node* code_target = HeapConstant(callable.code());
  return CallStub(callable.descriptor(), code_target, context, arg_count,
                  first_arg, function);
}

Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
                                          Node* new_target, Node* first_arg,
                                          Node* arg_count, Node* slot_id,
                                          Node* type_feedback_vector) {
  Variable return_value(this, MachineRepresentation::kTagged);
  Variable allocation_feedback(this, MachineRepresentation::kTagged);
  Label call_construct_function(this, &allocation_feedback),
      extra_checks(this, Label::kDeferred), call_construct(this), end(this);

  // Slot id of 0 is used to indicate no type feedback is available.
  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
  Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
  GotoIf(is_feedback_unavailable, &call_construct);

  // Check that the constructor is not a smi.
  Node* is_smi = TaggedIsSmi(constructor);
  GotoIf(is_smi, &call_construct);

  // Check that constructor is a JSFunction.
  Node* instance_type = LoadInstanceType(constructor);
  Node* is_js_function =
      WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
  GotoUnless(is_js_function, &call_construct);

  // Check if it is a monomorphic constructor.
  Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
  Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
  Node* is_monomorphic = WordEqual(constructor, feedback_value);
  allocation_feedback.Bind(UndefinedConstant());
  Branch(is_monomorphic, &call_construct_function, &extra_checks);

  Bind(&call_construct_function);
  {
    Comment("call using callConstructFunction");
    IncrementCallCount(type_feedback_vector, slot_id);
    Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
        isolate(), CallableType::kJSFunction);
    return_value.Bind(CallStub(callable_function.descriptor(),
                               HeapConstant(callable_function.code()), context,
                               arg_count, new_target, constructor,
                               allocation_feedback.value(), first_arg));
    Goto(&end);
  }

  Bind(&extra_checks);
  {
    Label check_allocation_site(this), check_initialized(this),
        initialize(this), mark_megamorphic(this);

    // Check if it is a megamorphic target.
    Comment("check if megamorphic");
    Node* is_megamorphic = WordEqual(
        feedback_element,
        HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &call_construct_function);

    Comment("check if weak cell");
    Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
                                   LoadRoot(Heap::kWeakCellMapRootIndex));
    GotoUnless(is_weak_cell, &check_allocation_site);

    // If the weak cell is cleared, we have a new chance to become
    // monomorphic.
    Comment("check if weak cell is cleared");
    Node* is_smi = TaggedIsSmi(feedback_value);
    Branch(is_smi, &initialize, &mark_megamorphic);

    Bind(&check_allocation_site);
    {
      Comment("check if it is an allocation site");
      Node* is_allocation_site =
          WordEqual(LoadObjectField(feedback_element, 0),
                    LoadRoot(Heap::kAllocationSiteMapRootIndex));
      GotoUnless(is_allocation_site, &check_initialized);

      // Make sure the function is the Array() function.
      Node* context_slot =
          LoadFixedArrayElement(LoadNativeContext(context),
                                Int32Constant(Context::ARRAY_FUNCTION_INDEX));
      Node* is_array_function = WordEqual(context_slot, constructor);
      GotoUnless(is_array_function, &mark_megamorphic);

      allocation_feedback.Bind(feedback_element);
      Goto(&call_construct_function);
    }

    Bind(&check_initialized);
    {
      // Check if it is uninitialized.
      Comment("check if uninitialized");
      Node* is_uninitialized = WordEqual(
          feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
      Branch(is_uninitialized, &initialize, &mark_megamorphic);
    }

    Bind(&initialize);
    {
      Label create_allocation_site(this), create_weak_cell(this);
      Comment("initialize the feedback element");
      // Create an allocation site if the function is an array function,
      // otherwise create a weak cell.
      Node* context_slot =
          LoadFixedArrayElement(LoadNativeContext(context),
                                Int32Constant(Context::ARRAY_FUNCTION_INDEX));
      Node* is_array_function = WordEqual(context_slot, constructor);
      Branch(is_array_function, &create_allocation_site, &create_weak_cell);

      Bind(&create_allocation_site);
      {
        Node* site = CreateAllocationSiteInFeedbackVector(type_feedback_vector,
                                                          SmiTag(slot_id));
        allocation_feedback.Bind(site);
        Goto(&call_construct_function);
      }

      Bind(&create_weak_cell);
      {
        CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
                                       constructor);
        Goto(&call_construct_function);
      }
    }

    Bind(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
      StoreFixedArrayElement(
          type_feedback_vector, slot_id,
          HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      Goto(&call_construct_function);
    }
  }

  Bind(&call_construct);
  {
    Comment("call using callConstruct builtin");
    Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(
        isolate(), CallableType::kAny);
    Node* code_target = HeapConstant(callable.code());
    return_value.Bind(CallStub(callable.descriptor(), code_target, context,
                               arg_count, new_target, constructor,
                               UndefinedConstant(), first_arg));
    Goto(&end);
  }

  Bind(&end);
  return return_value.value();
}

Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
                                         Node* first_arg, Node* arg_count,
                                         int result_size) {
  Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
  Node* code_target = HeapConstant(callable.code());

  // Get the function entry from the function id.
  Node* function_table = ExternalConstant(
      ExternalReference::runtime_function_table_address(isolate()));
  Node* function_offset =
      Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
  Node* function = IntPtrAdd(function_table, function_offset);
  Node* function_entry =
      Load(MachineType::Pointer(), function,
           IntPtrConstant(offsetof(Runtime::Function, entry)));

  return CallStub(callable.descriptor(), code_target, context, arg_count,
                  first_arg, function_entry, result_size);
}

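// Adds |weight| to the interrupt budget stored in the BytecodeArray; callers
// pass a negative weight for backward jumps and returns, and the interrupt
// runtime is invoked when the budget drops below zero.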
void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
  // TODO(rmcilroy): It might be worthwhile to only update the budget for
  // backwards branches. Those are distinguishable by the {JumpLoop} bytecode.

  Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
  Node* budget_offset =
      IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);

  // Update budget by |weight| and check if it reaches zero.
  Variable new_budget(this, MachineRepresentation::kWord32);
  Node* old_budget =
      Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
  new_budget.Bind(Int32Add(old_budget, weight));
  Node* condition =
      Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
  Branch(condition, &ok, &interrupt_check);

  // Perform interrupt and reset budget.
  Bind(&interrupt_check);
  {
    CallRuntime(Runtime::kInterrupt, GetContext());
    new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
    Goto(&ok);
  }

  // Update budget.
  Bind(&ok);
  StoreNoWriteBarrier(MachineRepresentation::kWord32,
                      BytecodeArrayTaggedPointer(), budget_offset,
                      new_budget.value());
}

Node* InterpreterAssembler::Advance() {
  return Advance(Bytecodes::Size(bytecode_, operand_scale_));
}

Node* InterpreterAssembler::Advance(int delta) {
  return Advance(IntPtrConstant(delta));
}

Node* InterpreterAssembler::Advance(Node* delta) {
  if (FLAG_trace_ignition) {
    TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
  }
  Node* next_offset = IntPtrAdd(BytecodeOffset(), delta);
  bytecode_offset_.Bind(next_offset);
  return next_offset;
}

Node* InterpreterAssembler::Jump(Node* delta) {
  DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));

  UpdateInterruptBudget(delta);
  Node* new_bytecode_offset = Advance(delta);
  Node* target_bytecode = LoadBytecode(new_bytecode_offset);
  return DispatchToBytecode(target_bytecode, new_bytecode_offset);
}

void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  Bind(&match);
  Jump(delta);
  Bind(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
  JumpConditional(WordEqual(lhs, rhs), delta);
}

void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
                                              Node* delta) {
  JumpConditional(WordNotEqual(lhs, rhs), delta);
}

Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
  Node* bytecode =
      Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
  if (kPointerSize == 8) {
    bytecode = ChangeUint32ToUint64(bytecode);
  }
  return bytecode;
}

Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
  Label do_inline_star(this), done(this);

  Variable var_bytecode(this, MachineType::PointerRepresentation());
  var_bytecode.Bind(target_bytecode);

  Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
  Node* is_star = WordEqual(target_bytecode, star_bytecode);
  Branch(is_star, &do_inline_star, &done);

  Bind(&do_inline_star);
  {
    InlineStar();
    var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
    Goto(&done);
  }
  Bind(&done);
  return var_bytecode.value();
}

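// Executes the Star bytecode that follows the current bytecode inline (an
// accumulator-to-register store), so the common Star that trails many
// bytecodes does not need a full dispatch of its own.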
void InterpreterAssembler::InlineStar() {
  Bytecode previous_bytecode = bytecode_;
  AccumulatorUse previous_acc_use = accumulator_use_;

  bytecode_ = Bytecode::kStar;
  accumulator_use_ = AccumulatorUse::kNone;

  if (FLAG_trace_ignition) {
    TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
  }
  StoreRegister(GetAccumulator(), BytecodeOperandReg(0));

  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));

  Advance();
  bytecode_ = previous_bytecode;
  accumulator_use_ = previous_acc_use;
}

Node* InterpreterAssembler::Dispatch() {
  Node* target_offset = Advance();
  Node* target_bytecode = LoadBytecode(target_offset);

  if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
    target_bytecode = StarDispatchLookahead(target_bytecode);
  }
  return DispatchToBytecode(target_bytecode, BytecodeOffset());
}

Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
                                               Node* new_bytecode_offset) {
  if (FLAG_trace_ignition_dispatches) {
    TraceBytecodeDispatch(target_bytecode);
  }

  Node* target_code_entry =
      Load(MachineType::Pointer(), DispatchTableRawPointer(),
           WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));

  return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}

Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
                                                      Node* bytecode_offset) {
  Node* handler_entry =
      IntPtrAdd(handler, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
  return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
}

Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
    Node* handler_entry, Node* bytecode_offset) {
  InterpreterDispatchDescriptor descriptor(isolate());
  Node* args[] = {GetAccumulatorUnchecked(), bytecode_offset,
                  BytecodeArrayTaggedPointer(), DispatchTableRawPointer()};
  return TailCallBytecodeDispatch(descriptor, handler_entry, args);
}

void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
  // Dispatching a wide bytecode requires treating the prefix
  // bytecode as a base pointer into the dispatch table and dispatching
  // the bytecode that follows relative to this base.
  //
  // Indices 0-255 correspond to bytecodes with operand_scale == 0
  // Indices 256-511 correspond to bytecodes with operand_scale == 1
  // Indices 512-767 correspond to bytecodes with operand_scale == 2
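  //
  // For example, under OperandScale::kDouble (the Wide prefix) the handler
  // for the next bytecode b is looked up at dispatch table index 256 + b.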
  Node* next_bytecode_offset = Advance(1);
  Node* next_bytecode = LoadBytecode(next_bytecode_offset);

  if (FLAG_trace_ignition_dispatches) {
    TraceBytecodeDispatch(next_bytecode);
  }

  Node* base_index;
  switch (operand_scale) {
    case OperandScale::kDouble:
      base_index = IntPtrConstant(1 << kBitsPerByte);
      break;
    case OperandScale::kQuadruple:
      base_index = IntPtrConstant(2 << kBitsPerByte);
      break;
    default:
      UNREACHABLE();
      base_index = nullptr;
  }
  Node* target_index = IntPtrAdd(base_index, next_bytecode);
  Node* target_code_entry =
      Load(MachineType::Pointer(), DispatchTableRawPointer(),
           WordShl(target_index, kPointerSizeLog2));

  DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}

Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
    Node* context, Node* value, Variable* var_type_feedback) {
  // We might need to loop once due to ToNumber conversion.
  Variable var_value(this, MachineRepresentation::kTagged),
      var_result(this, MachineRepresentation::kWord32);
  Variable* loop_vars[] = {&var_value, var_type_feedback};
  Label loop(this, 2, loop_vars), done_loop(this, &var_result);
  var_value.Bind(value);
  var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kNone));
  Goto(&loop);
  Bind(&loop);
  {
    // Load the current {value}.
    value = var_value.value();

    // Check if the {value} is a Smi or a HeapObject.
    Label if_valueissmi(this), if_valueisnotsmi(this);
    Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);

    Bind(&if_valueissmi);
    {
      // Convert the Smi {value}.
      var_result.Bind(SmiToWord32(value));
      var_type_feedback->Bind(
          Word32Or(var_type_feedback->value(),
                   Int32Constant(BinaryOperationFeedback::kSignedSmall)));
      Goto(&done_loop);
    }

    Bind(&if_valueisnotsmi);
    {
      // Check if {value} is a HeapNumber.
      Label if_valueisheapnumber(this),
          if_valueisnotheapnumber(this, Label::kDeferred);
      Node* value_map = LoadMap(value);
      Branch(WordEqual(value_map, HeapNumberMapConstant()),
             &if_valueisheapnumber, &if_valueisnotheapnumber);

      Bind(&if_valueisheapnumber);
      {
        // Truncate the floating point value.
        var_result.Bind(TruncateHeapNumberValueToWord32(value));
        var_type_feedback->Bind(
            Word32Or(var_type_feedback->value(),
                     Int32Constant(BinaryOperationFeedback::kNumber)));
        Goto(&done_loop);
      }

      Bind(&if_valueisnotheapnumber);
      {
        // We do not require an Or with earlier feedback here because once we
        // convert the value to a number, we cannot reach this path. We can
        // only reach this path on the first pass when the feedback is kNone.
        CSA_ASSERT(this,
                   Word32Equal(var_type_feedback->value(),
                               Int32Constant(BinaryOperationFeedback::kNone)));

        Label if_valueisoddball(this),
            if_valueisnotoddball(this, Label::kDeferred);
        Node* is_oddball = Word32Equal(LoadMapInstanceType(value_map),
                                       Int32Constant(ODDBALL_TYPE));
        Branch(is_oddball, &if_valueisoddball, &if_valueisnotoddball);

        Bind(&if_valueisoddball);
        {
          // Convert Oddball to a Number and perform checks again.
          var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
          var_type_feedback->Bind(
              Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
          Goto(&loop);
        }

        Bind(&if_valueisnotoddball);
        {
          // Convert the {value} to a Number first.
          Callable callable = CodeFactory::NonNumberToNumber(isolate());
          var_value.Bind(CallStub(callable, context, value));
          var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kAny));
          Goto(&loop);
        }
      }
    }
  }
  Bind(&done_loop);
  return var_result.value();
}

void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
  // TODO(rmcilroy): Investigate whether it is worth supporting self
  // optimization of primitive functions like FullCodegen.

  // Update profiling count by -BytecodeOffset to simulate backedge to start of
  // function.
  Node* profiling_weight =
      Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
               BytecodeOffset());
  UpdateInterruptBudget(profiling_weight);
}

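// Returns true if the stack pointer has dropped below the isolate's stack
// limit; the stack limit is also used to signal pending interrupts, hence
// the name.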
Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
  Node* sp = LoadStackPointer();
  Node* stack_limit = Load(
      MachineType::Pointer(),
      ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
  return UintPtrLessThan(sp, stack_limit);
}

Node* InterpreterAssembler::LoadOSRNestingLevel() {
  Node* offset =
      IntPtrConstant(BytecodeArray::kOSRNestingLevelOffset - kHeapObjectTag);
  return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(), offset);
}

void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
  disable_stack_check_across_call_ = true;
  Node* abort_id = SmiTag(Int32Constant(bailout_reason));
  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
  disable_stack_check_across_call_ = false;
}

void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
                                               BailoutReason bailout_reason) {
  Label ok(this), abort(this, Label::kDeferred);
  Branch(WordEqual(lhs, rhs), &ok, &abort);

  Bind(&abort);
  Abort(bailout_reason);
  Goto(&ok);

  Bind(&ok);
}

void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
  CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
              SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}

void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
  Node* counters_table = ExternalConstant(
      ExternalReference::interpreter_dispatch_counters(isolate()));
  Node* source_bytecode_table_index = IntPtrConstant(
      static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));

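  // The dispatch counters table is a (kLast + 1) x (kLast + 1) row-major
  // matrix of pointer-sized counters indexed by [source bytecode][target
  // bytecode]; the offset below selects the entry for this pair.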
  Node* counter_offset =
      WordShl(IntPtrAdd(source_bytecode_table_index, target_bytecode),
              IntPtrConstant(kPointerSizeLog2));
  Node* old_counter =
      Load(MachineType::IntPtr(), counters_table, counter_offset);

  Label counter_ok(this), counter_saturated(this, Label::kDeferred);

  Node* counter_reached_max = WordEqual(
      old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
  Branch(counter_reached_max, &counter_saturated, &counter_ok);

  Bind(&counter_ok);
  {
    Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
                        counter_offset, new_counter);
    Goto(&counter_saturated);
  }

  Bind(&counter_saturated);
}

// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
  return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
    V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
    V8_TARGET_ARCH_PPC
  return true;
#else
#error "Unknown Architecture"
#endif
}

Node* InterpreterAssembler::RegisterCount() {
  Node* bytecode_array = LoadRegister(Register::bytecode_array());
  Node* frame_size = LoadObjectField(
      bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32());
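  // The frame size is in bytes, so dividing by the pointer size yields the
  // number of interpreter registers.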
  return Word32Sar(frame_size, Int32Constant(kPointerSizeLog2));
}

Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
  if (FLAG_debug_code) {
    Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
    AbortIfWordNotEqual(array_size, RegisterCount(),
                        kInvalidRegisterFileInGenerator);
  }

  Variable var_index(this, MachineRepresentation::kWord32);
  var_index.Bind(Int32Constant(0));

  // Iterate over register file and write values into array.
  // The mapping of register to array index must match that used in
  // BytecodeGraphBuilder::VisitResumeGenerator.
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  Bind(&loop);
  {
    Node* index = var_index.value();
    Node* condition = Int32LessThan(index, RegisterCount());
    GotoUnless(condition, &done_loop);

    Node* reg_index = Int32Sub(Int32Constant(Register(0).ToOperand()), index);
    Node* value = LoadRegister(ChangeInt32ToIntPtr(reg_index));

    StoreFixedArrayElement(array, index, value);

    var_index.Bind(Int32Add(index, Int32Constant(1)));
    Goto(&loop);
  }
  Bind(&done_loop);

  return array;
}

Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
  if (FLAG_debug_code) {
    Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
    AbortIfWordNotEqual(array_size, RegisterCount(),
                        kInvalidRegisterFileInGenerator);
  }

  Variable var_index(this, MachineRepresentation::kWord32);
  var_index.Bind(Int32Constant(0));

  // Iterate over array and write values into register file. Also erase the
  // array contents to not keep them alive artificially.
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  Bind(&loop);
  {
    Node* index = var_index.value();
    Node* condition = Int32LessThan(index, RegisterCount());
    GotoUnless(condition, &done_loop);

    Node* value = LoadFixedArrayElement(array, index);

    Node* reg_index = Int32Sub(Int32Constant(Register(0).ToOperand()), index);
    StoreRegister(value, ChangeInt32ToIntPtr(reg_index));

    StoreFixedArrayElement(array, index, StaleRegisterConstant());

    var_index.Bind(Int32Add(index, Int32Constant(1)));
    Goto(&loop);
  }
  Bind(&done_loop);

  return array;
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8