// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_

#include "src/crankshaft/arm64/lithium-arm64.h"

#include "src/ast/scopes.h"
#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;

class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        jump_table_(4, info->zone()),
        scope_(info->scope()),
        deferred_(8, info->zone()),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple),
        pushed_arguments_(0) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  // Simple accessors.
  Scope* scope() const { return scope_; }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
           !info()->IsStub() || info()->requires_frame();
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  LinkRegisterStatus GetLinkRegisterState() const {
    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
  }

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  // Support for converting LOperands to assembler types.
  Register ToRegister(LOperand* op) const;
  Register ToRegister32(LOperand* op) const;
  Operand ToOperand(LOperand* op);
  Operand ToOperand32(LOperand* op);
  enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
  MemOperand ToMemOperand(LOperand* op,
                          StackMode stack_mode = kCanUseStackPointer) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  template <class LI>
  Operand ToShiftedRightOperand32(LOperand* right, LI* shift_info);

  int JSShiftAmountFromLConstant(LOperand* constant) {
    return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
  }
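
  // JavaScript shift counts are taken modulo 32, which is why
  // JSShiftAmountFromLConstant masks the constant with 0x1f. For example
  // (illustrative only, not an actual call site; instr->right() stands for
  // the shift-count operand of a shift instruction), a constant of 33
  // produces the same code as a constant of 1:
  //
  //   __ Lsl(result, left, JSShiftAmountFromLConstant(instr->right()));
  //   // 33 & 0x1f == 1, so this emits a left shift by a single bit.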

  // TODO(jbramley): Examine these helpers and check that they make sense.
  // IsInteger32Constant returns true for smi constants, for example.
  bool IsInteger32Constant(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;

  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  // Return a double scratch register which can be used locally
  // when generating code for a lithium instruction.
  DoubleRegister double_scratch() { return crankshaft_fp_scratch; }

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                               Label* exit,
                               Label* allocation_entry);

  void DoDeferredNumberTagU(LInstruction* instr,
                            LOperand* value,
                            LOperand* temp1,
                            LOperand* temp2);
  void DoDeferredTaggedToI(LTaggedToI* instr,
                           LOperand* value,
                           LOperand* temp1,
                           LOperand* temp2);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                   Register result,
                                   Register object,
                                   Register index);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void DoGap(LGap* instr);

  // Generic version of EmitBranch. It contains some code to avoid emitting a
  // branch on the next emitted basic block where we could just fall through.
  // You shouldn't use it directly; consider one of the helpers instead, such
  // as LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
  template<class InstrType>
  void EmitBranchGeneric(InstrType instr,
                         const BranchGenerator& branch);

  template<class InstrType>
  void EmitBranch(InstrType instr, Condition condition);

  template<class InstrType>
  void EmitCompareAndBranch(InstrType instr,
                            Condition condition,
                            const Register& lhs,
                            const Operand& rhs);

  template<class InstrType>
  void EmitTestAndBranch(InstrType instr,
                         Condition condition,
                         const Register& value,
                         uint64_t mask);

  template<class InstrType>
  void EmitBranchIfNonZeroNumber(InstrType instr,
                                 const FPRegister& value,
                                 const FPRegister& scratch);

  template<class InstrType>
  void EmitBranchIfHeapNumber(InstrType instr,
                              const Register& value);

  template<class InstrType>
  void EmitBranchIfRoot(InstrType instr,
                        const Register& value,
                        Heap::RootListIndex index);
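
  // Roughly speaking (see lithium-codegen-arm64.cc for the real
  // implementation), EmitBranchGeneric compares the instruction's true and
  // false destinations against the next emitted block and only emits the
  // branches it actually needs:
  //
  //   if (true_block == false_block) {
  //     EmitGoto(true_block);
  //   } else if (true_block == next_block) {
  //     branch.EmitInverted(false_label);   // fall through into the true block
  //   } else {
  //     branch.Emit(true_label);
  //     if (false_block != next_block) __ B(false_label);
  //   }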

  // Emits optimized code to deep-copy the contents of statically known object
  // graphs (e.g. object literal boilerplate). Expects a pointer to the
  // allocated destination object in the result register, and a pointer to the
  // source object in the source register.
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    Register scratch,
                    int* offset,
                    AllocationSiteMode mode);

  template <class T>
  void EmitVectorLoadICRegisters(T* instr);

  // Emits optimized code for %_IsString(x). Preserves input register.
  // Returns the condition on which a final split to the true and false
  // labels should be made, to optimize fallthrough.
  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
                         SmiCheck check_needed);

  MemOperand BuildSeqStringOperand(Register string,
                                   Register temp,
                                   LOperand* index,
                                   String::Encoding encoding);
  void DeoptimizeBranch(LInstruction* instr, DeoptimizeReason deopt_reason,
                        BranchType branch_type, Register reg = NoReg,
                        int bit = -1,
                        Deoptimizer::BailoutType* override_bailout_type = NULL);
  void Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason,
                  Deoptimizer::BailoutType* override_bailout_type = NULL);
  void DeoptimizeIf(Condition cond, LInstruction* instr,
                    DeoptimizeReason deopt_reason);
  void DeoptimizeIfZero(Register rt, LInstruction* instr,
                        DeoptimizeReason deopt_reason);
  void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
                           DeoptimizeReason deopt_reason);
  void DeoptimizeIfNegative(Register rt, LInstruction* instr,
                            DeoptimizeReason deopt_reason);
  void DeoptimizeIfSmi(Register rt, LInstruction* instr,
                       DeoptimizeReason deopt_reason);
  void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
                          DeoptimizeReason deopt_reason);
  void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
                        LInstruction* instr, DeoptimizeReason deopt_reason);
  void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
                           LInstruction* instr, DeoptimizeReason deopt_reason);
  void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
  void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
                             DeoptimizeReason deopt_reason);
  void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
                          DeoptimizeReason deopt_reason);
  void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
                            DeoptimizeReason deopt_reason);

  MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                              Register base,
                                              Register scratch,
                                              bool key_is_smi,
                                              bool key_is_constant,
                                              int constant_key,
                                              ElementsKind elements_kind,
                                              int base_offset);
  MemOperand PrepareKeyedArrayOperand(Register base,
                                      Register elements,
                                      Register key,
                                      bool key_is_tagged,
                                      ElementsKind elements_kind,
                                      Representation representation,
                                      int base_offset);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);

  bool HasAllocatedStackSlots() const {
    return chunk()->HasAllocatedStackSlots();
  }
  int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
  int GetTotalFrameSlotCount() const {
    return chunk()->GetTotalFrameSlotCount();
  }

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
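
  // The DeoptimizeIf* helpers above are thin wrappers around DeoptimizeBranch;
  // each picks a BranchType that can usually be emitted as a single
  // Cbz/Cbnz or Tbz/Tbnz instruction. A sketch (the register name is
  // illustrative):
  //
  //   // Smis are tagged with 0 in bit 0 on arm64, so "is a smi" means
  //   // "bit 0 is clear".
  //   DeoptimizeIfSmi(value, instr, DeoptimizeReason::kSmi);
  //   // is roughly equivalent to:
  //   DeoptimizeIfBitClear(value, MaskToBit(kSmiTagMask), instr,
  //                        DeoptimizeReason::kSmi);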

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation steps. Returns true if code generation should continue.
  void GenerateBodyInstructionPre(LInstruction* instr) override;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, instr);
  }

  void LoadContextFromDeferred(LOperand* context);
  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
                          Register scratch2, Register scratch3);

  // Generate a direct call to a known function. Expects the function
  // to be in x1.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count, int arity,
                         bool is_tail_call, LInstruction* instr);

  // Support for recording safepoint information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void EnsureSpaceForLazyDeopt(int space_needed) override;

  ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
  Scope* const scope_;
  ZoneList<LDeferredCode*> deferred_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table itself is
  // emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;
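
  // The two inline CallRuntime overloads above simply resolve the
  // Runtime::Function before delegating; the last one also takes the argument
  // count from the function's declared nargs. Typical use from a Do* handler
  // looks like the following (illustrative, not a specific call site;
  // Runtime::kStackGuard declares nargs == 0):
  //
  //   CallRuntime(Runtime::kStackGuard, instr);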

  // The number of arguments pushed onto the stack, either by this block or by
  // a predecessor.
  int pushed_arguments_;

  void RecordPushedArgumentsDelta(int delta) {
    pushed_arguments_ += delta;
    DCHECK(pushed_arguments_ >= 0);
  }

  int old_position_;

  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen);

    ~PushSafepointRegistersScope();

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};


// This is the abstract class used by EmitBranchGeneric.
// It is used to emit code for conditional branching. The Emit() function
// emits code to branch when the condition holds, and EmitInverted() emits
// the branch when the inverted condition holds.
//
// For concrete examples, see the implementations in
// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
class BranchGenerator BASE_EMBEDDED {
 public:
  explicit BranchGenerator(LCodeGen* codegen)
      : codegen_(codegen) { }

  virtual ~BranchGenerator() { }

  virtual void Emit(Label* label) const = 0;
  virtual void EmitInverted(Label* label) const = 0;

 protected:
  MacroAssembler* masm() const { return codegen_->masm(); }

  LCodeGen* codegen_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
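
// A concrete BranchGenerator is a small class that knows how to emit one
// branch and its inverse. An illustrative sketch (the real classes live in
// lithium-codegen-arm64.cc and may differ in detail):
//
//   class BranchOnCondition : public BranchGenerator {
//    public:
//     BranchOnCondition(LCodeGen* codegen, Condition cond)
//         : BranchGenerator(codegen), cond_(cond) { }
//
//     virtual void Emit(Label* label) const { __ B(cond_, label); }
//     virtual void EmitInverted(Label* label) const {
//       // "always" has no useful inverse, so emit nothing in that case.
//       if (cond_ != al) __ B(NegateCondition(cond_), label);
//     }
//
//    private:
//     Condition cond_;
//   };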