// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/deoptimizer.h"
#include "src/codegen.h"
#include "src/full-codegen/full-codegen.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"

namespace v8 {
namespace internal {

// LAY + LGHI/LHI + BRCL
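// (These are 6, 4, and 6 bytes respectively on s390, which is where the
// 16-byte entry size below comes from.)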
const int Deoptimizer::table_entry_size_ = 16;

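// patch_size() is the number of bytes reserved at each lazy-deopt site for
// the call sequence that PatchCodeForDeoptimization writes below; loading a
// full 64-bit target address takes a longer instruction sequence, hence the
// larger value on s390x.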
int Deoptimizer::patch_size() {
#if V8_TARGET_ARCH_S390X
  const int kCallInstructionSize = 16;
#else
  const int kCallInstructionSize = 10;
#endif
  return kCallInstructionSize;
}

void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
  // Empty because there is no need for relocation information for the code
  // patching in Deoptimizer::PatchCodeForDeoptimization below.
}

void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  Address code_start_address = code->instruction_start();

  // Invalidate the relocation information, as it will be invalidated by the
  // code patching below and is no longer needed.
  code->InvalidateRelocation();

  if (FLAG_zap_code_space) {
    // Fail hard and early if we enter this code object again.
    byte* pointer = code->FindCodeAgeSequence();
    if (pointer != NULL) {
      pointer += kNoCodeAgeSequenceLength;
    } else {
      pointer = code->instruction_start();
    }
    CodePatcher patcher(isolate, pointer, 2);
    patcher.masm()->bkpt(0);

    DeoptimizationInputData* data =
        DeoptimizationInputData::cast(code->deoptimization_data());
    int osr_offset = data->OsrPcOffset()->value();
    if (osr_offset > 0) {
      CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
                              2);
      osr_patcher.masm()->bkpt(0);
    }
  }

  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
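    // A pc offset of -1 marks an entry with no lazy-deopt call site to patch.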
    if (deopt_data->Pc(i)->value() == -1) continue;
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
    // Calls in unoptimized code must have a predictable size, but this is
    // optimized code, so the call size does not need to be predictable.
    int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(
        deopt_entry, kRelocInfo_NONEPTR);
    DCHECK(call_size_in_bytes <= patch_size());
    CodePatcher patcher(isolate, call_address, call_size_in_bytes);
    patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
    DCHECK(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
    DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }
}

void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
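  // Record the values the resumed stub code expects to find in r2 and r3,
  // the first two argument registers in the zSeries ABI: the handler
  // parameter count and the address of the C++ deoptimization handler.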
  ApiFunction function(descriptor->deoptimization_handler());
  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
  int params = descriptor->GetHandlerParameterCount();
  output_frame->SetRegister(r2.code(), params);
  output_frame->SetRegister(r3.code(), handler);
}

void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
    double double_value = input_->GetDoubleRegister(i);
    output_frame->SetDoubleRegister(i, double_value);
  }
}

#define __ masm()->

// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::TableEntryGenerator::Generate() {
  GeneratePrologue();

  // Save all the registers onto the stack
  const int kNumberOfRegisters = Register::kNumRegisters;

  RegList restored_regs = kJSCallerSaved | kCalleeSaved;

  const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;

  // Save all double registers before messing with them.
  __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister dreg = DoubleRegister::from_code(code);
    int offset = code * kDoubleSize;
    __ StoreDouble(dreg, MemOperand(sp, offset));
  }

  // Push all GPRs onto the stack
  __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize));
  __ StoreMultipleP(r0, sp, MemOperand(sp));  // Save all 16 registers
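  // The stack now looks like this (lowest address first):
  //   sp -> kNumberOfRegisters GPR slots (r0..r15)
  //         kDoubleRegsSize bytes of saved double registers
  //         bailout id pushed by the entry sequence in GeneratePrologue()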

  __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ StoreP(fp, MemOperand(ip));

  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;

  // Get the bailout id from the stack.
  __ LoadP(r4, MemOperand(sp, kSavedRegistersAreaSize));

  // Cleanse the return address for 31-bit mode.
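  // (In 31-bit mode the top bit of an address is the addressing-mode bit;
  // CleanseP clears it so the return address can be used as data.)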
  __ CleanseP(r14);

  // Get the address of the location in the code object (r5) (return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register r6.
  __ LoadRR(r5, r14);
  __ la(r6, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
  __ SubP(r6, fp, r6);
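  // r6 = fp minus the sp the deoptimizing frame had before this stub pushed
  // the saved registers and the bailout id, i.e. the fp-to-sp delta.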

  // Allocate a new deoptimizer object.
  // Pass six arguments in r2 to r7.
  __ PrepareCallCFunction(6, r7);
  __ LoadImmP(r2, Operand::Zero());
  Label context_check;
  __ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(r3, &context_check);
  __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ LoadImmP(r3, Operand(type()));  // bailout type.
  // r4: bailout id already loaded.
  // r5: code address or 0 already loaded.
  // r6: fp-to-sp delta.
  // Param 6: isolate is passed on the stack.
  __ mov(r7, Operand(ExternalReference::isolate_address(isolate())));
  __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
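  // (The zSeries C ABI passes at most five integer arguments in registers,
  // r2 through r6, hence the stack slot for the sixth argument.)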

  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve the "deoptimizer" object in register r2 and load the input
  // frame descriptor pointer into r3 (deoptimizer->input_).
  __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));

  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  // TODO(john.yan): optimize the following code by using the mvc
  // instruction, e.g.:
  //   __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
  //          MemOperand(sp), kNumberOfRegisters * kPointerSize);
  DCHECK(Register::kNumRegisters == kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ LoadP(r4, MemOperand(sp, i * kPointerSize));
    __ StoreP(r4, MemOperand(r3, offset));
  }

  int double_regs_offset = FrameDescription::double_registers_offset();
  // Copy double registers to
  // double_registers_[DoubleRegister::kNumRegisters]
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    int dst_offset = code * kDoubleSize + double_regs_offset;
    int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
    // TODO(joransiu): MVC opportunity
    __ LoadDouble(d0, MemOperand(sp, src_offset));
    __ StoreDouble(d0, MemOperand(r3, dst_offset));
  }

  // Remove the bailout id and the saved registers from the stack.
  __ la(sp, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
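  // sp is now back to the value it had at the deoptimization call site.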

  // Compute a pointer to the unwinding limit in register r4; that is
  // the first stack slot not part of the input frame.
  __ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
  __ AddP(r4, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
  Label pop_loop;
  Label pop_loop_header;
  __ b(&pop_loop_header, Label::kNear);
  __ bind(&pop_loop);
  __ pop(r6);
  __ StoreP(r6, MemOperand(r5, 0));
  __ la(r5, MemOperand(r5, kPointerSize));
  __ bind(&pop_loop_header);
  __ CmpP(r4, sp);
  __ bne(&pop_loop);
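  // The whole activation frame has now been copied into the input
  // FrameDescription's frame_content_ area.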

  // Compute the output frame in the deoptimizer.
  __ push(r2);  // Preserve deoptimizer object across call.
  // r2: deoptimizer object; r3: scratch.
  __ PrepareCallCFunction(1, r3);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ pop(r2);  // Restore deoptimizer object (class Deoptimizer).

  __ LoadP(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
  // Outer loop state: r6 = current "FrameDescription** output_",
  // r3 = one past the last FrameDescription**.
  __ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
  __ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset()));  // r6 is output_.
  __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
  __ AddP(r3, r6, r3);
  __ b(&outer_loop_header, Label::kNear);

  __ bind(&outer_push_loop);
  // Inner loop state: r4 = current FrameDescription*, r5 = loop index.
  __ LoadP(r4, MemOperand(r6, 0));  // output_[ix]
  __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
  __ b(&inner_loop_header, Label::kNear);

  __ bind(&inner_push_loop);
  __ AddP(r5, Operand(-sizeof(intptr_t)));
  __ AddP(r8, r4, r5);
  __ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
  __ push(r8);

  __ bind(&inner_loop_header);
  __ CmpP(r5, Operand::Zero());
  __ bne(&inner_push_loop);  // test for gt?

  __ AddP(r6, r6, Operand(kPointerSize));
  __ bind(&outer_loop_header);
  __ CmpP(r6, r3);
  __ blt(&outer_push_loop);

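  // Restore the double registers from the input frame description, where
  // they were saved near the top of this function.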
  __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister dreg = DoubleRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
    __ ld(dreg, MemOperand(r3, src_offset));
  }

  // Push state, pc, and continuation from the last output frame.
  __ LoadP(r8, MemOperand(r4, FrameDescription::state_offset()));
  __ push(r8);
  __ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
  __ push(r8);
  __ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
  __ push(r8);

  // Restore the registers from the last output frame.
  __ LoadRR(r1, r4);
  for (int i = kNumberOfRegisters - 1; i > 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((restored_regs & (1 << i)) != 0) {
      __ LoadP(ToRegister(i), MemOperand(r1, offset));
    }
  }

  __ InitializeRootRegister();

  __ pop(ip);  // get continuation, leave pc on stack
  __ pop(r14);
  __ Jump(ip);
  __ stop("Unreachable.");
}

void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of deoptimization entries. Note that any
  // registers may still be live.
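  // Each entry pushes its index and branches to the shared code that follows
  // the table; since every entry is exactly table_entry_size_ bytes (checked
  // by the DCHECK below), entry i sits at table start + i * table_entry_size_.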
  Label done;
  for (int i = 0; i < count(); i++) {
    int start = masm()->pc_offset();
    USE(start);
    __ lay(sp, MemOperand(sp, -kPointerSize));
    __ LoadImmP(ip, Operand(i));
    __ b(&done);
    int end = masm()->pc_offset();
    USE(end);
    DCHECK(masm()->pc_offset() - start == table_entry_size_);
  }
  __ bind(&done);
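  // Store the entry index (still in ip) into the slot reserved by each
  // entry's lay above; Generate() reads it back as the bailout id.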
  __ StoreP(ip, MemOperand(sp));
}

void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}

void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}

void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No out-of-line constant pool support.
  UNREACHABLE();
}

#undef __

}  // namespace internal
}  // namespace v8