// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arm64/frames-arm64.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"


namespace v8 {
namespace internal {


int Deoptimizer::patch_size() {
  // Size of the code used to patch lazy bailout points.
  // Patching is done by Deoptimizer::DeoptimizeFunction.
  return 4 * kInstructionSize;
}


void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
  // Empty because there is no need for relocation information for the code
  // patching in Deoptimizer::PatchCodeForDeoptimization below.
}


void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  // Invalidate the relocation information, as it will become invalid by the
  // code patching below, and is not needed any more.
  code->InvalidateRelocation();

  // TODO(jkummerow): if (FLAG_zap_code_space), make the code object's
  // entry sequence unusable (see other architectures).

  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  Address code_start_address = code->instruction_start();
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;

    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);

    PatchingAssembler patcher(isolate, call_address,
                              patch_size() / kInstructionSize);
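    // The patched sequence fills the four instruction slots of patch_size():
    // a pc-relative literal load into ip0, a call through ip0, and a 64-bit
    // literal (two slots) holding the lazy deoptimization entry address.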
    patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
    patcher.blr(ip0);
    patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));

    DCHECK((prev_call_address == NULL) ||
           (call_address >= prev_call_address + patch_size()));
    DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }
}


void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
  ApiFunction function(descriptor->deoptimization_handler());
  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
  int params = descriptor->GetHandlerParameterCount();
  output_frame->SetRegister(x0.code(), params);
  output_frame->SetRegister(x1.code(), handler);
}


void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
    double double_value = input_->GetDoubleRegister(i);
    output_frame->SetDoubleRegister(i, double_value);
  }
}



#define __ masm()->

void Deoptimizer::TableEntryGenerator::Generate() {
  GeneratePrologue();

  // TODO(all): This code needs to be revisited. We probably only need to save
  // caller-saved registers here. Callee-saved registers can be stored directly
  // in the input frame.

  // Save all allocatable floating point registers.
  CPURegList saved_fp_registers(
      CPURegister::kFPRegister, kDRegSizeInBits,
      RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask());
  __ PushCPURegList(saved_fp_registers);

  // We save all the registers except jssp, sp and lr.
  CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
  saved_registers.Combine(fp);
  __ PushCPURegList(saved_registers);

  __ Mov(x3, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ Str(fp, MemOperand(x3));

  const int kSavedRegistersAreaSize =
      (saved_registers.Count() * kXRegSize) +
      (saved_fp_registers.Count() * kDRegSize);

  // Floating point registers are saved on the stack above core registers.
  const int kFPRegistersOffset = saved_registers.Count() * kXRegSize;

  // Get the bailout id from the stack.
  Register bailout_id = x2;
  __ Peek(bailout_id, kSavedRegistersAreaSize);

  Register code_object = x3;
  Register fp_to_sp = x4;
  // Get the address of the location in the code object. This is the return
  // address for lazy deoptimization.
  __ Mov(code_object, lr);
  // Compute the fp-to-sp delta, and correct one word for bailout id.
  __ Add(fp_to_sp, __ StackPointer(),
         kSavedRegistersAreaSize + (1 * kPointerSize));
  __ Sub(fp_to_sp, fp, fp_to_sp);

  // Allocate a new deoptimizer object.
  __ Mov(x0, 0);
  Label context_check;
  __ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(x1, &context_check);
  __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ Mov(x1, type());
  // Following arguments are already loaded:
  //  - x2: bailout id
  //  - x3: code object address
  //  - x4: fp-to-sp delta
  __ Mov(x5, ExternalReference::isolate_address(isolate()));
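  // Together with x0 (function, or 0) and x1 (bailout type) loaded above,
  // these are the six arguments passed to Deoptimizer::New() below.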

  {
    // Call Deoptimizer::New().
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve "deoptimizer" object in register x0.
  Register deoptimizer = x0;

  // Get the input frame descriptor pointer.
  __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));

  // Copy core registers into the input frame.
  CPURegList copy_to_input = saved_registers;
  for (int i = 0; i < saved_registers.Count(); i++) {
    __ Peek(x2, i * kPointerSize);
    CPURegister current_reg = copy_to_input.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Str(x2, MemOperand(x1, offset));
  }

  // Copy FP registers to the input frame.
  CPURegList copy_fp_to_input = saved_fp_registers;
  for (int i = 0; i < saved_fp_registers.Count(); i++) {
    int src_offset = kFPRegistersOffset + (i * kDoubleSize);
    __ Peek(x2, src_offset);
    CPURegister reg = copy_fp_to_input.PopLowestIndex();
    int dst_offset = FrameDescription::double_registers_offset() +
                     (reg.code() * kDoubleSize);
    __ Str(x2, MemOperand(x1, dst_offset));
  }

  // Remove the bailout id and the saved registers from the stack.
  __ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));

  // Compute a pointer to the unwinding limit in register x2; that is
  // the first stack slot not part of the input frame.
  Register unwind_limit = x2;
  __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
  __ Add(unwind_limit, unwind_limit, __ StackPointer());

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ Add(x3, x1, FrameDescription::frame_content_offset());
  Label pop_loop;
  Label pop_loop_header;
  __ B(&pop_loop_header);
  __ Bind(&pop_loop);
  __ Pop(x4);
  __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
  __ Bind(&pop_loop_header);
  __ Cmp(unwind_limit, __ StackPointer());
  __ B(ne, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ Push(x0);  // Preserve deoptimizer object across call.

  {
    // Call Deoptimizer::ComputeOutputFrames().
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ Pop(x4);  // Restore deoptimizer object (class Deoptimizer).

  __ Ldr(__ StackPointer(),
         MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
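  // The stack pointer now points at the top of the caller's frame, ready for
  // the output frames to be pushed below it.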

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
  __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
  __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
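  // x0 now points at the first FrameDescription* in the output array and x1
  // points just past the last one.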
  __ B(&outer_loop_header);

  __ Bind(&outer_push_loop);
  Register current_frame = x2;
  __ Ldr(current_frame, MemOperand(x0, 0));
  __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
  __ B(&inner_loop_header);

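  // Push the frame contents from the highest offset down to offset zero, so
  // the frame is laid out in the right order on the descending stack.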
  __ Bind(&inner_push_loop);
  __ Sub(x3, x3, kPointerSize);
  __ Add(x6, current_frame, x3);
  __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
  __ Push(x7);
  __ Bind(&inner_loop_header);
  __ Cbnz(x3, &inner_push_loop);

  __ Add(x0, x0, kPointerSize);
  __ Bind(&outer_loop_header);
  __ Cmp(x0, x1);
  __ B(lt, &outer_push_loop);

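  // Restore the saved floating point registers from the input frame
  // description.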
  __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
  DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
         !saved_fp_registers.IncludesAliasOf(fp_zero) &&
         !saved_fp_registers.IncludesAliasOf(fp_scratch));
  while (!saved_fp_registers.IsEmpty()) {
    const CPURegister reg = saved_fp_registers.PopLowestIndex();
    int src_offset = FrameDescription::double_registers_offset() +
                     (reg.code() * kDoubleSize);
    __ Ldr(reg, MemOperand(x1, src_offset));
  }

  // Push state from the last output frame.
  __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
  __ Push(x6);

  // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
  // stack, then pops it all into registers. Here, we try to load it directly
  // into the relevant registers. Is this correct? If so, we should improve the
  // ARM code.
  // TODO(all): This code needs to be revisited. We probably don't need to
  // restore all the registers as fullcodegen does not keep live values in
  // registers (note that at least fp must be restored though).

  // Restore registers from the last output frame.
  // Note that lr is not in the list of saved_registers and will be restored
  // later. We can use it to hold the address of the last output frame while
  // reloading the other registers.
  DCHECK(!saved_registers.IncludesAliasOf(lr));
  Register last_output_frame = lr;
  __ Mov(last_output_frame, current_frame);

  // We don't need to restore x7 as it will be clobbered later to hold the
  // continuation address.
  Register continuation = x7;
  saved_registers.Remove(continuation);

  while (!saved_registers.IsEmpty()) {
    // TODO(all): Look for opportunities to optimize this by using ldp.
    CPURegister current_reg = saved_registers.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Ldr(current_reg, MemOperand(last_output_frame, offset));
  }

  __ Ldr(continuation, MemOperand(last_output_frame,
                                  FrameDescription::continuation_offset()));
  __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
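  // lr now holds the pc of the last output frame; the continuation code is
  // expected to eventually return to it.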
  __ InitializeRootRegister();
  __ Br(continuation);
}


// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry.
const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;


void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  UseScratchRegisterScope temps(masm());
  Register entry_id = temps.AcquireX();

  // Create a sequence of deoptimization entries.
  // Note that registers are still live when jumping to an entry.
  Label done;
  {
    InstructionAccurateScope scope(masm());

    // The number of entries will never exceed kMaxNumberOfEntries.
    // As long as kMaxNumberOfEntries is a valid 16-bit immediate you can use
    // a movz instruction to load the entry id.
    DCHECK(is_uint16(Deoptimizer::kMaxNumberOfEntries));

    for (int i = 0; i < count(); i++) {
      int start = masm()->pc_offset();
      USE(start);
      __ movz(entry_id, i);
      __ b(&done);
      DCHECK(masm()->pc_offset() - start == table_entry_size_);
    }
  }
  __ Bind(&done);
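  // The entry id pushed here is read back as the bailout id by
  // TableEntryGenerator::Generate() (via Peek above).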
  __ Push(entry_id);
}


void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No embedded constant pool support.
  UNREACHABLE();
}


#undef __

}  // namespace internal
}  // namespace v8