// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/assembler-inl.h"
#include "src/deoptimizer.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"

namespace v8 {
namespace internal {


#define __ masm()->


// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::TableEntryGenerator::Generate() {
  GeneratePrologue();

  // Unlike on ARM we don't save all the registers, just the useful ones.
  // For the rest, there are gaps on the stack, so the offsets remain the same.
  const int kNumberOfRegisters = Register::kNumRegisters;

  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
  RegList saved_regs = restored_regs | sp.bit() | ra.bit();

  const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
  const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters;

  // Save all FPU registers before messing with them.
  __ Subu(sp, sp, Operand(kDoubleRegsSize));
  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
    int offset = code * kDoubleSize;
    __ Sdc1(fpu_reg, MemOperand(sp, offset));
  }

  __ Subu(sp, sp, Operand(kFloatRegsSize));
  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
    int code = config->GetAllocatableFloatCode(i);
    const FloatRegister fpu_reg = FloatRegister::from_code(code);
    int offset = code * kFloatSize;
    __ swc1(fpu_reg, MemOperand(sp, offset));
  }

  // Push saved_regs (needed to populate FrameDescription::registers_).
  // Leave gaps for other registers.
  __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
    if ((saved_regs & (1 << i)) != 0) {
      __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
    }
  }
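  // At this point the saved state occupies the stack as follows, from low to
  // high addresses:
  //   [sp + 0]                                  core registers (with gaps)
  //   [sp + kNumberOfRegisters * kPointerSize]  single-precision FPU registers
  //   above those                               double-precision FPU registers
  // The bailout id pushed by the deoptimization table entry sits just above
  // this saved area.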

  __ li(a2, Operand(ExternalReference::Create(
                IsolateAddressId::kCEntryFPAddress, isolate())));
  __ sw(fp, MemOperand(a2));

  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;

  // Get the bailout id from the stack.
  __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object (a3) (return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register t0.
  __ mov(a3, ra);
  // Correct one word for bailout id.
  __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));

  __ Subu(t0, fp, t0);

  // Allocate a new deoptimizer object.
  __ PrepareCallCFunction(6, t1);
  // Pass four arguments in a0 to a3 and the fifth & sixth arguments on the
  // stack.
  __ mov(a0, zero_reg);
  Label context_check;
  __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(a1, &context_check);
  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ li(a1, Operand(static_cast<int>(deopt_kind())));
  // a2: bailout id already loaded.
  // a3: code address or 0 already loaded.
  __ sw(t0, CFunctionArgumentOperand(5));  // Fp-to-sp delta.
  __ li(t1, Operand(ExternalReference::isolate_address(isolate())));
  __ sw(t1, CFunctionArgumentOperand(6));  // Isolate.
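  // The six arguments for Deoptimizer::New() are now in place: a0 holds the
  // function (or 0), a1 the deopt kind, a2 the bailout id, a3 the code
  // address, and the fp-to-sp delta and the isolate sit in the fifth and
  // sixth C argument slots on the stack.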
  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
  }

  // Preserve the "deoptimizer" object in register v0 and load the input
  // frame descriptor pointer into a1 (deoptimizer->input_). Move the deopt
  // object to a0 for the call to Deoptimizer::ComputeOutputFrames() below.
  __ mov(a0, v0);
  __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));

  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((saved_regs & (1 << i)) != 0) {
      __ lw(a2, MemOperand(sp, i * kPointerSize));
      __ sw(a2, MemOperand(a1, offset));
    } else if (FLAG_debug_code) {
      __ li(a2, kDebugZapValue);
      __ sw(a2, MemOperand(a1, offset));
    }
  }

  int double_regs_offset = FrameDescription::double_registers_offset();
  // Copy FPU registers to
  // double_registers_[DoubleRegister::kNumAllocatableRegisters].
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    int dst_offset = code * kDoubleSize + double_regs_offset;
    int src_offset =
        code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
    __ Ldc1(f0, MemOperand(sp, src_offset));
    __ Sdc1(f0, MemOperand(a1, dst_offset));
  }

  // Copy FPU registers to
  // float_registers_[FloatRegister::kNumAllocatableRegisters].
  int float_regs_offset = FrameDescription::float_registers_offset();
  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
    int code = config->GetAllocatableFloatCode(i);
    int dst_offset = code * kFloatSize + float_regs_offset;
    int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
    __ lwc1(f0, MemOperand(sp, src_offset));
    __ swc1(f0, MemOperand(a1, dst_offset));
  }

  // Remove the bailout id and the saved registers from the stack.
  __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));

  // Compute a pointer to the unwinding limit in register a2; that is
  // the first stack slot not part of the input frame.
  __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
  __ Addu(a2, a2, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
  Label pop_loop;
  Label pop_loop_header;
  __ BranchShort(&pop_loop_header);
  __ bind(&pop_loop);
  __ pop(t0);
  __ sw(t0, MemOperand(a3, 0));
  __ addiu(a3, a3, sizeof(uint32_t));
  __ bind(&pop_loop_header);
  __ BranchShort(&pop_loop, ne, a2, Operand(sp));
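  // sp now equals the unwinding limit: the whole input frame has been copied
  // into the input FrameDescription and popped off the stack.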

  // Compute the output frame in the deoptimizer.
  __ push(a0);  // Preserve deoptimizer object across call.
  // a0: deoptimizer object; a1: scratch.
  __ PrepareCallCFunction(1, a1);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
  }
  __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).

  __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
  // Outer loop state: t0 = current "FrameDescription** output_",
  // a1 = one past the last FrameDescription**.
  __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
  __ lw(t0, MemOperand(a0, Deoptimizer::output_offset()));  // t0 is output_.
  __ Lsa(a1, t0, a1, kPointerSizeLog2);
  __ BranchShort(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
  __ lw(a2, MemOperand(t0, 0));  // output_[ix]
  __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
  __ BranchShort(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ Subu(a3, a3, Operand(sizeof(uint32_t)));
  __ Addu(t2, a2, Operand(a3));
  __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
  __ push(t3);
  __ bind(&inner_loop_header);
  __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));

  __ Addu(t0, t0, Operand(kPointerSize));
  __ bind(&outer_loop_header);
  __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));

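  // Restore the double registers from the input frame description.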
  __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
    __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
  }

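  // a2 still holds the FrameDescription of the last (topmost) output frame
  // from the loop above.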
  // Push pc and continuation from the last output frame.
  __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
  __ push(t2);
  __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
  __ push(t2);


  // Technically restoring 'at' should work unless zero_reg is also restored,
  // but it's safer to check for this.
  DCHECK(!(at.bit() & restored_regs));
  // Restore the registers from the last output frame.
  __ mov(at, a2);
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((restored_regs & (1 << i)) != 0) {
      __ lw(ToRegister(i), MemOperand(at, offset));
    }
  }

  __ InitializeRootRegister();

  __ pop(at);  // Get continuation, leave pc on stack.
  __ pop(ra);
  __ Jump(at);
  __ stop("Unreachable.");
}


// Maximum size of a table entry generated below.
#ifdef _MIPS_ARCH_MIPS32R6
const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
#else
const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
#endif
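// The sizes differ because MIPS32R6 entries can use a branch without a delay
// slot (li + branch = 2 instructions), while earlier MIPS32 entries need a
// branch with the li in its delay slot plus a trailing nop (3 instructions);
// see GeneratePrologue() below.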

void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());

  // Create a sequence of deoptimization entries.
  // Note that registers are still live when jumping to an entry.
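  // Each entry loads its own index into kScratchReg and branches (possibly
  // via trampolines) to the push at the end of the table; Generate() later
  // reads the pushed value back as the bailout id.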
  Label table_start, done, trampoline_jump;
  __ bind(&table_start);

#ifdef _MIPS_ARCH_MIPS32R6
  int kMaxEntriesBranchReach =
      (1 << (kImm26Bits - 2)) / (table_entry_size_ / kInstrSize);
#else
  int kMaxEntriesBranchReach =
      (1 << (kImm16Bits - 2)) / (table_entry_size_ / kInstrSize);
#endif
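  // The branch immediate encodes a signed offset counted in instructions, so
  // a short branch can only skip a limited stretch of the table; dividing a
  // conservative fraction of that reach by the instructions per entry gives
  // the largest number of entries a single short branch can span.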

  if (count() <= kMaxEntriesBranchReach) {
    // Common case.
    for (int i = 0; i < count(); i++) {
      Label start;
      __ bind(&start);
      DCHECK(is_int16(i));
      if (IsMipsArchVariant(kMips32r6)) {
        __ li(kScratchReg, i);
        __ BranchShort(PROTECT, &done);
      } else {
        __ BranchShort(USE_DELAY_SLOT, &done);  // Expose delay slot.
        __ li(kScratchReg, i);                  // In the delay slot.
        __ nop();
      }
      DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
    }

    DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
              count() * table_entry_size_);
    __ bind(&done);
    __ Push(kScratchReg);
  } else {
    DCHECK(!IsMipsArchVariant(kMips32r6));
    // Uncommon case: the branch cannot reach all entries.
    // Create mini trampolines to reach the end of the table.
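    // Every kMaxEntriesBranchReach entries, bind the pending trampoline label
    // (the short branches of the previous group land here) and branch on
    // toward a fresh trampoline label, chaining until the final push of
    // kScratchReg at the end of the table.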
    for (int i = 0, j = 0; i < count(); i++, j++) {
      Label start;
      __ bind(&start);
      DCHECK(is_int16(i));
      if (j >= kMaxEntriesBranchReach) {
        j = 0;
        __ li(kScratchReg, i);
        __ bind(&trampoline_jump);
        trampoline_jump = Label();
        __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
        __ nop();
      } else {
        __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);  // Expose delay slot.
        __ li(kScratchReg, i);                             // In the delay slot.
        __ nop();
      }
      DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
    }

    DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
              count() * table_entry_size_);
    __ bind(&trampoline_jump);
    __ Push(kScratchReg);
  }
}

bool Deoptimizer::PadTopOfStackRegister() { return false; }

void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No embedded constant pool support.
  UNREACHABLE();
}


#undef __


}  // namespace internal
}  // namespace v8