/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_x86_64.h"

#include "base/casts.h"
#include "base/memory_region.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "thread.h"

namespace art {
namespace x86_64 {

static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::X86_64Core(static_cast<int>(reg));
}
static dwarf::Reg DWARFReg(FloatRegister reg) {
  return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = 8;

static constexpr size_t kNativeStackAlignment = 16;
static_assert(kNativeStackAlignment == kStackAlignment);

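// Note: R11 is caller-saved in the System V AMD64 ABI and is not used for argument passing,
// which makes it a safe choice for a scratch register in the stub code emitted here.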
static inline CpuRegister GetScratchRegister() {
  return CpuRegister(R11);
}

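// Convention used throughout the ART assemblers: '__' expands to the wrapped assembler, so
// '__ movq(...)' emits an instruction through the underlying X86_64Assembler (asm_).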
#define __ asm_.

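// Builds the managed JNI stub frame. The layout produced by the code below, from highest
// address to lowest:
//   return address               (pushed by the caller's call instruction)
//   callee-save GPR spills       (pushq per core spill register)
//   callee-save XMM spills       (movsd into the top of the remaining frame space)
//   rest of the frame            (whatever frame_size reserves beyond the spills)
//   ArtMethod* slot at RSP + 0   (written only when method_reg is a register)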
void X86_64JNIMacroAssembler::BuildFrame(size_t frame_size,
                                         ManagedRegister method_reg,
                                         ArrayRef<const ManagedRegister> spill_regs) {
  DCHECK_EQ(CodeSize(), 0U);  // Nothing emitted yet.
  cfi().SetCurrentCFAOffset(8);  // Return address on stack.
  // Note: @CriticalNative tail call is not used (would have frame_size == kFramePointerSize).
  if (method_reg.IsNoRegister()) {
    CHECK_ALIGNED(frame_size, kNativeStackAlignment);
  } else {
    CHECK_ALIGNED(frame_size, kStackAlignment);
  }
  size_t gpr_count = 0u;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsCpuRegister()) {
      __ pushq(spill.AsCpuRegister());
      gpr_count++;
      cfi().AdjustCFAOffset(kFramePointerSize);
      cfi().RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
    }
  }
  // Reserve the rest of the frame below the return address and GPR spills; this includes the
  // ArtMethod* slot at the bottom of the frame.
  int64_t rest_of_frame = static_cast<int64_t>(frame_size)
                          - (gpr_count * kFramePointerSize)
                          - kFramePointerSize /*return address*/;
  if (rest_of_frame != 0) {
    __ subq(CpuRegister(RSP), Immediate(rest_of_frame));
    cfi().AdjustCFAOffset(rest_of_frame);
  }

  // spill xmms
  int64_t offset = rest_of_frame;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsXmmRegister()) {
      offset -= sizeof(double);
      __ movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
      cfi().RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
    }
  }

  static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
                "Unexpected frame pointer size.");

  if (method_reg.IsRegister()) {
    __ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
  }
}

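// Mirrors BuildFrame: reload the XMM spills, release the rest of the frame, pop the GPR
// spills and return. CFI state is remembered before and restored after the epilogue so that
// unwind information stays valid for any code emitted after this exit block.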
void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                          ArrayRef<const ManagedRegister> spill_regs,
                                          bool may_suspend ATTRIBUTE_UNUSED) {
  CHECK_ALIGNED(frame_size, kNativeStackAlignment);
  cfi().RememberState();
  int gpr_count = 0;
  // unspill xmms
  int64_t offset = static_cast<int64_t>(frame_size)
      - (spill_regs.size() * kFramePointerSize)
      - kFramePointerSize;
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsXmmRegister()) {
      __ movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
      cfi().Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
      offset += sizeof(double);
    } else {
      gpr_count++;
    }
  }
  DCHECK_EQ(static_cast<size_t>(offset),
            frame_size - (gpr_count * kFramePointerSize) - kFramePointerSize);
  if (offset != 0) {
    __ addq(CpuRegister(RSP), Immediate(offset));
    cfi().AdjustCFAOffset(-offset);
  }
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsCpuRegister()) {
      __ popq(spill.AsCpuRegister());
      cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
      cfi().Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
    }
  }
  __ ret();
  // The CFI should be restored for any code that follows the exit block.
  cfi().RestoreState();
  cfi().DefCFAOffset(frame_size);
}

void X86_64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kNativeStackAlignment);
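    // Growing the frame with addq and a negative immediate is equivalent to subq with the
    // positive value; the CFA offset still grows by 'adjust'.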
    __ addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
    cfi().AdjustCFAOffset(adjust);
  }
}

static void DecreaseFrameSizeImpl(size_t adjust, X86_64Assembler* assembler) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kNativeStackAlignment);
    assembler->addq(CpuRegister(RSP), Immediate(adjust));
    assembler->cfi().AdjustCFAOffset(-adjust);
  }
}

void X86_64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  DecreaseFrameSizeImpl(adjust, &asm_);
}

void X86_64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size);
      __ movl(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
    } else {
      CHECK_EQ(8u, size);
      __ movq(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
    }
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(0u, size);
    __ movq(Address(CpuRegister(RSP), offs), src.AsRegisterPairLow());
    __ movq(Address(CpuRegister(RSP), FrameOffset(offs.Int32Value()+4)),
            src.AsRegisterPairHigh());
  } else if (src.IsX87Register()) {
    if (size == 4) {
      __ fstps(Address(CpuRegister(RSP), offs));
    } else {
      __ fstpl(Address(CpuRegister(RSP), offs));
    }
  } else {
    CHECK(src.IsXmmRegister());
    if (size == 4) {
      __ movss(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
    } else {
      __ movsd(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
    }
  }
}

void X86_64JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  CHECK(src.IsCpuRegister());
  __ movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
}

void X86_64JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  CHECK(src.IsCpuRegister());
  __ movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
}

void X86_64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm) {
  __ movl(Address(CpuRegister(RSP), dest), Immediate(imm));  // TODO(64) movq?
}

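// Thread-local fields are addressed GS-relative on x86-64: the GS segment base points at the
// current Thread, so gs()->... with Address::Absolute(offset, true) reads or writes the field
// at that offset inside Thread (the 'true' selects absolute rather than RIP-relative addressing).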
void X86_64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
                                                       FrameOffset fr_offs) {
  CpuRegister scratch = GetScratchRegister();
  __ leaq(scratch, Address(CpuRegister(RSP), fr_offs));
  __ gs()->movq(Address::Absolute(thr_offs, true), scratch);
}

void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
  __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
}

void X86_64JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
                                            ManagedRegister /*src*/,
                                            FrameOffset /*in_off*/) {
  UNIMPLEMENTED(FATAL);  // this case only currently exists for ARM
}

void X86_64JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size);
      __ movl(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
    } else {
      CHECK_EQ(8u, size);
      __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
    }
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(0u, size);
    __ movq(dest.AsRegisterPairLow(), Address(CpuRegister(RSP), src));
    __ movq(dest.AsRegisterPairHigh(), Address(CpuRegister(RSP), FrameOffset(src.Int32Value()+4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ flds(Address(CpuRegister(RSP), src));
    } else {
      __ fldl(Address(CpuRegister(RSP), src));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
    } else {
      __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
    }
  }
}

void X86_64JNIMacroAssembler::LoadFromThread(ManagedRegister mdest,
                                             ThreadOffset64 src, size_t size) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    if (size == 1u) {
      __ gs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src, true));
    } else {
      CHECK_EQ(4u, size);
      __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
    }
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ gs()->flds(Address::Absolute(src, true));
    } else {
      __ gs()->fldl(Address::Absolute(src, true));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
    } else {
      __ gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
    }
  }
}

void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(dest.IsCpuRegister());
  __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
}

void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest,
                                      ManagedRegister mbase,
                                      MemberOffset offs,
                                      bool unpoison_reference) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(base.IsCpuRegister());
  CHECK(dest.IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
  if (unpoison_reference) {
    __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
  }
}

void X86_64JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest,
                                         ManagedRegister mbase,
                                         Offset offs) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(base.IsCpuRegister());
  CHECK(dest.IsCpuRegister());
  __ movq(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
}

void X86_64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(dest.IsCpuRegister());
  __ gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
}

void X86_64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
  X86_64ManagedRegister reg = mreg.AsX86_64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
  } else {
    __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

void X86_64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  X86_64ManagedRegister reg = mreg.AsX86_64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
  } else {
    __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

void X86_64JNIMacroAssembler::MoveArguments(ArrayRef<ArgumentLocation> dests,
                                            ArrayRef<ArgumentLocation> srcs) {
  DCHECK_EQ(dests.size(), srcs.size());
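  // Pending register moves are tracked in a single 32-bit mask: bits [0, 16) stand for CPU
  // (general-purpose) registers and bits [16, 32) for XMM registers.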
  auto get_mask = [](ManagedRegister reg) -> uint32_t {
    X86_64ManagedRegister x86_64_reg = reg.AsX86_64();
    if (x86_64_reg.IsCpuRegister()) {
      size_t cpu_reg_number = static_cast<size_t>(x86_64_reg.AsCpuRegister().AsRegister());
      DCHECK_LT(cpu_reg_number, 16u);
      return 1u << cpu_reg_number;
    } else {
      DCHECK(x86_64_reg.IsXmmRegister());
      size_t xmm_reg_number = static_cast<size_t>(x86_64_reg.AsXmmRegister().AsFloatRegister());
      DCHECK_LT(xmm_reg_number, 16u);
      return (1u << 16u) << xmm_reg_number;
    }
  };
  // Collect registers to move while storing/copying args to stack slots.
  uint32_t src_regs = 0u;
  uint32_t dest_regs = 0u;
  for (size_t i = 0, arg_count = srcs.size(); i != arg_count; ++i) {
    const ArgumentLocation& src = srcs[i];
    const ArgumentLocation& dest = dests[i];
    DCHECK_EQ(src.GetSize(), dest.GetSize());
    if (dest.IsRegister()) {
      if (src.IsRegister() && src.GetRegister().Equals(dest.GetRegister())) {
        // Nothing to do.
      } else {
        if (src.IsRegister()) {
          src_regs |= get_mask(src.GetRegister());
        }
        dest_regs |= get_mask(dest.GetRegister());
      }
    } else {
      if (src.IsRegister()) {
        Store(dest.GetFrameOffset(), src.GetRegister(), dest.GetSize());
      } else {
        Copy(dest.GetFrameOffset(), src.GetFrameOffset(), dest.GetSize());
      }
    }
  }
  // Fill destination registers.
  // There should be no cycles, so this simple algorithm should make progress.
  while (dest_regs != 0u) {
    uint32_t old_dest_regs = dest_regs;
    for (size_t i = 0, arg_count = srcs.size(); i != arg_count; ++i) {
      const ArgumentLocation& src = srcs[i];
      const ArgumentLocation& dest = dests[i];
      if (!dest.IsRegister()) {
        continue;  // Stored in first loop above.
      }
      uint32_t dest_reg_mask = get_mask(dest.GetRegister());
      if ((dest_reg_mask & dest_regs) == 0u) {
        continue;  // Equals source, or already filled in one of previous iterations.
      }
      if ((dest_reg_mask & src_regs) != 0u) {
        continue;  // Cannot clobber this register yet.
      }
      if (src.IsRegister()) {
        Move(dest.GetRegister(), src.GetRegister(), dest.GetSize());
        src_regs &= ~get_mask(src.GetRegister());  // Allow clobbering source register.
      } else {
        Load(dest.GetRegister(), src.GetFrameOffset(), dest.GetSize());
      }
      dest_regs &= ~get_mask(dest.GetRegister());  // Destination register was filled.
    }
    CHECK_NE(old_dest_regs, dest_regs);
    DCHECK_EQ(0u, dest_regs & ~old_dest_regs);
  }
}

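// Register-to-register move. The X87-to-XMM case goes through a 16-byte stack temporary since
// there is no direct move between the x87 and SSE register files.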
void X86_64JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  DCHECK(!mdest.Equals(X86_64ManagedRegister::FromCpuRegister(GetScratchRegister().AsRegister())));
  X86_64ManagedRegister dest = mdest.AsX86_64();
  X86_64ManagedRegister src = msrc.AsX86_64();
  if (!dest.Equals(src)) {
    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
      __ movq(dest.AsCpuRegister(), src.AsCpuRegister());
    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
      // Pass via stack and pop X87 register
      __ subq(CpuRegister(RSP), Immediate(16));
      if (size == 4) {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstps(Address(CpuRegister(RSP), 0));
        __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
      } else {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstpl(Address(CpuRegister(RSP), 0));
        __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
      }
      __ addq(CpuRegister(RSP), Immediate(16));
    } else {
      // TODO: x87, SSE
      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
    }
  }
}

void X86_64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src) {
  CpuRegister scratch = GetScratchRegister();
  __ movl(scratch, Address(CpuRegister(RSP), src));
  __ movl(Address(CpuRegister(RSP), dest), scratch);
}

void X86_64JNIMacroAssembler::CopyRef(FrameOffset dest,
                                      ManagedRegister base,
                                      MemberOffset offs,
                                      bool unpoison_reference) {
  CpuRegister scratch = GetScratchRegister();
  __ movl(scratch, Address(base.AsX86_64().AsCpuRegister(), offs));
  if (unpoison_reference) {
    __ MaybeUnpoisonHeapReference(scratch);
  }
  __ movl(Address(CpuRegister(RSP), dest), scratch);
}

void X86_64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset64 thr_offs) {
  CpuRegister scratch = GetScratchRegister();
  __ gs()->movq(scratch, Address::Absolute(thr_offs, true));
  __ movq(Address(CpuRegister(RSP), fr_offs), scratch);
}

void X86_64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
                                                 FrameOffset fr_offs,
                                                 ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  Load(scratch, fr_offs, 8);
  __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
}

void X86_64JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size) {
  DCHECK(size == 4 || size == 8) << size;
  CpuRegister scratch = GetScratchRegister();
  if (size == 8) {
    __ movq(scratch, Address(CpuRegister(RSP), src));
    __ movq(Address(CpuRegister(RSP), dest), scratch);
  } else {
    __ movl(scratch, Address(CpuRegister(RSP), src));
    __ movl(Address(CpuRegister(RSP), dest), scratch);
  }
}

void X86_64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
                                   ManagedRegister /*src_base*/,
                                   Offset /*src_offset*/,
                                   ManagedRegister /*scratch*/,
                                   size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

void X86_64JNIMacroAssembler::Copy(ManagedRegister dest_base,
                                   Offset dest_offset,
                                   FrameOffset src,
                                   ManagedRegister scratch,
                                   size_t size) {
  CHECK(scratch.IsNoRegister());
  CHECK_EQ(size, 4u);
  __ pushq(Address(CpuRegister(RSP), src));
  __ popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
}

void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
                                   FrameOffset src_base,
                                   Offset src_offset,
                                   ManagedRegister mscratch,
                                   size_t size) {
  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
  CHECK_EQ(size, 4u);
  __ movq(scratch, Address(CpuRegister(RSP), src_base));
  __ movq(scratch, Address(scratch, src_offset));
  __ movq(Address(CpuRegister(RSP), dest), scratch);
}

void X86_64JNIMacroAssembler::Copy(ManagedRegister dest,
                                   Offset dest_offset,
                                   ManagedRegister src,
                                   Offset src_offset,
                                   ManagedRegister scratch,
                                   size_t size) {
  CHECK_EQ(size, 4u);
  CHECK(scratch.IsNoRegister());
  __ pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
  __ popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
}

void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
                                   Offset dest_offset,
                                   FrameOffset src,
                                   Offset src_offset,
                                   ManagedRegister mscratch,
                                   size_t size) {
  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
  CHECK_EQ(size, 4u);
  CHECK_EQ(dest.Int32Value(), src.Int32Value());
  __ movq(scratch, Address(CpuRegister(RSP), src));
  __ pushq(Address(scratch, src_offset));
  __ popq(Address(scratch, dest_offset));
}

void X86_64JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
  __ mfence();
}

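// A jobject is implemented here as a pointer to the stack slot holding the spilled reference.
// When null_allowed is set and the spilled reference is null, a null jobject is produced
// instead of a pointer to the slot.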
void X86_64JNIMacroAssembler::CreateJObject(ManagedRegister mout_reg,
                                            FrameOffset spilled_reference_offset,
                                            ManagedRegister min_reg,
                                            bool null_allowed) {
  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
  if (in_reg.IsNoRegister()) {  // TODO(64): && null_allowed
    // Use out_reg as indicator of null.
    in_reg = out_reg;
    // TODO: movzwl
    __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), spilled_reference_offset));
  }
  CHECK(in_reg.IsCpuRegister());
  CHECK(out_reg.IsCpuRegister());
  VerifyObject(in_reg, null_allowed);
  if (null_allowed) {
    Label null_arg;
    if (!out_reg.Equals(in_reg)) {
      __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
    }
    __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), spilled_reference_offset));
    __ Bind(&null_arg);
  } else {
    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), spilled_reference_offset));
  }
}

void X86_64JNIMacroAssembler::CreateJObject(FrameOffset out_off,
                                            FrameOffset spilled_reference_offset,
                                            bool null_allowed) {
  CpuRegister scratch = GetScratchRegister();
  if (null_allowed) {
    Label null_arg;
    __ movl(scratch, Address(CpuRegister(RSP), spilled_reference_offset));
    __ testl(scratch, scratch);
    __ j(kZero, &null_arg);
    __ leaq(scratch, Address(CpuRegister(RSP), spilled_reference_offset));
    __ Bind(&null_arg);
  } else {
    __ leaq(scratch, Address(CpuRegister(RSP), spilled_reference_offset));
  }
  __ movq(Address(CpuRegister(RSP), out_off), scratch);
}

void X86_64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86_64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86_64JNIMacroAssembler::Jump(ManagedRegister mbase, Offset offset) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  CHECK(base.IsCpuRegister());
  __ jmp(Address(base.AsCpuRegister(), offset.Int32Value()));
}

void X86_64JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  CHECK(base.IsCpuRegister());
  __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
  // TODO: place reference map on call
}

void X86_64JNIMacroAssembler::Call(FrameOffset base, Offset offset) {
  CpuRegister scratch = GetScratchRegister();
  __ movq(scratch, Address(CpuRegister(RSP), base));
  __ call(Address(scratch, offset));
}

void X86_64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset) {
  __ gs()->call(Address::Absolute(offset, true));
}

void X86_64JNIMacroAssembler::GetCurrentThread(ManagedRegister dest) {
  __ gs()->movq(dest.AsX86_64().AsCpuRegister(),
                Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
}

void X86_64JNIMacroAssembler::GetCurrentThread(FrameOffset offset) {
  CpuRegister scratch = GetScratchRegister();
  __ gs()->movq(scratch, Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
  __ movq(Address(CpuRegister(RSP), offset), scratch);
}

// Slowpath entered when Thread::Current()->_exception is non-null
class X86_64ExceptionSlowPath final : public SlowPath {
 public:
  explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
  void Emit(Assembler *sp_asm) override;
 private:
  const size_t stack_adjust_;
};

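// Poll for a pending exception: compare the Thread's exception field (GS-relative) against
// null and, if it is set, branch to a slow path that fixes up the frame, loads the exception
// into RDI and calls the pDeliverException entrypoint, which does not return.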
void X86_64JNIMacroAssembler::ExceptionPoll(size_t stack_adjust) {
  X86_64ExceptionSlowPath* slow = new (__ GetAllocator()) X86_64ExceptionSlowPath(stack_adjust);
  __ GetBuffer()->EnqueueSlowPath(slow);
  __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true),
                Immediate(0));
  __ j(kNotEqual, slow->Entry());
}

std::unique_ptr<JNIMacroLabel> X86_64JNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new X86_64JNIMacroLabel());
}

void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ jmp(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) {
  CHECK(label != nullptr);

  art::x86_64::Condition x86_64_cond;
  switch (cond) {
    case JNIMacroUnaryCondition::kZero:
      x86_64_cond = art::x86_64::kZero;
      break;
    case JNIMacroUnaryCondition::kNotZero:
      x86_64_cond = art::x86_64::kNotZero;
      break;
    default:
      LOG(FATAL) << "Not implemented condition: " << static_cast<int>(cond);
      UNREACHABLE();
  }

  // CMP self->tls32_.is_gc_marking, 0
  // Jcc <Offset>
  DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
  __ gs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86_64PointerSize>(), true),
                Immediate(0));
  __ j(x86_64_cond, X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ Bind(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

#undef __

void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
  X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  // Note: the return value is dead
  if (stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSizeImpl(stack_adjust_, sp_asm);
  }
  // Pass exception as argument in RDI
  __ gs()->movq(CpuRegister(RDI),
                Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
  __ gs()->call(
      Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
  // this call should never return
  __ int3();
#undef __
}

}  // namespace x86_64
}  // namespace art