/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_X86_64_ASSEMBLER_X86_64_H_
#define ART_COMPILER_UTILS_X86_64_ASSEMBLER_X86_64_H_

#include <vector>
#include "base/macros.h"
#include "constants_x86_64.h"
#include "globals.h"
#include "managed_register_x86_64.h"
#include "offsets.h"
#include "utils/assembler.h"
#include "utils.h"

namespace art {
namespace x86_64 {

// Encodes an immediate value for operands.
//
// Note: Immediates can be 64b on x86-64 for certain instructions, but are often restricted
// to 32b.
//
// Note: As we support cross-compilation, the value type must be int64_t. Please be aware of
// conversion rules in expressions regarding negation, especially size_t on 32b.
class Immediate {
 public:
  explicit Immediate(int64_t value) : value_(value) {}

  int64_t value() const { return value_; }

  bool is_int8() const { return IsInt(8, value_); }
  bool is_uint8() const { return IsUint(8, value_); }
  bool is_uint16() const { return IsUint(16, value_); }
  bool is_int32() const {
    // This does not work on 32b machines: return IsInt(32, value_);
    int64_t limit = static_cast<int64_t>(1) << 31;
    return (-limit <= value_) && (value_ < limit);
  }

 private:
  const int64_t value_;

  DISALLOW_COPY_AND_ASSIGN(Immediate);
};
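
// Illustrative sketch of the note above (not part of the API): negating an unsigned value such
// as size_t wraps before it is widened to int64_t, so on a 32b host the first form below ends up
// as a large positive immediate while the second is the intended -32.
//
//   size_t frame_size = 32;
//   Immediate wrong(-frame_size);                        // 0xFFFFFFE0 zero-extended on a 32b host.
//   Immediate right(-static_cast<int64_t>(frame_size));  // -32; right.is_int8() holds.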


class Operand {
 public:
  uint8_t mod() const {
    return (encoding_at(0) >> 6) & 3;
  }

  Register rm() const {
    return static_cast<Register>(encoding_at(0) & 7);
  }

  ScaleFactor scale() const {
    return static_cast<ScaleFactor>((encoding_at(1) >> 6) & 3);
  }

  Register index() const {
    return static_cast<Register>((encoding_at(1) >> 3) & 7);
  }

  Register base() const {
    return static_cast<Register>(encoding_at(1) & 7);
  }

  uint8_t rex() const {
    return rex_;
  }

  int8_t disp8() const {
    CHECK_GE(length_, 2);
    return static_cast<int8_t>(encoding_[length_ - 1]);
  }

  int32_t disp32() const {
    CHECK_GE(length_, 5);
    int32_t value;
    memcpy(&value, &encoding_[length_ - 4], sizeof(value));
    return value;
  }

  bool IsRegister(CpuRegister reg) const {
    return ((encoding_[0] & 0xF8) == 0xC0)  // Addressing mode is register only.
        && ((encoding_[0] & 0x07) == reg.LowBits())  // Register codes match.
        && (reg.NeedsRex() == ((rex_ & 1) != 0));  // REX.000B bits match.
  }

 protected:
  // Operand can be subclassed (e.g., Address).
  Operand() : rex_(0), length_(0) { }

  void SetModRM(uint8_t mod, CpuRegister rm) {
    CHECK_EQ(mod & ~3, 0);
    if (rm.NeedsRex()) {
      rex_ |= 0x41;  // REX.000B
    }
    encoding_[0] = (mod << 6) | rm.LowBits();
    length_ = 1;
  }

  void SetSIB(ScaleFactor scale, CpuRegister index, CpuRegister base) {
    CHECK_EQ(length_, 1);
    CHECK_EQ(scale & ~3, 0);
    if (base.NeedsRex()) {
      rex_ |= 0x41;  // REX.000B
    }
    if (index.NeedsRex()) {
      rex_ |= 0x42;  // REX.00X0
    }
    encoding_[1] = (scale << 6) | (static_cast<uint8_t>(index.LowBits()) << 3) |
        static_cast<uint8_t>(base.LowBits());
    length_ = 2;
  }

  void SetDisp8(int8_t disp) {
    CHECK(length_ == 1 || length_ == 2);
    encoding_[length_++] = static_cast<uint8_t>(disp);
  }

  void SetDisp32(int32_t disp) {
    CHECK(length_ == 1 || length_ == 2);
    int disp_size = sizeof(disp);
    memmove(&encoding_[length_], &disp, disp_size);
    length_ += disp_size;
  }

 private:
  uint8_t rex_;
  uint8_t length_;
  uint8_t encoding_[6];

  explicit Operand(CpuRegister reg) : rex_(0), length_(0) { SetModRM(3, reg); }

  // Get the operand encoding byte at the given index.
  uint8_t encoding_at(int index) const {
    CHECK_GE(index, 0);
    CHECK_LT(index, length_);
    return encoding_[index];
  }

  friend class X86_64Assembler;

  DISALLOW_COPY_AND_ASSIGN(Operand);
};


class Address : public Operand {
 public:
  Address(CpuRegister base, int32_t disp) {
    Init(base, disp);
  }

  Address(CpuRegister base, Offset disp) {
    Init(base, disp.Int32Value());
  }

  Address(CpuRegister base, FrameOffset disp) {
    CHECK_EQ(base.AsRegister(), RSP);
    Init(CpuRegister(RSP), disp.Int32Value());
  }

  Address(CpuRegister base, MemberOffset disp) {
    Init(base, disp.Int32Value());
  }

  void Init(CpuRegister base, int32_t disp) {
    // In the mod 00 encoding, register low bits 101 (RBP) do not mean [RBP] but select a disp32
    // form, and low bits 100 (RSP) select a SIB byte; hence the special cases below.
    if (disp == 0 && base.AsRegister() != RBP) {
      SetModRM(0, base);
      if (base.AsRegister() == RSP) {
        SetSIB(TIMES_1, CpuRegister(RSP), base);
      }
    } else if (disp >= -128 && disp <= 127) {
      SetModRM(1, base);
      if (base.AsRegister() == RSP) {
        SetSIB(TIMES_1, CpuRegister(RSP), base);
      }
      SetDisp8(disp);
    } else {
      SetModRM(2, base);
      if (base.AsRegister() == RSP) {
        SetSIB(TIMES_1, CpuRegister(RSP), base);
      }
      SetDisp32(disp);
    }
  }
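
  // Illustrative encodings produced by Init() (the ModRM reg/opcode bits are shown as zero here;
  // the assembler merges them in when the operand is emitted with an instruction):
  //   Init(CpuRegister(RAX), 0)   -> ModRM 0x00
  //   Init(CpuRegister(RAX), 16)  -> ModRM 0x40, disp8 0x10
  //   Init(CpuRegister(RSP), 8)   -> ModRM 0x44, SIB 0x24, disp8 0x08
  //   Init(CpuRegister(RBP), 0)   -> ModRM 0x45, disp8 0x00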

  Address(CpuRegister index, ScaleFactor scale, int32_t disp) {
    CHECK_NE(index.AsRegister(), RSP);  // Illegal addressing mode.
    SetModRM(0, CpuRegister(RSP));
    SetSIB(scale, index, CpuRegister(RBP));
    SetDisp32(disp);
  }

  Address(CpuRegister base, CpuRegister index, ScaleFactor scale, int32_t disp) {
    CHECK_NE(index.AsRegister(), RSP);  // Illegal addressing mode.
    if (disp == 0 && base.AsRegister() != RBP) {
      SetModRM(0, CpuRegister(RSP));
      SetSIB(scale, index, base);
    } else if (disp >= -128 && disp <= 127) {
      SetModRM(1, CpuRegister(RSP));
      SetSIB(scale, index, base);
      SetDisp8(disp);
    } else {
      SetModRM(2, CpuRegister(RSP));
      SetSIB(scale, index, base);
      SetDisp32(disp);
    }
  }

  // If no_rip is true then the Absolute address isn't RIP relative.
  static Address Absolute(uword addr, bool no_rip = false) {
    Address result;
    if (no_rip) {
      result.SetModRM(0, CpuRegister(RSP));
      result.SetSIB(TIMES_1, CpuRegister(RSP), CpuRegister(RBP));
      result.SetDisp32(addr);
    } else {
      result.SetModRM(0, CpuRegister(RBP));
      result.SetDisp32(addr);
    }
    return result;
  }

  // If no_rip is true then the Absolute address isn't RIP relative.
  static Address Absolute(ThreadOffset<8> addr, bool no_rip = false) {
    return Absolute(addr.Int32Value(), no_rip);
  }
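
  // Usage note (a sketch, not a specification): with the default no_rip = false the operand is
  // a RIP-relative disp32; with no_rip = true the ModRM+SIB form with neither base nor index
  // encodes a plain absolute [disp32]. The ThreadOffset<8> overload is typically paired with the
  // gs() segment override declared on X86_64Assembler below to reach Thread::Current().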

 private:
  Address() {}

  DISALLOW_COPY_AND_ASSIGN(Address);
};


class X86_64Assembler FINAL : public Assembler {
 public:
  X86_64Assembler() {}
  virtual ~X86_64Assembler() {}

  /*
   * Emit Machine Instructions.
   */
  void call(CpuRegister reg);
  void call(const Address& address);
  void call(Label* label);

  void pushq(CpuRegister reg);
  void pushq(const Address& address);
  void pushq(const Immediate& imm);

  void popq(CpuRegister reg);
  void popq(const Address& address);

  void movq(CpuRegister dst, const Immediate& src);
  void movl(CpuRegister dst, const Immediate& src);
  void movq(CpuRegister dst, CpuRegister src);
  void movl(CpuRegister dst, CpuRegister src);

  void movq(CpuRegister dst, const Address& src);
  void movl(CpuRegister dst, const Address& src);
  void movq(const Address& dst, CpuRegister src);
  void movl(const Address& dst, CpuRegister src);
  void movl(const Address& dst, const Immediate& imm);

  void movzxb(CpuRegister dst, CpuRegister src);
  void movzxb(CpuRegister dst, const Address& src);
  void movsxb(CpuRegister dst, CpuRegister src);
  void movsxb(CpuRegister dst, const Address& src);
  void movb(CpuRegister dst, const Address& src);
  void movb(const Address& dst, CpuRegister src);
  void movb(const Address& dst, const Immediate& imm);

  void movzxw(CpuRegister dst, CpuRegister src);
  void movzxw(CpuRegister dst, const Address& src);
  void movsxw(CpuRegister dst, CpuRegister src);
  void movsxw(CpuRegister dst, const Address& src);
  void movw(CpuRegister dst, const Address& src);
  void movw(const Address& dst, CpuRegister src);

  void leaq(CpuRegister dst, const Address& src);

  void movss(XmmRegister dst, const Address& src);
  void movss(const Address& dst, XmmRegister src);
  void movss(XmmRegister dst, XmmRegister src);

  void movd(XmmRegister dst, CpuRegister src);
  void movd(CpuRegister dst, XmmRegister src);

  void addss(XmmRegister dst, XmmRegister src);
  void addss(XmmRegister dst, const Address& src);
  void subss(XmmRegister dst, XmmRegister src);
  void subss(XmmRegister dst, const Address& src);
  void mulss(XmmRegister dst, XmmRegister src);
  void mulss(XmmRegister dst, const Address& src);
  void divss(XmmRegister dst, XmmRegister src);
  void divss(XmmRegister dst, const Address& src);

  void movsd(XmmRegister dst, const Address& src);
  void movsd(const Address& dst, XmmRegister src);
  void movsd(XmmRegister dst, XmmRegister src);

  void addsd(XmmRegister dst, XmmRegister src);
  void addsd(XmmRegister dst, const Address& src);
  void subsd(XmmRegister dst, XmmRegister src);
  void subsd(XmmRegister dst, const Address& src);
  void mulsd(XmmRegister dst, XmmRegister src);
  void mulsd(XmmRegister dst, const Address& src);
  void divsd(XmmRegister dst, XmmRegister src);
  void divsd(XmmRegister dst, const Address& src);

  void cvtsi2ss(XmmRegister dst, CpuRegister src);
  void cvtsi2sd(XmmRegister dst, CpuRegister src);

  void cvtss2si(CpuRegister dst, XmmRegister src);
  void cvtss2sd(XmmRegister dst, XmmRegister src);

  void cvtsd2si(CpuRegister dst, XmmRegister src);
  void cvtsd2ss(XmmRegister dst, XmmRegister src);

  void cvttss2si(CpuRegister dst, XmmRegister src);
  void cvttsd2si(CpuRegister dst, XmmRegister src);

  void cvtdq2pd(XmmRegister dst, XmmRegister src);

  void comiss(XmmRegister a, XmmRegister b);
  void comisd(XmmRegister a, XmmRegister b);

  void sqrtsd(XmmRegister dst, XmmRegister src);
  void sqrtss(XmmRegister dst, XmmRegister src);

  void xorpd(XmmRegister dst, const Address& src);
  void xorpd(XmmRegister dst, XmmRegister src);
  void xorps(XmmRegister dst, const Address& src);
  void xorps(XmmRegister dst, XmmRegister src);

  void andpd(XmmRegister dst, const Address& src);

  void flds(const Address& src);
  void fstps(const Address& dst);

  void fldl(const Address& src);
  void fstpl(const Address& dst);

  void fnstcw(const Address& dst);
  void fldcw(const Address& src);

  void fistpl(const Address& dst);
  void fistps(const Address& dst);
  void fildl(const Address& src);

  void fincstp();
  void ffree(const Immediate& index);

  void fsin();
  void fcos();
  void fptan();

  void xchgl(CpuRegister dst, CpuRegister src);
  void xchgq(CpuRegister dst, CpuRegister src);
  void xchgl(CpuRegister reg, const Address& address);

  void cmpl(CpuRegister reg, const Immediate& imm);
  void cmpl(CpuRegister reg0, CpuRegister reg1);
  void cmpl(CpuRegister reg, const Address& address);
  void cmpl(const Address& address, CpuRegister reg);
  void cmpl(const Address& address, const Immediate& imm);

  void cmpq(CpuRegister reg0, CpuRegister reg1);
  void cmpq(CpuRegister reg0, const Immediate& imm);
  void cmpq(CpuRegister reg0, const Address& address);

  void testl(CpuRegister reg1, CpuRegister reg2);
  void testl(CpuRegister reg, const Immediate& imm);

  void testq(CpuRegister reg, const Address& address);

  void andl(CpuRegister dst, const Immediate& imm);
  void andl(CpuRegister dst, CpuRegister src);
  void andq(CpuRegister dst, const Immediate& imm);

  void orl(CpuRegister dst, const Immediate& imm);
  void orl(CpuRegister dst, CpuRegister src);

  void xorl(CpuRegister dst, CpuRegister src);
  void xorq(CpuRegister dst, const Immediate& imm);
  void xorq(CpuRegister dst, CpuRegister src);

  void addl(CpuRegister dst, CpuRegister src);
  void addl(CpuRegister reg, const Immediate& imm);
  void addl(CpuRegister reg, const Address& address);
  void addl(const Address& address, CpuRegister reg);
  void addl(const Address& address, const Immediate& imm);

  void addq(CpuRegister reg, const Immediate& imm);
  void addq(CpuRegister dst, CpuRegister src);
  void addq(CpuRegister dst, const Address& address);

  void subl(CpuRegister dst, CpuRegister src);
  void subl(CpuRegister reg, const Immediate& imm);
  void subl(CpuRegister reg, const Address& address);

  void subq(CpuRegister reg, const Immediate& imm);
  void subq(CpuRegister dst, CpuRegister src);
  void subq(CpuRegister dst, const Address& address);

  void cdq();

  void idivl(CpuRegister reg);

  void imull(CpuRegister dst, CpuRegister src);
  void imull(CpuRegister reg, const Immediate& imm);
  void imull(CpuRegister reg, const Address& address);

  void imull(CpuRegister reg);
  void imull(const Address& address);

  void mull(CpuRegister reg);
  void mull(const Address& address);

  void shll(CpuRegister reg, const Immediate& imm);
  void shll(CpuRegister operand, CpuRegister shifter);
  void shrl(CpuRegister reg, const Immediate& imm);
  void shrl(CpuRegister operand, CpuRegister shifter);
  void sarl(CpuRegister reg, const Immediate& imm);
  void sarl(CpuRegister operand, CpuRegister shifter);

  void shrq(CpuRegister reg, const Immediate& imm);

  void negl(CpuRegister reg);
  void notl(CpuRegister reg);

  void enter(const Immediate& imm);
  void leave();

  void ret();
  void ret(const Immediate& imm);

  void nop();
  void int3();
  void hlt();

  void j(Condition condition, Label* label);

  void jmp(CpuRegister reg);
  void jmp(const Address& address);
  void jmp(Label* label);

  X86_64Assembler* lock();
  void cmpxchgl(const Address& address, CpuRegister reg);

  void mfence();

  X86_64Assembler* gs();

  void setcc(Condition condition, CpuRegister dst);

  //
  // Macros for High-level operations.
  //

  void AddImmediate(CpuRegister reg, const Immediate& imm);

  void LoadDoubleConstant(XmmRegister dst, double value);

  void DoubleNegate(XmmRegister d);
  void FloatNegate(XmmRegister f);

  void DoubleAbs(XmmRegister reg);

  void LockCmpxchgl(const Address& address, CpuRegister reg) {
    lock()->cmpxchgl(address, reg);
  }
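
  // A small usage sketch (assumed from the declarations above): lock() and gs() emit the
  // corresponding one-byte instruction prefix and return the assembler, so a prefixed
  // instruction is written as a chain, e.g. lock()->cmpxchgl(address, reg) as LockCmpxchgl()
  // does above.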

  //
  // Misc. functionality
  //
  int PreferredLoopAlignment() { return 16; }
  void Align(int alignment, int offset);
  void Bind(Label* label);

  //
  // Overridden common assembler high-level functionality
  //

  // Emit code that will create an activation on the stack
  void BuildFrame(size_t frame_size, ManagedRegister method_reg,
                  const std::vector<ManagedRegister>& callee_save_regs,
                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;

  // Emit code that will remove an activation from the stack
  void RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs)
      OVERRIDE;

  void IncreaseFrameSize(size_t adjust) OVERRIDE;
  void DecreaseFrameSize(size_t adjust) OVERRIDE;

  // Store routines
  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;

  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;

  void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm, ManagedRegister scratch)
      OVERRIDE;

  void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
                                  ManagedRegister scratch) OVERRIDE;

  void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;

  void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
                     ManagedRegister scratch) OVERRIDE;

  // Load routines
  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;

  void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;

  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

  void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE;

  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;

  void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;

  // Copying routines
  void Move(ManagedRegister dest, ManagedRegister src, size_t size);

  void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
                              ManagedRegister scratch) OVERRIDE;

  void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
      OVERRIDE;

  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;

  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;

  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
            size_t size) OVERRIDE;

  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
            size_t size) OVERRIDE;

  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
            size_t size) OVERRIDE;

  void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
            ManagedRegister scratch, size_t size) OVERRIDE;

  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
            ManagedRegister scratch, size_t size) OVERRIDE;

  void MemoryBarrier(ManagedRegister) OVERRIDE;

  // Sign extension
  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Zero extension
  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Exploit fast access in managed code to Thread::Current()
  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;

  // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the handle scope entry to see if the value is
  // NULL.
  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;

  // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
  // value is null and null_allowed.
  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
                              ManagedRegister scratch, bool null_allowed) OVERRIDE;

  // src holds a handle scope entry (Object**); load this into dst.
  virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
                                            ManagedRegister src);

  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
  // know that src may not be null.
  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;

  // Call to address held at [base+offset]
  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
  void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) OVERRIDE;

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an ExceptionSlowPath if it is.
  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;

 private:
  void EmitUint8(uint8_t value);
  void EmitInt32(int32_t value);
  void EmitInt64(int64_t value);
  void EmitRegisterOperand(uint8_t rm, uint8_t reg);
  void EmitXmmRegisterOperand(uint8_t rm, XmmRegister reg);
  void EmitFixup(AssemblerFixup* fixup);
  void EmitOperandSizeOverride();

  void EmitOperand(uint8_t rm, const Operand& operand);
  void EmitImmediate(const Immediate& imm);
  void EmitComplex(uint8_t rm, const Operand& operand, const Immediate& immediate);
  void EmitLabel(Label* label, int instruction_size);
  void EmitLabelLink(Label* label);
  void EmitNearLabelLink(Label* label);

  void EmitGenericShift(bool wide, int rm, CpuRegister reg, const Immediate& imm);
  void EmitGenericShift(int rm, CpuRegister operand, CpuRegister shifter);

  // If any input is true, output the necessary REX prefix.
  void EmitOptionalRex(bool force, bool w, bool r, bool x, bool b);

  // Emit a REX prefix byte if necessary for reg, i.e. if reg is a register in the range R8 to R15.
  void EmitOptionalRex32(CpuRegister reg);
  void EmitOptionalRex32(CpuRegister dst, CpuRegister src);
  void EmitOptionalRex32(XmmRegister dst, XmmRegister src);
  void EmitOptionalRex32(CpuRegister dst, XmmRegister src);
  void EmitOptionalRex32(XmmRegister dst, CpuRegister src);
  void EmitOptionalRex32(const Operand& operand);
  void EmitOptionalRex32(CpuRegister dst, const Operand& operand);
  void EmitOptionalRex32(XmmRegister dst, const Operand& operand);
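  // For reference (standard x86-64 encoding, not specific to this assembler): a REX prefix has
  // the bit layout 0100WRXB, where W selects 64-bit operand size, R extends the ModRM reg field,
  // X extends the SIB index, and B extends the ModRM rm / SIB base. An operand in R8..R15
  // presumably sets the matching bit here (e.g. a lone REX.B is 0x41, a lone REX.R is 0x44),
  // while RAX..RDI with 32-bit operands need no prefix at all.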

  // Emit a REX.W prefix plus necessary register bit encodings.
  void EmitRex64(CpuRegister reg);
  void EmitRex64(CpuRegister dst, CpuRegister src);
  void EmitRex64(CpuRegister dst, const Operand& operand);

  // Emit a REX prefix to normalize byte registers plus necessary register bit encodings.
  void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src);
  void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const Operand& operand);

  DISALLOW_COPY_AND_ASSIGN(X86_64Assembler);
};

inline void X86_64Assembler::EmitUint8(uint8_t value) {
  buffer_.Emit<uint8_t>(value);
}

inline void X86_64Assembler::EmitInt32(int32_t value) {
  buffer_.Emit<int32_t>(value);
}

inline void X86_64Assembler::EmitInt64(int64_t value) {
  buffer_.Emit<int64_t>(value);
}

inline void X86_64Assembler::EmitRegisterOperand(uint8_t rm, uint8_t reg) {
  CHECK_GE(rm, 0);
  CHECK_LT(rm, 8);
  buffer_.Emit<uint8_t>(0xC0 + (rm << 3) + reg);
}

inline void X86_64Assembler::EmitXmmRegisterOperand(uint8_t rm, XmmRegister reg) {
  EmitRegisterOperand(rm, static_cast<uint8_t>(reg.AsFloatRegister()));
}

inline void X86_64Assembler::EmitFixup(AssemblerFixup* fixup) {
  buffer_.EmitFixup(fixup);
}

inline void X86_64Assembler::EmitOperandSizeOverride() {
  EmitUint8(0x66);
}

}  // namespace x86_64
}  // namespace art

#endif  // ART_COMPILER_UTILS_X86_64_ASSEMBLER_X86_64_H_