1
2 /*---------------------------------------------------------------*/
3 /*--- begin host_amd64_defs.h ---*/
4 /*---------------------------------------------------------------*/
5
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
9
10 Copyright (C) 2004-2013 OpenWorks LLP
11 info@open-works.net
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
26 02110-1301, USA.
27
28 The GNU General Public License is contained in the file COPYING.
29
30 Neither the names of the U.S. Department of Energy nor the
31 University of California nor the names of its contributors may be
32 used to endorse or promote products derived from this software
33 without prior written permission.
34 */
35
36 #ifndef __VEX_HOST_AMD64_DEFS_H
37 #define __VEX_HOST_AMD64_DEFS_H
38
39 #include "libvex_basictypes.h"
40 #include "libvex.h" // VexArch
41 #include "host_generic_regs.h" // HReg
42
43 /* --------- Registers. --------- */
44
45 /* The usual HReg abstraction. There are 16 real int regs, 6 real
46 float regs, and 16 real vector regs.
47 */
48
49 #define ST_IN static inline
hregAMD64_RSI(void)50 ST_IN HReg hregAMD64_RSI ( void ) { return mkHReg(False, HRcInt64, 6, 0); }
hregAMD64_RDI(void)51 ST_IN HReg hregAMD64_RDI ( void ) { return mkHReg(False, HRcInt64, 7, 1); }
hregAMD64_R8(void)52 ST_IN HReg hregAMD64_R8 ( void ) { return mkHReg(False, HRcInt64, 8, 2); }
hregAMD64_R9(void)53 ST_IN HReg hregAMD64_R9 ( void ) { return mkHReg(False, HRcInt64, 9, 3); }
hregAMD64_R12(void)54 ST_IN HReg hregAMD64_R12 ( void ) { return mkHReg(False, HRcInt64, 12, 4); }
hregAMD64_R13(void)55 ST_IN HReg hregAMD64_R13 ( void ) { return mkHReg(False, HRcInt64, 13, 5); }
hregAMD64_R14(void)56 ST_IN HReg hregAMD64_R14 ( void ) { return mkHReg(False, HRcInt64, 14, 6); }
hregAMD64_R15(void)57 ST_IN HReg hregAMD64_R15 ( void ) { return mkHReg(False, HRcInt64, 15, 7); }
hregAMD64_RBX(void)58 ST_IN HReg hregAMD64_RBX ( void ) { return mkHReg(False, HRcInt64, 3, 8); }
59
hregAMD64_XMM3(void)60 ST_IN HReg hregAMD64_XMM3 ( void ) { return mkHReg(False, HRcVec128, 3, 9); }
hregAMD64_XMM4(void)61 ST_IN HReg hregAMD64_XMM4 ( void ) { return mkHReg(False, HRcVec128, 4, 10); }
hregAMD64_XMM5(void)62 ST_IN HReg hregAMD64_XMM5 ( void ) { return mkHReg(False, HRcVec128, 5, 11); }
hregAMD64_XMM6(void)63 ST_IN HReg hregAMD64_XMM6 ( void ) { return mkHReg(False, HRcVec128, 6, 12); }
hregAMD64_XMM7(void)64 ST_IN HReg hregAMD64_XMM7 ( void ) { return mkHReg(False, HRcVec128, 7, 13); }
hregAMD64_XMM8(void)65 ST_IN HReg hregAMD64_XMM8 ( void ) { return mkHReg(False, HRcVec128, 8, 14); }
hregAMD64_XMM9(void)66 ST_IN HReg hregAMD64_XMM9 ( void ) { return mkHReg(False, HRcVec128, 9, 15); }
hregAMD64_XMM10(void)67 ST_IN HReg hregAMD64_XMM10 ( void ) { return mkHReg(False, HRcVec128, 10, 16); }
hregAMD64_XMM11(void)68 ST_IN HReg hregAMD64_XMM11 ( void ) { return mkHReg(False, HRcVec128, 11, 17); }
hregAMD64_XMM12(void)69 ST_IN HReg hregAMD64_XMM12 ( void ) { return mkHReg(False, HRcVec128, 12, 18); }
70
hregAMD64_R10(void)71 ST_IN HReg hregAMD64_R10 ( void ) { return mkHReg(False, HRcInt64, 10, 19); }
72
hregAMD64_RAX(void)73 ST_IN HReg hregAMD64_RAX ( void ) { return mkHReg(False, HRcInt64, 0, 20); }
hregAMD64_RCX(void)74 ST_IN HReg hregAMD64_RCX ( void ) { return mkHReg(False, HRcInt64, 1, 21); }
hregAMD64_RDX(void)75 ST_IN HReg hregAMD64_RDX ( void ) { return mkHReg(False, HRcInt64, 2, 22); }
hregAMD64_RSP(void)76 ST_IN HReg hregAMD64_RSP ( void ) { return mkHReg(False, HRcInt64, 4, 23); }
hregAMD64_RBP(void)77 ST_IN HReg hregAMD64_RBP ( void ) { return mkHReg(False, HRcInt64, 5, 24); }
hregAMD64_R11(void)78 ST_IN HReg hregAMD64_R11 ( void ) { return mkHReg(False, HRcInt64, 11, 25); }
79
hregAMD64_XMM0(void)80 ST_IN HReg hregAMD64_XMM0 ( void ) { return mkHReg(False, HRcVec128, 0, 26); }
hregAMD64_XMM1(void)81 ST_IN HReg hregAMD64_XMM1 ( void ) { return mkHReg(False, HRcVec128, 1, 27); }
82 #undef ST_IN
83
84 extern void ppHRegAMD64 ( HReg );
85
86
87 /* --------- Condition codes, AMD encoding. --------- */
88
89 typedef
90 enum {
91 Acc_O = 0, /* overflow */
92 Acc_NO = 1, /* no overflow */
93
94 Acc_B = 2, /* below */
95 Acc_NB = 3, /* not below */
96
97 Acc_Z = 4, /* zero */
98 Acc_NZ = 5, /* not zero */
99
100 Acc_BE = 6, /* below or equal */
101 Acc_NBE = 7, /* not below or equal */
102
103 Acc_S = 8, /* negative */
104 Acc_NS = 9, /* not negative */
105
106 Acc_P = 10, /* parity even */
107 Acc_NP = 11, /* not parity even */
108
109 Acc_L = 12, /* jump less */
110 Acc_NL = 13, /* not less */
111
112 Acc_LE = 14, /* less or equal */
113 Acc_NLE = 15, /* not less or equal */
114
115 Acc_ALWAYS = 16 /* the usual hack */
116 }
117 AMD64CondCode;
118
119 extern const HChar* showAMD64CondCode ( AMD64CondCode );
120
121
122 /* --------- Memory address expressions (amodes). --------- */
123
124 typedef
125 enum {
126 Aam_IR, /* Immediate + Reg */
127 Aam_IRRS /* Immediate + Reg1 + (Reg2 << Shift) */
128 }
129 AMD64AModeTag;
130
131 typedef
132 struct {
133 AMD64AModeTag tag;
134 union {
135 struct {
136 UInt imm;
137 HReg reg;
138 } IR;
139 struct {
140 UInt imm;
141 HReg base;
142 HReg index;
143 Int shift; /* 0, 1, 2 or 3 only */
144 } IRRS;
145 } Aam;
146 }
147 AMD64AMode;
148
149 extern AMD64AMode* AMD64AMode_IR ( UInt, HReg );
150 extern AMD64AMode* AMD64AMode_IRRS ( UInt, HReg, HReg, Int );
151
152 extern AMD64AMode* dopyAMD64AMode ( AMD64AMode* );
153
154 extern void ppAMD64AMode ( AMD64AMode* );
155
156
157 /* --------- Operand, which can be reg, immediate or memory. --------- */
158
159 typedef
160 enum {
161 Armi_Imm,
162 Armi_Reg,
163 Armi_Mem
164 }
165 AMD64RMITag;
166
167 typedef
168 struct {
169 AMD64RMITag tag;
170 union {
171 struct {
172 UInt imm32;
173 } Imm;
174 struct {
175 HReg reg;
176 } Reg;
177 struct {
178 AMD64AMode* am;
179 } Mem;
180 }
181 Armi;
182 }
183 AMD64RMI;
184
185 extern AMD64RMI* AMD64RMI_Imm ( UInt );
186 extern AMD64RMI* AMD64RMI_Reg ( HReg );
187 extern AMD64RMI* AMD64RMI_Mem ( AMD64AMode* );
188
189 extern void ppAMD64RMI ( AMD64RMI* );
190 extern void ppAMD64RMI_lo32 ( AMD64RMI* );
191
192
193 /* --------- Operand, which can be reg or immediate only. --------- */
194
195 typedef
196 enum {
197 Ari_Imm,
198 Ari_Reg
199 }
200 AMD64RITag;
201
202 typedef
203 struct {
204 AMD64RITag tag;
205 union {
206 struct {
207 UInt imm32;
208 } Imm;
209 struct {
210 HReg reg;
211 } Reg;
212 }
213 Ari;
214 }
215 AMD64RI;
216
217 extern AMD64RI* AMD64RI_Imm ( UInt );
218 extern AMD64RI* AMD64RI_Reg ( HReg );
219
220 extern void ppAMD64RI ( AMD64RI* );
221
222
223 /* --------- Operand, which can be reg or memory only. --------- */
224
225 typedef
226 enum {
227 Arm_Reg,
228 Arm_Mem
229 }
230 AMD64RMTag;
231
232 typedef
233 struct {
234 AMD64RMTag tag;
235 union {
236 struct {
237 HReg reg;
238 } Reg;
239 struct {
240 AMD64AMode* am;
241 } Mem;
242 }
243 Arm;
244 }
245 AMD64RM;
246
247 extern AMD64RM* AMD64RM_Reg ( HReg );
248 extern AMD64RM* AMD64RM_Mem ( AMD64AMode* );
249
250 extern void ppAMD64RM ( AMD64RM* );
251
252
253 /* --------- Instructions. --------- */
254
255 /* --------- */
256 typedef
257 enum {
258 Aun_NEG,
259 Aun_NOT
260 }
261 AMD64UnaryOp;
262
263 extern const HChar* showAMD64UnaryOp ( AMD64UnaryOp );
264
265
266 /* --------- */
267 typedef
268 enum {
269 Aalu_INVALID,
270 Aalu_MOV,
271 Aalu_CMP,
272 Aalu_ADD, Aalu_SUB, Aalu_ADC, Aalu_SBB,
273 Aalu_AND, Aalu_OR, Aalu_XOR,
274 Aalu_MUL
275 }
276 AMD64AluOp;
277
278 extern const HChar* showAMD64AluOp ( AMD64AluOp );
279
280
281 /* --------- */
282 typedef
283 enum {
284 Ash_INVALID,
285 Ash_SHL, Ash_SHR, Ash_SAR
286 }
287 AMD64ShiftOp;
288
289 extern const HChar* showAMD64ShiftOp ( AMD64ShiftOp );
290
291
292 /* --------- */
293 typedef
294 enum {
295 Afp_INVALID,
296 /* Binary */
297 Afp_SCALE, Afp_ATAN, Afp_YL2X, Afp_YL2XP1, Afp_PREM, Afp_PREM1,
298 /* Unary */
299 Afp_SQRT,
300 Afp_SIN, Afp_COS, Afp_TAN,
301 Afp_ROUND, Afp_2XM1
302 }
303 A87FpOp;
304
305 extern const HChar* showA87FpOp ( A87FpOp );
306
307
308 /* --------- */
309 typedef
310 enum {
311 Asse_INVALID,
312 /* mov */
313 Asse_MOV,
314 /* Floating point binary */
315 Asse_ADDF, Asse_SUBF, Asse_MULF, Asse_DIVF,
316 Asse_MAXF, Asse_MINF,
317 Asse_CMPEQF, Asse_CMPLTF, Asse_CMPLEF, Asse_CMPUNF,
318 /* Floating point unary */
319 Asse_RCPF, Asse_RSQRTF, Asse_SQRTF,
320 /* Bitwise */
321 Asse_AND, Asse_OR, Asse_XOR, Asse_ANDN,
322 Asse_ADD8, Asse_ADD16, Asse_ADD32, Asse_ADD64,
323 Asse_QADD8U, Asse_QADD16U,
324 Asse_QADD8S, Asse_QADD16S,
325 Asse_SUB8, Asse_SUB16, Asse_SUB32, Asse_SUB64,
326 Asse_QSUB8U, Asse_QSUB16U,
327 Asse_QSUB8S, Asse_QSUB16S,
328 Asse_MUL16,
329 Asse_MULHI16U,
330 Asse_MULHI16S,
331 Asse_AVG8U, Asse_AVG16U,
332 Asse_MAX16S,
333 Asse_MAX8U,
334 Asse_MIN16S,
335 Asse_MIN8U,
336 Asse_CMPEQ8, Asse_CMPEQ16, Asse_CMPEQ32,
337 Asse_CMPGT8S, Asse_CMPGT16S, Asse_CMPGT32S,
338 Asse_SHL16, Asse_SHL32, Asse_SHL64,
339 Asse_SHR16, Asse_SHR32, Asse_SHR64,
340 Asse_SAR16, Asse_SAR32,
341 Asse_PACKSSD, Asse_PACKSSW, Asse_PACKUSW,
342 Asse_UNPCKHB, Asse_UNPCKHW, Asse_UNPCKHD, Asse_UNPCKHQ,
343 Asse_UNPCKLB, Asse_UNPCKLW, Asse_UNPCKLD, Asse_UNPCKLQ
344 }
345 AMD64SseOp;
346
347 extern const HChar* showAMD64SseOp ( AMD64SseOp );
348
349
350 /* --------- */
351 typedef
352 enum {
353 Ain_Imm64, /* Generate 64-bit literal to register */
354 Ain_Alu64R, /* 64-bit mov/arith/logical, dst=REG */
355 Ain_Alu64M, /* 64-bit mov/arith/logical, dst=MEM */
356 Ain_Sh64, /* 64-bit shift/rotate, dst=REG or MEM */
357 Ain_Test64, /* 64-bit test (AND, set flags, discard result) */
358 Ain_Unary64, /* 64-bit not and neg */
359 Ain_Lea64, /* 64-bit compute EA into a reg */
360 Ain_Alu32R, /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
361 Ain_MulL, /* widening multiply */
362 Ain_Div, /* div and mod */
363 Ain_Push, /* push 64-bit value on stack */
364 Ain_Call, /* call to address in register */
365 Ain_XDirect, /* direct transfer to GA */
366 Ain_XIndir, /* indirect transfer to GA */
367 Ain_XAssisted, /* assisted transfer to GA */
368 Ain_CMov64, /* conditional move, 64-bit reg-reg only */
369 Ain_CLoad, /* cond. load to int reg, 32 bit ZX or 64 bit only */
370 Ain_CStore, /* cond. store from int reg, 32 or 64 bit only */
371 Ain_MovxLQ, /* reg-reg move, zx-ing/sx-ing top half */
372 Ain_LoadEX, /* mov{s,z}{b,w,l}q from mem to reg */
373 Ain_Store, /* store 32/16/8 bit value in memory */
374 Ain_Set64, /* convert condition code to 64-bit value */
375 Ain_Bsfr64, /* 64-bit bsf/bsr */
376 Ain_MFence, /* mem fence */
377 Ain_ACAS, /* 8/16/32/64-bit lock;cmpxchg */
378 Ain_DACAS, /* lock;cmpxchg8b/16b (doubleword ACAS, 2 x
379 32-bit or 2 x 64-bit only) */
380 Ain_A87Free, /* free up x87 registers */
381 Ain_A87PushPop, /* x87 loads/stores */
382 Ain_A87FpOp, /* x87 operations */
383 Ain_A87LdCW, /* load x87 control word */
384 Ain_A87StSW, /* store x87 status word */
385 Ain_LdMXCSR, /* load %mxcsr */
386 Ain_SseUComIS, /* ucomisd/ucomiss, then get %rflags into int
387 register */
388 Ain_SseSI2SF, /* scalar 32/64 int to 32/64 float conversion */
389 Ain_SseSF2SI, /* scalar 32/64 float to 32/64 int conversion */
390 Ain_SseSDSS, /* scalar float32 to/from float64 */
391 Ain_SseLdSt, /* SSE load/store 32/64/128 bits, no alignment
392 constraints, upper 96/64/0 bits arbitrary */
393 Ain_SseLdzLO, /* SSE load low 32/64 bits, zero remainder of reg */
394 Ain_Sse32Fx4, /* SSE binary, 32Fx4 */
395 Ain_Sse32FLo, /* SSE binary, 32F in lowest lane only */
396 Ain_Sse64Fx2, /* SSE binary, 64Fx2 */
397 Ain_Sse64FLo, /* SSE binary, 64F in lowest lane only */
398 Ain_SseReRg, /* SSE binary general reg-reg, Re, Rg */
399 Ain_SseCMov, /* SSE conditional move */
400 Ain_SseShuf, /* SSE2 shuffle (pshufd) */
401 //uu Ain_AvxLdSt, /* AVX load/store 256 bits,
402 //uu no alignment constraints */
403 //uu Ain_AvxReRg, /* AVX binary general reg-reg, Re, Rg */
404 Ain_EvCheck, /* Event check */
405 Ain_ProfInc /* 64-bit profile counter increment */
406 }
407 AMD64InstrTag;
408
409 /* Destinations are on the RIGHT (second operand) */
410
411 typedef
412 struct {
413 AMD64InstrTag tag;
414 union {
415 struct {
416 ULong imm64;
417 HReg dst;
418 } Imm64;
419 struct {
420 AMD64AluOp op;
421 AMD64RMI* src;
422 HReg dst;
423 } Alu64R;
424 struct {
425 AMD64AluOp op;
426 AMD64RI* src;
427 AMD64AMode* dst;
428 } Alu64M;
429 struct {
430 AMD64ShiftOp op;
431 UInt src; /* shift amount, or 0 means %cl */
432 HReg dst;
433 } Sh64;
434 struct {
435 UInt imm32;
436 HReg dst;
437 } Test64;
438 /* Not and Neg */
439 struct {
440 AMD64UnaryOp op;
441 HReg dst;
442 } Unary64;
443 /* 64-bit compute EA into a reg */
444 struct {
445 AMD64AMode* am;
446 HReg dst;
447 } Lea64;
448 /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
449 struct {
450 AMD64AluOp op;
451 AMD64RMI* src;
452 HReg dst;
453 } Alu32R;
454 /* 64 x 64 -> 128 bit widening multiply: RDX:RAX = RAX *s/u
455 r/m64 */
456 struct {
457 Bool syned;
458 AMD64RM* src;
459 } MulL;
460 /* amd64 div/idiv instruction. Modifies RDX and RAX and
461 reads src. */
462 struct {
463 Bool syned;
464 Int sz; /* 4 or 8 only */
465 AMD64RM* src;
466 } Div;
467 struct {
468 AMD64RMI* src;
469 } Push;
470 /* Pseudo-insn. Call target (an absolute address), on given
471 condition (which could be Xcc_ALWAYS). */
472 struct {
473 AMD64CondCode cond;
474 Addr64 target;
475 Int regparms; /* 0 .. 6 */
476 RetLoc rloc; /* where the return value will be */
477 } Call;
478 /* Update the guest RIP value, then exit requesting to chain
479 to it. May be conditional. */
480 struct {
481 Addr64 dstGA; /* next guest address */
482 AMD64AMode* amRIP; /* amode in guest state for RIP */
483 AMD64CondCode cond; /* can be Acc_ALWAYS */
484 Bool toFastEP; /* chain to the slow or fast point? */
485 } XDirect;
486 /* Boring transfer to a guest address not known at JIT time.
487 Not chainable. May be conditional. */
488 struct {
489 HReg dstGA;
490 AMD64AMode* amRIP;
491 AMD64CondCode cond; /* can be Acc_ALWAYS */
492 } XIndir;
493 /* Assisted transfer to a guest address, most general case.
494 Not chainable. May be conditional. */
495 struct {
496 HReg dstGA;
497 AMD64AMode* amRIP;
498 AMD64CondCode cond; /* can be Acc_ALWAYS */
499 IRJumpKind jk;
500 } XAssisted;
501 /* Mov src to dst on the given condition, which may not
502 be the bogus Acc_ALWAYS. */
503 struct {
504 AMD64CondCode cond;
505 HReg src;
506 HReg dst;
507 } CMov64;
508 /* conditional load to int reg, 32 bit ZX or 64 bit only.
509 cond may not be Acc_ALWAYS. */
510 struct {
511 AMD64CondCode cond;
512 UChar szB; /* 4 or 8 only */
513 AMD64AMode* addr;
514 HReg dst;
515 } CLoad;
516 /* cond. store from int reg, 32 or 64 bit only.
517 cond may not be Acc_ALWAYS. */
518 struct {
519 AMD64CondCode cond;
520 UChar szB; /* 4 or 8 only */
521 HReg src;
522 AMD64AMode* addr;
523 } CStore;
524 /* reg-reg move, sx-ing/zx-ing top half */
525 struct {
526 Bool syned;
527 HReg src;
528 HReg dst;
529 } MovxLQ;
530 /* Sign/Zero extending loads. Dst size is always 64 bits. */
531 struct {
532 UChar szSmall; /* only 1, 2 or 4 */
533 Bool syned;
534 AMD64AMode* src;
535 HReg dst;
536 } LoadEX;
537 /* 32/16/8 bit stores. */
538 struct {
539 UChar sz; /* only 1, 2 or 4 */
540 HReg src;
541 AMD64AMode* dst;
542 } Store;
543 /* Convert an amd64 condition code to a 64-bit value (0 or 1). */
544 struct {
545 AMD64CondCode cond;
546 HReg dst;
547 } Set64;
548 /* 64-bit bsf or bsr. */
549 struct {
550 Bool isFwds;
551 HReg src;
552 HReg dst;
553 } Bsfr64;
554 /* Mem fence. In short, an insn which flushes all preceding
555 loads and stores as much as possible before continuing.
556 On AMD64 we emit a real "mfence". */
557 struct {
558 } MFence;
559 struct {
560 AMD64AMode* addr;
561 UChar sz; /* 1, 2, 4 or 8 */
562 } ACAS;
563 struct {
564 AMD64AMode* addr;
565 UChar sz; /* 4 or 8 only */
566 } DACAS;
567
568 /* --- X87 --- */
569
570 /* A very minimal set of x87 insns, that operate exactly in a
571 stack-like way so no need to think about x87 registers. */
572
573 /* Do 'ffree' on %st(7) .. %st(7-nregs) */
574 struct {
575 Int nregs; /* 1 <= nregs <= 7 */
576 } A87Free;
577
578 /* Push a 32- or 64-bit FP value from memory onto the stack,
579 or move a value from the stack to memory and remove it
580 from the stack. */
581 struct {
582 AMD64AMode* addr;
583 Bool isPush;
584 UChar szB; /* 4 or 8 */
585 } A87PushPop;
586
587 /* Do an operation on the top-of-stack. This can be unary, in
588 which case it is %st0 = OP( %st0 ), or binary: %st0 = OP(
589 %st0, %st1 ). */
590 struct {
591 A87FpOp op;
592 } A87FpOp;
593
594 /* Load the FPU control word. */
595 struct {
596 AMD64AMode* addr;
597 } A87LdCW;
598
599 /* Store the FPU status word (fstsw m16) */
600 struct {
601 AMD64AMode* addr;
602 } A87StSW;
603
604 /* --- SSE --- */
605
606 /* Load 32 bits into %mxcsr. */
607 struct {
608 AMD64AMode* addr;
609 }
610 LdMXCSR;
611 /* ucomisd/ucomiss, then get %rflags into int register */
612 struct {
613 UChar sz; /* 4 or 8 only */
614 HReg srcL; /* xmm */
615 HReg srcR; /* xmm */
616 HReg dst; /* int */
617 } SseUComIS;
618 /* scalar 32/64 int to 32/64 float conversion */
619 struct {
620 UChar szS; /* 4 or 8 */
621 UChar szD; /* 4 or 8 */
622 HReg src; /* i class */
623 HReg dst; /* v class */
624 } SseSI2SF;
625 /* scalar 32/64 float to 32/64 int conversion */
626 struct {
627 UChar szS; /* 4 or 8 */
628 UChar szD; /* 4 or 8 */
629 HReg src; /* v class */
630 HReg dst; /* i class */
631 } SseSF2SI;
632 /* scalar float32 to/from float64 */
633 struct {
634 Bool from64; /* True: 64->32; False: 32->64 */
635 HReg src;
636 HReg dst;
637 } SseSDSS;
638 struct {
639 Bool isLoad;
640 UChar sz; /* 4, 8 or 16 only */
641 HReg reg;
642 AMD64AMode* addr;
643 } SseLdSt;
644 struct {
645 Int sz; /* 4 or 8 only */
646 HReg reg;
647 AMD64AMode* addr;
648 } SseLdzLO;
649 struct {
650 AMD64SseOp op;
651 HReg src;
652 HReg dst;
653 } Sse32Fx4;
654 struct {
655 AMD64SseOp op;
656 HReg src;
657 HReg dst;
658 } Sse32FLo;
659 struct {
660 AMD64SseOp op;
661 HReg src;
662 HReg dst;
663 } Sse64Fx2;
664 struct {
665 AMD64SseOp op;
666 HReg src;
667 HReg dst;
668 } Sse64FLo;
669 struct {
670 AMD64SseOp op;
671 HReg src;
672 HReg dst;
673 } SseReRg;
674 /* Mov src to dst on the given condition, which may not
675 be the bogus Xcc_ALWAYS. */
676 struct {
677 AMD64CondCode cond;
678 HReg src;
679 HReg dst;
680 } SseCMov;
681 struct {
682 Int order; /* 0 <= order <= 0xFF */
683 HReg src;
684 HReg dst;
685 } SseShuf;
686 //uu struct {
687 //uu Bool isLoad;
688 //uu HReg reg;
689 //uu AMD64AMode* addr;
690 //uu } AvxLdSt;
691 //uu struct {
692 //uu AMD64SseOp op;
693 //uu HReg src;
694 //uu HReg dst;
695 //uu } AvxReRg;
696 struct {
697 AMD64AMode* amCounter;
698 AMD64AMode* amFailAddr;
699 } EvCheck;
700 struct {
701 /* No fields. The address of the counter to inc is
702 installed later, post-translation, by patching it in,
703 as it is not known at translation time. */
704 } ProfInc;
705
706 } Ain;
707 }
708 AMD64Instr;
709
710 extern AMD64Instr* AMD64Instr_Imm64 ( ULong imm64, HReg dst );
711 extern AMD64Instr* AMD64Instr_Alu64R ( AMD64AluOp, AMD64RMI*, HReg );
712 extern AMD64Instr* AMD64Instr_Alu64M ( AMD64AluOp, AMD64RI*, AMD64AMode* );
713 extern AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, HReg dst );
714 extern AMD64Instr* AMD64Instr_Lea64 ( AMD64AMode* am, HReg dst );
715 extern AMD64Instr* AMD64Instr_Alu32R ( AMD64AluOp, AMD64RMI*, HReg );
716 extern AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp, UInt, HReg );
717 extern AMD64Instr* AMD64Instr_Test64 ( UInt imm32, HReg dst );
718 extern AMD64Instr* AMD64Instr_MulL ( Bool syned, AMD64RM* );
719 extern AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* );
720 extern AMD64Instr* AMD64Instr_Push ( AMD64RMI* );
721 extern AMD64Instr* AMD64Instr_Call ( AMD64CondCode, Addr64, Int, RetLoc );
722 extern AMD64Instr* AMD64Instr_XDirect ( Addr64 dstGA, AMD64AMode* amRIP,
723 AMD64CondCode cond, Bool toFastEP );
724 extern AMD64Instr* AMD64Instr_XIndir ( HReg dstGA, AMD64AMode* amRIP,
725 AMD64CondCode cond );
726 extern AMD64Instr* AMD64Instr_XAssisted ( HReg dstGA, AMD64AMode* amRIP,
727 AMD64CondCode cond, IRJumpKind jk );
728 extern AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode, HReg src, HReg dst );
729 extern AMD64Instr* AMD64Instr_CLoad ( AMD64CondCode cond, UChar szB,
730 AMD64AMode* addr, HReg dst );
731 extern AMD64Instr* AMD64Instr_CStore ( AMD64CondCode cond, UChar szB,
732 HReg src, AMD64AMode* addr );
733 extern AMD64Instr* AMD64Instr_MovxLQ ( Bool syned, HReg src, HReg dst );
734 extern AMD64Instr* AMD64Instr_LoadEX ( UChar szSmall, Bool syned,
735 AMD64AMode* src, HReg dst );
736 extern AMD64Instr* AMD64Instr_Store ( UChar sz, HReg src, AMD64AMode* dst );
737 extern AMD64Instr* AMD64Instr_Set64 ( AMD64CondCode cond, HReg dst );
738 extern AMD64Instr* AMD64Instr_Bsfr64 ( Bool isFwds, HReg src, HReg dst );
739 extern AMD64Instr* AMD64Instr_MFence ( void );
740 extern AMD64Instr* AMD64Instr_ACAS ( AMD64AMode* addr, UChar sz );
741 extern AMD64Instr* AMD64Instr_DACAS ( AMD64AMode* addr, UChar sz );
742
743 extern AMD64Instr* AMD64Instr_A87Free ( Int nregs );
744 extern AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB );
745 extern AMD64Instr* AMD64Instr_A87FpOp ( A87FpOp op );
746 extern AMD64Instr* AMD64Instr_A87LdCW ( AMD64AMode* addr );
747 extern AMD64Instr* AMD64Instr_A87StSW ( AMD64AMode* addr );
748 extern AMD64Instr* AMD64Instr_LdMXCSR ( AMD64AMode* );
749 extern AMD64Instr* AMD64Instr_SseUComIS ( Int sz, HReg srcL, HReg srcR, HReg dst );
750 extern AMD64Instr* AMD64Instr_SseSI2SF ( Int szS, Int szD, HReg src, HReg dst );
751 extern AMD64Instr* AMD64Instr_SseSF2SI ( Int szS, Int szD, HReg src, HReg dst );
752 extern AMD64Instr* AMD64Instr_SseSDSS ( Bool from64, HReg src, HReg dst );
753 extern AMD64Instr* AMD64Instr_SseLdSt ( Bool isLoad, Int sz, HReg, AMD64AMode* );
754 extern AMD64Instr* AMD64Instr_SseLdzLO ( Int sz, HReg, AMD64AMode* );
755 extern AMD64Instr* AMD64Instr_Sse32Fx4 ( AMD64SseOp, HReg, HReg );
756 extern AMD64Instr* AMD64Instr_Sse32FLo ( AMD64SseOp, HReg, HReg );
757 extern AMD64Instr* AMD64Instr_Sse64Fx2 ( AMD64SseOp, HReg, HReg );
758 extern AMD64Instr* AMD64Instr_Sse64FLo ( AMD64SseOp, HReg, HReg );
759 extern AMD64Instr* AMD64Instr_SseReRg ( AMD64SseOp, HReg, HReg );
760 extern AMD64Instr* AMD64Instr_SseCMov ( AMD64CondCode, HReg src, HReg dst );
761 extern AMD64Instr* AMD64Instr_SseShuf ( Int order, HReg src, HReg dst );
762 //uu extern AMD64Instr* AMD64Instr_AvxLdSt ( Bool isLoad, HReg, AMD64AMode* );
763 //uu extern AMD64Instr* AMD64Instr_AvxReRg ( AMD64SseOp, HReg, HReg );
764 extern AMD64Instr* AMD64Instr_EvCheck ( AMD64AMode* amCounter,
765 AMD64AMode* amFailAddr );
766 extern AMD64Instr* AMD64Instr_ProfInc ( void );
767
768
769 extern void ppAMD64Instr ( const AMD64Instr*, Bool );
770
771 /* Some functions that insulate the register allocator from details
772 of the underlying instruction set. */
773 extern void getRegUsage_AMD64Instr ( HRegUsage*, const AMD64Instr*, Bool );
774 extern void mapRegs_AMD64Instr ( HRegRemap*, AMD64Instr*, Bool );
775 extern Bool isMove_AMD64Instr ( const AMD64Instr*, HReg*, HReg* );
776 extern Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
777 UChar* buf, Int nbuf,
778 const AMD64Instr* i,
779 Bool mode64,
780 VexEndness endness_host,
781 const void* disp_cp_chain_me_to_slowEP,
782 const void* disp_cp_chain_me_to_fastEP,
783 const void* disp_cp_xindir,
784 const void* disp_cp_xassisted );
785
786 extern void genSpill_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
787 HReg rreg, Int offset, Bool );
788 extern void genReload_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
789 HReg rreg, Int offset, Bool );
790
791 extern const RRegUniverse* getRRegUniverse_AMD64 ( void );
792
793 extern HInstrArray* iselSB_AMD64 ( const IRSB*,
794 VexArch,
795 const VexArchInfo*,
796 const VexAbiInfo*,
797 Int offs_Host_EvC_Counter,
798 Int offs_Host_EvC_FailAddr,
799 Bool chainingAllowed,
800 Bool addProfInc,
801 Addr max_ga );
802
803 /* How big is an event check? This is kind of a kludge because it
804 depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
805 and so assumes that they are both <= 128, and so can use the short
806 offset encoding. This is all checked with assertions, so in the
807 worst case we will merely assert at startup. */
808 extern Int evCheckSzB_AMD64 (void);
809
810 /* Perform a chaining and unchaining of an XDirect jump. */
811 extern VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
812 void* place_to_chain,
813 const void* disp_cp_chain_me_EXPECTED,
814 const void* place_to_jump_to );
815
816 extern VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
817 void* place_to_unchain,
818 const void* place_to_jump_to_EXPECTED,
819 const void* disp_cp_chain_me );
820
821 /* Patch the counter location into an existing ProfInc point. */
822 extern VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
823 void* place_to_patch,
824 const ULong* location_of_counter );
825
826
827 #endif /* ndef __VEX_HOST_AMD64_DEFS_H */
828
829 /*---------------------------------------------------------------*/
830 /*--- end host_amd64_defs.h ---*/
831 /*---------------------------------------------------------------*/
832