1
2 /*--------------------------------------------------------------------*/
3 /*--- begin guest_tilegx_toIR.c ---*/
4 /*--------------------------------------------------------------------*/
5
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
9
10 Copyright (C) 2010-2015 Tilera Corp.
11
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License as
14 published by the Free Software Foundation; either version 2 of the
15 License, or (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
25 02111-1307, USA.
26
27 The GNU General Public License is contained in the file COPYING.
28 */
29
30 /* Contributed by Zhi-Gang Liu <zliu at tilera dot com> */
31
32 /* Translates TILEGX code to IR. */
33
34 #include "libvex_basictypes.h"
35 #include "libvex_ir.h"
36 #include "libvex.h"
37 #include "libvex_guest_tilegx.h"
38
39 #include "main_util.h"
40 #include "main_globals.h"
41 #include "guest_generic_bb_to_IR.h"
42 #include "guest_tilegx_defs.h"
43 #include "tilegx_disasm.h"
44
45 /*------------------------------------------------------------*/
46 /*--- Globals ---*/
47 /*------------------------------------------------------------*/
48
49 /* These are set at the start of the translation of a instruction, so
50 that we don't have to pass them around endlessly. CONST means does
51 not change during translation of the instruction.
52 */
53
54 /* CONST: is the host bigendian? This has to do with float vs double
55 register accesses on VFP, but it's complex and not properly thought
56 out. */
57 static VexEndness host_endness;
58
59 /* Pointer to the guest code area. */
60 static UChar *guest_code;
61
62 /* The guest address corresponding to guest_code[0]. */
63 static Addr64 guest_PC_bbstart;
64
65 /* CONST: The guest address for the instruction currently being
66 translated. */
67 static Addr64 guest_PC_curr_instr;
68
69 /* MOD: The IRSB* into which we're generating code. */
70 static IRSB *irsb;
71
72 /*------------------------------------------------------------*/
73 /*--- Debugging output ---*/
74 /*------------------------------------------------------------*/
75
76 #define DIP(format, args...) \
77 if (vex_traceflags & VEX_TRACE_FE) \
78 vex_printf(format, ## args)
79
80 /*------------------------------------------------------------*/
81 /*--- Helper bits and pieces for deconstructing the ---*/
82 /*--- tilegx insn stream. ---*/
83 /*------------------------------------------------------------*/
84
integerGuestRegOffset(UInt iregNo)85 static Int integerGuestRegOffset ( UInt iregNo )
86 {
87 return 8 * (iregNo);
88 }
89
90 /*------------------------------------------------------------*/
91 /*--- Field helpers ---*/
92 /*------------------------------------------------------------*/
93
94 /*------------------------------------------------------------*/
95 /*--- Helper bits and pieces for creating IR fragments. ---*/
96 /*------------------------------------------------------------*/
97
mkU8(UInt i)98 static IRExpr *mkU8 ( UInt i )
99 {
100 return IRExpr_Const(IRConst_U8((UChar) i));
101 }
102
103 /* Create an expression node for a 32-bit integer constant */
mkU32(UInt i)104 static IRExpr *mkU32 ( UInt i )
105 {
106 return IRExpr_Const(IRConst_U32(i));
107 }
108
109 /* Create an expression node for a 64-bit integer constant */
mkU64(ULong i)110 static IRExpr *mkU64 ( ULong i )
111 {
112 return IRExpr_Const(IRConst_U64(i));
113 }
114
mkexpr(IRTemp tmp)115 static IRExpr *mkexpr ( IRTemp tmp )
116 {
117 return IRExpr_RdTmp(tmp);
118 }
119
/* Build a unary-operator expression. */
static IRExpr *unop ( IROp op, IRExpr * a )
{
   return IRExpr_Unop(op, a);
}
124
/* Build a binary-operator expression. */
static IRExpr *binop ( IROp op, IRExpr * a1, IRExpr * a2 )
{
   return IRExpr_Binop(op, a1, a2);
}
129
/* Build a little-endian load of the given type from addr.  TILEGX
   guest memory accesses are always little-endian here. */
static IRExpr *load ( IRType ty, IRExpr * addr )
{
   return IRExpr_Load(Iend_LE, ty, addr);
}
137
138 /* Add a statement to the list held by "irsb". */
stmt(IRStmt * st)139 static void stmt ( IRStmt * st )
140 {
141 addStmtToIRSB(irsb, st);
142 }
143
144 #define OFFB_PC offsetof(VexGuestTILEGXState, guest_pc)
145
putPC(IRExpr * e)146 static void putPC ( IRExpr * e )
147 {
148 stmt(IRStmt_Put(OFFB_PC, e));
149 }
150
/* Bind expression e to the temporary dst. */
static void assign ( IRTemp dst, IRExpr * e )
{
   stmt(IRStmt_WrTmp(dst, e));
}
155
/* Build a little-endian store of data to addr and append it. */
static void store ( IRExpr * addr, IRExpr * data )
{
   stmt(IRStmt_Store(Iend_LE, addr, data));
}
160
161 /* Generate a new temporary of the given type. */
newTemp(IRType ty)162 static IRTemp newTemp ( IRType ty )
163 {
164 vassert(isPlausibleIRType(ty));
165 return newIRTemp(irsb->tyenv, ty);
166 }
167
extend_s_16to64(UInt x)168 static ULong extend_s_16to64 ( UInt x )
169 {
170 return (ULong) ((((Long) x) << 48) >> 48);
171 }
172
extend_s_8to64(UInt x)173 static ULong extend_s_8to64 ( UInt x )
174 {
175 return (ULong) ((((Long) x) << 56) >> 56);
176 }
177
getIReg(UInt iregNo)178 static IRExpr *getIReg ( UInt iregNo )
179 {
180 IRType ty = Ity_I64;
181 if(!(iregNo < 56 || iregNo == 63 ||
182 (iregNo >= 70 && iregNo <= 73))) {
183 vex_printf("iregNo=%u\n", iregNo);
184 vassert(0);
185 }
186 return IRExpr_Get(integerGuestRegOffset(iregNo), ty);
187 }
188
/* Write the 64-bit expression e to guest integer register archreg.
   Writes to r63 (the zero register) are silently discarded.
   Accepted register numbers: r0..r55, r63 and pseudo registers
   70, 72, 73; anything else aborts. */
static void putIReg ( UInt archreg, IRExpr * e )
{
   Bool ok = archreg < 56 || archreg == 63
             || archreg == 70 || archreg == 72 || archreg == 73;
   if (!ok) {
      vex_printf("archreg=%u\n", archreg);
      vassert(0);
   }
   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I64);
   if (archreg != 63)
      stmt(IRStmt_Put(integerGuestRegOffset(archreg), e));
}
201
/* Narrow a 32- or 64-bit int expr to 8/16/32 bits.  Clearly only
   some of these combinations make sense. */
/* Narrow expression e down to dst_ty.  Identity narrowing is a
   no-op; supported conversions are 32->16, 32->8, 64->32, 64->16
   and 64->8.  Any other combination panics. */
static IRExpr *narrowTo ( IRType dst_ty, IRExpr * e )
{
   IRType src_ty = typeOfIRExpr(irsb->tyenv, e);

   if (src_ty == dst_ty)
      return e;

   if (src_ty == Ity_I32) {
      if (dst_ty == Ity_I16)
         return unop(Iop_32to16, e);
      if (dst_ty == Ity_I8)
         return unop(Iop_32to8, e);
   } else if (src_ty == Ity_I64) {
      if (dst_ty == Ity_I32)
         return unop(Iop_64to32, e);
      if (dst_ty == Ity_I16)
         return unop(Iop_64to16, e);
      if (dst_ty == Ity_I8)
         return unop(Iop_64to8, e);
   }

   if (vex_traceflags & VEX_TRACE_FE) {
      vex_printf("\nsrc, dst tys are: ");
      ppIRType(src_ty);
      vex_printf(", ");
      ppIRType(dst_ty);
      vex_printf("\n");
   }
   vpanic("narrowTo(tilegx)");
   return e;
}
234
235 #define signExtend(_e, _n) \
236 ((_n == 32) ? \
237 unop(Iop_32Sto64, _e) : \
238 ((_n == 16) ? \
239 unop(Iop_16Sto64, _e) : \
240 (binop(Iop_Sar64, binop(Iop_Shl64, _e, mkU8(63 - (_n))), mkU8(63 - (_n))))))
241
dis_branch(IRExpr * guard,ULong imm)242 static IRStmt* dis_branch ( IRExpr* guard, ULong imm )
243 {
244 IRTemp t0;
245
246 t0 = newTemp(Ity_I1);
247 assign(t0, guard);
248 return IRStmt_Exit(mkexpr(t0), Ijk_Boring,
249 IRConst_U64(imm), OFFB_PC);
250 }
251
252 #define MARK_REG_WB(_rd, _td) \
253 do { \
254 vassert(rd_wb_index < 6); \
255 rd_wb_temp[rd_wb_index] = _td; \
256 rd_wb_reg[rd_wb_index] = _rd; \
257 rd_wb_index++; \
258 } while(0)
259
260
261 /* Expand/repeat byte _X 8 times to a 64-bit value */
262 #define V1EXP(_X) \
263 ({ \
264 _X = ((((UChar)(_X)) << 8) | ((UChar)(_X))); \
265 _X = (((_X) << 16) | (_X)); \
266 (((_X) << 32) | (_X)); \
267 })
268
269 /* Expand/repeat byte _X 4 times to a 64-bit value */
270 #define V2EXP(_X) \
271 ({ \
272 _X = ((((UChar)(_X)) << 16) | ((UChar)(_X))); \
273 (((_X) << 32) | (_X)); \
274 })
275
276 /*------------------------------------------------------------*/
277 /*--- Disassemble a single instruction ---*/
278 /*------------------------------------------------------------*/
279
280 /* Disassemble a single instruction bundle into IR. The bundle is
281 located in host memory at guest_instr, and has guest IP of
282 guest_PC_curr_instr, which will have been set before the call
283 here. */
disInstr_TILEGX_WRK(Bool (* resteerOkFn)(void *,Addr),Bool resteerCisOk,void * callback_opaque,Long delta64,const VexArchInfo * archinfo,const VexAbiInfo * abiinfo,Bool sigill_diag)284 static DisResult disInstr_TILEGX_WRK ( Bool(*resteerOkFn) (void *, Addr),
285 Bool resteerCisOk,
286 void *callback_opaque,
287 Long delta64,
288 const VexArchInfo * archinfo,
289 const VexAbiInfo * abiinfo,
290 Bool sigill_diag )
291 {
292 struct tilegx_decoded_instruction
293 decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE];
294 ULong cins, opcode = -1, rd, ra, rb, imm = 0;
295 ULong opd[4];
296 ULong opd_src_map, opd_dst_map, opd_imm_map;
297 Int use_dirty_helper;
298 IRTemp t0, t1, t2, t3, t4;
299 IRTemp tb[4];
300 IRTemp rd_wb_temp[6];
301 ULong rd_wb_reg[6];
302 /* Tilegx is a VLIW processor, we have to commit register write after read.*/
303 Int rd_wb_index;
304 Int n = 0, nr_insn;
305 DisResult dres;
306
307 /* The running delta */
308 Long delta = delta64;
309
310 /* Holds pc at the start of the insn, so that we can print
311 consistent error messages for unimplemented insns. */
312 //Long delta_start = delta;
313
314 UChar *code = (UChar *) (guest_code + delta);
315
316 IRStmt *bstmt = NULL; /* Branch statement. */
317 IRExpr *next = NULL; /* Next bundle expr. */
318 ULong jumpkind = Ijk_Boring;
319 ULong steering_pc;
320
321 /* Set result defaults. */
322 dres.whatNext = Dis_Continue;
323 dres.len = 0;
324 dres.continueAt = 0;
325 dres.jk_StopHere = Ijk_INVALID;
326
327 /* Verify the code addr is 8-byte aligned. */
328 vassert((((Addr)code) & 7) == 0);
329
330 /* Get the instruction bundle. */
331 cins = *((ULong *)(Addr) code);
332
333 /* "Special" instructions. */
334 /* Spot the 16-byte preamble: ****tilegx****
335 0:02b3c7ff91234fff { moveli zero, 4660 ; moveli zero, 22136 }
336 8:0091a7ff95678fff { moveli zero, 22136 ; moveli zero, 4660 }
337 */
338 #define CL_W0 0x02b3c7ff91234fffULL
339 #define CL_W1 0x0091a7ff95678fffULL
340
341 if (*((ULong*)(Addr)(code)) == CL_W0 &&
342 *((ULong*)(Addr)(code + 8)) == CL_W1) {
343 /* Got a "Special" instruction preamble. Which one is it? */
344 if (*((ULong*)(Addr)(code + 16)) ==
345 0x283a69a6d1483000ULL /* or r13, r13, r13 */ ) {
346 /* r0 = client_request ( r12 ) */
347 DIP("r0 = client_request ( r12 )\n");
348
349 putPC(mkU64(guest_PC_curr_instr + 24));
350
351 dres.jk_StopHere = Ijk_ClientReq;
352 dres.whatNext = Dis_StopHere;
353 dres.len = 24;
354 goto decode_success;
355
356 } else if (*((ULong*)(Addr)(code + 16)) ==
357 0x283a71c751483000ULL /* or r14, r14, r14 */ ) {
358 /* r11 = guest_NRADDR */
359 DIP("r11 = guest_NRADDR\n");
360 dres.len = 24;
361 putIReg(11, IRExpr_Get(offsetof(VexGuestTILEGXState, guest_NRADDR),
362 Ity_I64));
363 putPC(mkU64(guest_PC_curr_instr + 8));
364 goto decode_success;
365
366 } else if (*((ULong*)(Addr)(code + 16)) ==
367 0x283a79e7d1483000ULL /* or r15, r15, r15 */ ) {
368 /* branch-and-link-to-noredir r12 */
369 DIP("branch-and-link-to-noredir r12\n");
370 dres.len = 24;
371 putIReg(55, mkU64(guest_PC_curr_instr + 24));
372
373 putPC(getIReg(12));
374
375 dres.jk_StopHere = Ijk_NoRedir;
376 dres.whatNext = Dis_StopHere;
377 goto decode_success;
378
379 } else if (*((ULong*)(Addr)(code + 16)) ==
380 0x283a5965d1483000ULL /* or r11, r11, r11 */ ) {
381 /* vex-inject-ir */
382 DIP("vex-inject-ir\n");
383 dres.len = 24;
384
385 vex_inject_ir(irsb, Iend_LE);
386
387 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_CMSTART),
388 mkU64(guest_PC_curr_instr)));
389 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_CMLEN),
390 mkU64(24)));
391
392 /* 2 + 1 = 3 bundles. 24 bytes. */
393 putPC(mkU64(guest_PC_curr_instr + 24));
394
395 dres.jk_StopHere = Ijk_InvalICache;
396 dres.whatNext = Dis_StopHere;
397 goto decode_success;
398 }
399
400 /* We don't expect this. */
401 vex_printf("%s: unexpect special bundles at %lx\n",
402 __func__, (Addr)guest_PC_curr_instr);
403 delta += 16;
404 goto decode_failure;
405 /*NOTREACHED*/
406 }
407
408 /* To decode the given instruction bundle. */
409 nr_insn = parse_insn_tilegx((tilegx_bundle_bits)cins,
410 (ULong)(Addr)code,
411 decoded);
412
413 if (vex_traceflags & VEX_TRACE_FE)
414 decode_and_display(&cins, 1, (ULong)(Addr)code);
415
416 /* Init. rb_wb_index */
417 rd_wb_index = 0;
418
419 steering_pc = -1ULL;
420
421 for (n = 0; n < nr_insn; n++) {
422 opcode = decoded[n].opcode->mnemonic;
423 Int opi;
424
425 rd = ra = rb = -1;
426 opd[0] = opd[1] = opd[2] = opd[3] = -1;
427 opd_dst_map = 0;
428 opd_src_map = 0;
429 opd_imm_map = 0;
430
431 for (opi = 0; opi < decoded[n].opcode->num_operands; opi++) {
432 const struct tilegx_operand *op = decoded[n].operands[opi];
433 opd[opi] = decoded[n].operand_values[opi];
434
435 /* Set the operands. rd, ra, rb and imm. */
436 if (opi < 3) {
437 if (op->is_dest_reg) {
438 if (rd == -1)
439 rd = decoded[n].operand_values[opi];
440 else if (ra == -1)
441 ra = decoded[n].operand_values[opi];
442 } else if (op->is_src_reg) {
443 if (ra == -1) {
444 ra = decoded[n].operand_values[opi];
445 } else if(rb == -1) {
446 rb = decoded[n].operand_values[opi];
447 } else {
448 vassert(0);
449 }
450 } else {
451 imm = decoded[n].operand_values[opi];
452 }
453 }
454
455 /* Build bit maps of used dest, source registers
456 and immediate. */
457 if (op->is_dest_reg) {
458 opd_dst_map |= 1ULL << opi;
459 if(op->is_src_reg)
460 opd_src_map |= 1ULL << opi;
461 } else if(op->is_src_reg) {
462 opd_src_map |= 1ULL << opi;
463 } else {
464 opd_imm_map |= 1ULL << opi;
465 }
466 }
467
468 use_dirty_helper = 0;
469
470 switch (opcode) {
471 case 0: /* "bpt" */ /* "raise" */
472 /* "bpt" pseudo instruction is an illegal instruction */
473 opd_imm_map |= (1 << 0);
474 opd[0] = cins;
475 use_dirty_helper = 1;
476 break;
477 case 1: /* "info" */ /* Ignore this instruction. */
478 break;
479 case 2: /* "infol" */ /* Ignore this instruction. */
480 break;
481 case 3: /* "ld4s_tls" */ /* Ignore this instruction. */
482 break;
483 case 4: /* "ld_tls" */ /* Ignore this instruction. */
484 break;
485 case 5: /* "move" */
486 t2 = newTemp(Ity_I64);
487 assign(t2, getIReg(ra));
488 MARK_REG_WB(rd, t2);
489 break;
490 case 6: /* "movei" */
491 t2 = newTemp(Ity_I64);
492 assign(t2, mkU64(extend_s_8to64(imm)));
493 MARK_REG_WB(rd, t2);
494 break;
495 case 7: /* "moveli" */
496 t2 = newTemp(Ity_I64);
497 assign(t2, mkU64(extend_s_16to64(imm)));
498 MARK_REG_WB(rd, t2);
499 break;
500 case 8: /* "prefetch" */ /* Ignore. */
501 break;
502 case 9: /* "prefetch_add_l1" */ /* Ignore. */
503 break;
504 case 10: /* "prefetch_add_l1_fault" */ /* Ignore. */
505 break;
506 case 11: /* "prefetch_add_l2" */ /* Ignore. */
507 break;
508 case 12: /* "prefetch_add_l2_fault" */ /* Ignore. */
509 break;
510 case 13: /* "prefetch_add_l3" */ /* Ignore. */
511 break;
512 case 14: /* "prefetch_add_l3_fault" */ /* Ignore. */
513 break;
514 case 15: /* "prefetch_l1" */ /* Ignore. */
515 break;
516 case 16: /* "prefetch_l1_fault" */ /* Ignore. */
517 break;
518 case 17: /* "prefetch_l2" */ /* Ignore. */
519 break;
520 case 18: /* "prefetch_l2_fault" */ /* Ignore. */
521 break;
522 case 19: /* "prefetch_l3" */ /* Ignore. */
523 break;
524 case 20: /* "prefetch_l3_fault" */ /* Ignore. */
525 break;
526 case 21: /* "raise" */
527 /* "raise" pseudo instruction is an illegal instruction plusing
528 a "moveli zero, <sig>", so we need save whole bundle in the
529 opd[0], which will be used in the dirty helper. */
530 opd_imm_map |= (1 << 0);
531 opd[0] = cins;
532 use_dirty_helper = 1;
533 break;
534 case 22: /* "add" */
535 t2 = newTemp(Ity_I64);
536 assign(t2, binop(Iop_Add64, getIReg(ra), getIReg(rb)));
537 MARK_REG_WB(rd, t2);
538 break;
539 case 23: /* "addi" */
540 t2 = newTemp(Ity_I64);
541 assign(t2, binop(Iop_Add64, getIReg(ra),
542 mkU64(extend_s_8to64(imm))));
543 MARK_REG_WB(rd, t2);
544 break;
545 case 24: /* "addli" */
546 t2 = newTemp(Ity_I64);
547 assign(t2, binop(Iop_Add64, getIReg(ra),
548 mkU64(extend_s_16to64(imm))));
549 MARK_REG_WB(rd, t2);
550 break;
551 case 25: /* "addx" */
552 t2 = newTemp(Ity_I64);
553 assign(t2, signExtend(binop(Iop_Add32,
554 narrowTo(Ity_I32, getIReg(ra)),
555 narrowTo(Ity_I32, getIReg(rb))),
556 32));
557 MARK_REG_WB(rd, t2);
558 break;
559 case 26: /* "addxi" */
560 t2 = newTemp(Ity_I64);
561 assign(t2, signExtend(binop(Iop_Add32,
562 narrowTo(Ity_I32, getIReg(ra)),
563 mkU32(imm)), 32));
564 MARK_REG_WB(rd, t2);
565 break;
566 case 27: /* "addxli" */
567 t2 = newTemp(Ity_I64);
568 assign(t2, signExtend(binop(Iop_Add32,
569 narrowTo(Ity_I32, getIReg(ra)),
570 mkU32(imm)), 32));
571
572 MARK_REG_WB(rd, t2);
573 break;
574 case 28: /* "addxsc" */
575 use_dirty_helper = 1;
576 break;
577 case 29: /* "and" */
578 t2 = newTemp(Ity_I64);
579 assign(t2, binop(Iop_And64, getIReg(ra), getIReg(rb)));
580 MARK_REG_WB(rd, t2);
581 break;
582 case 30: /* "andi" */
583 t2 = newTemp(Ity_I64);
584 assign(t2, binop(Iop_And64, getIReg(ra),
585 mkU64(extend_s_8to64(imm))));
586 MARK_REG_WB(rd, t2);
587 break;
588 case 31: /* "beqz" */
589 /* Fall-through */
590 case 32:
591 /* "beqzt" */
592 bstmt = dis_branch(binop(Iop_CmpEQ64, getIReg(ra), mkU64(0)),
593 imm);
594 break;
595 case 33: /* "bfexts" */
596 {
597 ULong imm0 = decoded[n].operand_values[3];
598 ULong mask = ((-1ULL) ^ ((-1ULL << ((imm0 - imm) & 63)) << 1));
599 t0 = newTemp(Ity_I64);
600 t2 = newTemp(Ity_I64);
601 assign(t0, binop(Iop_Xor64,
602 binop(Iop_Sub64,
603 binop(Iop_And64,
604 binop(Iop_Shr64,
605 getIReg(ra),
606 mkU8(imm0)),
607 mkU64(1)),
608 mkU64(1)),
609 mkU64(-1ULL)));
610 assign(t2,
611 binop(Iop_Or64,
612 binop(Iop_And64,
613 binop(Iop_Or64,
614 binop(Iop_Shr64,
615 getIReg(ra),
616 mkU8(imm)),
617 binop(Iop_Shl64,
618 getIReg(ra),
619 mkU8(64 - imm))),
620 mkU64(mask)),
621 binop(Iop_And64,
622 mkexpr(t0),
623 mkU64(~mask))));
624
625 MARK_REG_WB(rd, t2);
626 }
627 break;
628 case 34: /* "bfextu" */
629 {
630 ULong imm0 = decoded[n].operand_values[3];
631 ULong mask = 0;
632 t2 = newTemp(Ity_I64);
633 mask = ((-1ULL) ^ ((-1ULL << ((imm0 - imm) & 63)) << 1));
634
635 assign(t2,
636 binop(Iop_And64,
637 binop(Iop_Or64,
638 binop(Iop_Shr64,
639 getIReg(ra),
640 mkU8(imm)),
641 binop(Iop_Shl64,
642 getIReg(ra),
643 mkU8(64 - imm))),
644 mkU64(mask)));
645 MARK_REG_WB(rd, t2);
646 }
647 break;
648 case 35: /* "bfins" */
649 {
650 ULong mask;
651 ULong imm0 = decoded[n].operand_values[3];
652 t0 = newTemp(Ity_I64);
653 t2 = newTemp(Ity_I64);
654 if (imm <= imm0)
655 {
656 mask = ((-1ULL << imm) ^ ((-1ULL << imm0) << 1));
657 }
658 else
659 {
660 mask = ((-1ULL << imm) | (-1ULL >> (63 - imm0)));
661 }
662
663 assign(t0, binop(Iop_Or64,
664 binop(Iop_Shl64,
665 getIReg(ra),
666 mkU8(imm)),
667 binop(Iop_Shr64,
668 getIReg(ra),
669 mkU8(64 - imm))));
670
671 assign(t2, binop(Iop_Or64,
672 binop(Iop_And64,
673 mkexpr(t0),
674 mkU64(mask)),
675 binop(Iop_And64,
676 getIReg(rd),
677 mkU64(~mask))));
678
679 MARK_REG_WB(rd, t2);
680 }
681 break;
682 case 36: /* "bgez" */
683 /* Fall-through */
684 case 37: /* "bgezt" */
685 bstmt = dis_branch(binop(Iop_CmpEQ64,
686 binop(Iop_And64,
687 getIReg(ra),
688 mkU64(0x8000000000000000ULL)),
689 mkU64(0x0)),
690 imm);
691 break;
692 case 38: /* "bgtz" */
693 /* Fall-through */
694 case 39:
695 /* "bgtzt" */
696 bstmt = dis_branch(unop(Iop_Not1,
697 binop(Iop_CmpLE64S,
698 getIReg(ra),
699 mkU64(0))),
700 imm);
701 break;
702 case 40: /* "blbc" */
703 /* Fall-through */
704 case 41: /* "blbct" */
705 bstmt = dis_branch(unop(Iop_64to1,
706 unop(Iop_Not64, getIReg(ra))),
707 imm);
708
709 break;
710 case 42: /* "blbs" */
711 /* Fall-through */
712 case 43:
713 /* "blbst" */
714 bstmt = dis_branch(unop(Iop_64to1,
715 getIReg(ra)),
716 imm);
717 break;
718 case 44: /* "blez" */
719 bstmt = dis_branch(binop(Iop_CmpLE64S, getIReg(ra),
720 mkU64(0)),
721 imm);
722 break;
723 case 45: /* "blezt" */
724 bstmt = dis_branch(binop(Iop_CmpLE64S, getIReg(ra),
725 mkU64(0)),
726 imm);
727 break;
728 case 46: /* "bltz" */
729 bstmt = dis_branch(binop(Iop_CmpLT64S, getIReg(ra),
730 mkU64(0)),
731 imm);
732 break;
733 case 47: /* "bltzt" */
734 bstmt = dis_branch(binop(Iop_CmpLT64S, getIReg(ra),
735 mkU64(0)),
736 imm);
737 break;
738 case 48: /* "bnez" */
739 /* Fall-through */
740 case 49:
741 /* "bnezt" */
742 bstmt = dis_branch(binop(Iop_CmpNE64, getIReg(ra),
743 mkU64(0)),
744 imm);
745 break;
746 case 50: /* "clz" */
747 t2 = newTemp(Ity_I64);
748 assign(t2, unop(Iop_Clz64, getIReg(ra)));
749
750 MARK_REG_WB(rd, t2);
751 break;
752 case 51: /* "cmoveqz rd, ra, rb" */
753 t2 = newTemp(Ity_I64);
754 assign(t2, IRExpr_ITE(binop(Iop_CmpEQ64, getIReg(ra), mkU64(0)),
755 getIReg(rb), getIReg(rd)));
756 MARK_REG_WB(rd, t2);
757 break;
758 case 52: /* "cmovnez" */
759 t2 = newTemp(Ity_I64);
760 assign(t2, IRExpr_ITE(binop(Iop_CmpEQ64, getIReg(ra), mkU64(0)),
761 getIReg(rd), getIReg(rb)));
762 MARK_REG_WB(rd, t2);
763 break;
764 case 53: /* "cmpeq" */
765 t2 = newTemp(Ity_I64);
766 assign(t2, unop(Iop_1Uto64, binop(Iop_CmpEQ64,
767 getIReg(ra), getIReg(rb))));
768 MARK_REG_WB(rd, t2);
769 break;
770
771 case 54: /* "cmpeqi" */
772 t2 = newTemp(Ity_I64);
773 assign(t2, unop(Iop_1Uto64, binop(Iop_CmpEQ64,
774 getIReg(ra),
775 mkU64(extend_s_8to64(imm)))));
776 MARK_REG_WB(rd, t2);
777 break;
778 case 55: /* "cmpexch" */
779 t1 = newTemp(Ity_I64);
780 t2 = newTemp(Ity_I64);
781
782 assign(t1, getIReg(rb));
783 stmt( IRStmt_CAS(mkIRCAS(IRTemp_INVALID, t2, Iend_LE,
784 getIReg(ra),
785 NULL, binop(Iop_Add64,
786 getIReg(70),
787 getIReg(71)),
788 NULL, mkexpr(t1))));
789 MARK_REG_WB(rd, t2);
790 break;
791 case 56: /* "cmpexch4" */
792 t1 = newTemp(Ity_I32);
793 t2 = newTemp(Ity_I64);
794 t3 = newTemp(Ity_I32);
795
796 assign(t1, narrowTo(Ity_I32, getIReg(rb)));
797 stmt( IRStmt_CAS(mkIRCAS(IRTemp_INVALID, t3, Iend_LE,
798 getIReg(ra),
799 NULL,
800 narrowTo(Ity_I32, binop(Iop_Add64,
801 getIReg(70),
802 getIReg(71))),
803 NULL,
804 mkexpr(t1))));
805 assign(t2, unop(Iop_32Uto64, mkexpr(t3)));
806 MARK_REG_WB(rd, t2);
807 break;
808 case 57: /* "cmples" */
809 t2 = newTemp(Ity_I64);
810 assign(t2, unop(Iop_1Uto64,
811 binop(Iop_CmpLE64S, getIReg(ra), getIReg(rb))));
812 MARK_REG_WB(rd, t2);
813 break;
814 case 58: /* "cmpleu" */
815 t2 = newTemp(Ity_I64);
816 assign(t2, unop(Iop_1Uto64,
817 binop(Iop_CmpLE64U, getIReg(ra), getIReg(rb))));
818 MARK_REG_WB(rd, t2);
819 break;
820 case 59: /* "cmplts" */
821 t2 = newTemp(Ity_I64);
822 assign(t2, unop(Iop_1Uto64,
823 binop(Iop_CmpLT64S, getIReg(ra), getIReg(rb))));
824 MARK_REG_WB(rd, t2);
825 break;
826 case 60: /* "cmpltsi" */
827 t2 = newTemp(Ity_I64);
828 assign(t2, unop(Iop_1Uto64,
829 binop(Iop_CmpLT64S,
830 getIReg(ra),
831 mkU64(extend_s_8to64(imm)))));
832 MARK_REG_WB(rd, t2);
833 break;
834 case 61:
835
836 /* "cmpltu" */
837 t2 = newTemp(Ity_I64);
838 assign(t2, unop(Iop_1Uto64,
839 binop(Iop_CmpLT64U, getIReg(ra), getIReg(rb))));
840 MARK_REG_WB(rd, t2);
841
842
843 break;
844 case 62: /* "cmpltui" */
845 t2 = newTemp(Ity_I64);
846 assign(t2, unop(Iop_1Uto64,
847 binop(Iop_CmpLT64U,
848 getIReg(ra),
849 mkU64(imm))));
850 MARK_REG_WB(rd, t2);
851
852
853 break;
854 case 63: /* "cmpne" */
855 t2 = newTemp(Ity_I64);
856 assign(t2, unop(Iop_1Uto64,
857 binop(Iop_CmpNE64, getIReg(ra), getIReg(rb))));
858 MARK_REG_WB(rd, t2);
859
860
861 break;
862 case 64:
863 /* Fall-through */
864 case 65:
865 /* Fall-through */
866 case 66:
867 /* Fall-through */
868 case 67:
869 /* Fall-through */
870 case 68:
871 /* Fall-through */
872 case 69:
873 /* Fall-through */
874 case 70:
875 /* Fall-through */
876 case 71:
877 /* Fall-through */
878 case 72:
879 use_dirty_helper = 1;
880 break;
881 case 73: /* "ctz" */
882 t2 = newTemp(Ity_I64);
883 assign(t2, unop(Iop_Ctz64, getIReg(ra)));
884
885 MARK_REG_WB(rd, t2);
886
887
888 break;
889 case 74: /* "dblalign" */
890 t0 = newTemp(Ity_I64);
891 t1 = newTemp(Ity_I64);
892 t2 = newTemp(Ity_I64);
893
894 /* t0 is the bit shift amount */
895 assign(t0, binop(Iop_Shl64,
896 binop(Iop_And64,
897 getIReg(rb),
898 mkU64(7)),
899 mkU8(3)));
900 assign(t1, binop(Iop_Sub64,
901 mkU64(64),
902 mkexpr(t0)));
903
904 assign(t2, binop(Iop_Or64,
905 binop(Iop_Shl64,
906 getIReg(ra),
907 unop(Iop_64to8, mkexpr(t1))),
908 binop(Iop_Shr64,
909 getIReg(rd),
910 unop(Iop_64to8, mkexpr(t0)))));
911
912 MARK_REG_WB(rd, t2);
913 break;
914 case 75:
915 /* Fall-through */
916 case 76:
917 /* Fall-through */
918 case 77:
919 /* Fall-through */
920 case 78:
921 /* Fall-through */
922 case 79:
923 use_dirty_helper = 1;
924 break;
925 case 80: /* "exch" */
926 t2 = newTemp(Ity_I64);
927 stmt( IRStmt_CAS(
928 mkIRCAS(IRTemp_INVALID,
929 t2,
930 Iend_LE,
931 getIReg(ra),
932 NULL,
933 mkU64(0x0),
934 NULL,
935 getIReg(rb))));
936 MARK_REG_WB(rd, t2);
937 break;
938 case 81: /* "exch4 rd, ra, rb" */
939 t0 = newTemp(Ity_I32);
940 t2 = newTemp(Ity_I64);
941 stmt( IRStmt_CAS(
942 mkIRCAS(IRTemp_INVALID,
943 t0,
944 Iend_LE,
945 getIReg(ra),
946 NULL,
947 mkU32(0x0),
948 NULL,
949 narrowTo(Ity_I32,
950 getIReg(rb)))));
951 assign(t2, unop(Iop_32Sto64, mkexpr(t0)));
952 MARK_REG_WB(rd, t2);
953 break;
954 case 82:
955 /* Fall-through */
956 case 83:
957 /* Fall-through */
958 case 84:
959 /* Fall-through */
960 case 85:
961 /* Fall-through */
962 case 86:
963 /* Fall-through */
964 case 87:
965 /* Fall-through */
966 case 88:
967 /* Fall-through */
968 case 89:
969 use_dirty_helper = 1;
970 break;
971 case 90: /* "fetchadd" */
972 t2 = newTemp(Ity_I64);
973 stmt( IRStmt_CAS(
974 mkIRCAS(IRTemp_INVALID,
975 t2,
976 Iend_LE,
977 getIReg(ra),
978 NULL,
979 // fetchadd=3
980 mkU64(0x3),
981 NULL,
982 getIReg(rb))));
983 MARK_REG_WB(rd, t2);
984 break;
985 case 91: /* "fetchadd4" */
986 t0 = newTemp(Ity_I32);
987 t2 = newTemp(Ity_I64);
988 stmt( IRStmt_CAS(
989 mkIRCAS(IRTemp_INVALID,
990 t0,
991 Iend_LE,
992 getIReg(ra),
993 NULL,
994 // fetchadd=3
995 mkU32(0x3),
996 NULL,
997 narrowTo(Ity_I32,
998 getIReg(rb)))));
999 assign(t2, unop(Iop_32Sto64, mkexpr(t0)));
1000 MARK_REG_WB(rd, t2);
1001
1002 break;
1003 case 92: /* "fetchaddgez" */
1004 t2 = newTemp(Ity_I64);
1005 stmt( IRStmt_CAS(
1006 mkIRCAS(IRTemp_INVALID,
1007 t2,
1008 Iend_LE,
1009 getIReg(ra),
1010 NULL,
1011 // fetchaddgez=5
1012 mkU64(0x5),
1013 NULL,
1014 getIReg(rb))));
1015 MARK_REG_WB(rd, t2);
1016 break;
1017 case 93: /* "fetchaddgez4" */
1018 t0 = newTemp(Ity_I32);
1019 t2 = newTemp(Ity_I64);
1020 stmt( IRStmt_CAS(
1021 mkIRCAS(IRTemp_INVALID,
1022 t0,
1023 Iend_LE,
1024 getIReg(ra),
1025 NULL,
1026 // fetchaddgez=5
1027 mkU32(0x5),
1028 NULL,
1029 narrowTo(Ity_I32,
1030 getIReg(rb)))));
1031 assign(t2, unop(Iop_32Sto64, mkexpr(t0)));
1032 MARK_REG_WB(rd, t2);
1033 break;
1034 case 94: /* "fetchand\n") */
1035 t2 = newTemp(Ity_I64);
1036 stmt( IRStmt_CAS(
1037 mkIRCAS(IRTemp_INVALID,
1038 t2,
1039 Iend_LE,
1040 getIReg(ra),
1041 NULL,
1042 mkU64(0x2),
1043 NULL,
1044 getIReg(rb))));
1045 MARK_REG_WB(rd, t2);
1046 break;
1047 case 95:
1048 /* mkIRCAS.
1049 0: xch### 1: cmpexch###,
1050 2: fetchand## 3: fetchadd##
1051 4: fetchor## 5: fetchaddgez
1052 */
1053 /* "fetchand4" */
1054 t0 = newTemp(Ity_I32);
1055 t2 = newTemp(Ity_I64);
1056 stmt( IRStmt_CAS(
1057 mkIRCAS(IRTemp_INVALID,
1058 t0,
1059 Iend_LE,
1060 getIReg(ra),
1061 NULL,
1062 mkU32(0x2),
1063 NULL,
1064 narrowTo(Ity_I32,
1065 getIReg(rb)))));
1066 assign(t2, unop(Iop_32Sto64, mkexpr(t0)));
1067 MARK_REG_WB(rd, t2);
1068 break;
1069 case 96: /* "fetchor" */
1070 t2 = newTemp(Ity_I64);
1071 stmt( IRStmt_CAS(
1072 mkIRCAS(IRTemp_INVALID,
1073 t2,
1074 Iend_LE,
1075 getIReg(ra),
1076 NULL,
1077 mkU64(0x4),
1078 NULL,
1079 getIReg(rb))));
1080 MARK_REG_WB(rd, t2);
1081 break;
1082 case 97: /* "fetchor4" */
1083 t0 = newTemp(Ity_I32);
1084 t2 = newTemp(Ity_I64);
1085 stmt( IRStmt_CAS(
1086 mkIRCAS(IRTemp_INVALID,
1087 t0,
1088 Iend_LE,
1089 getIReg(ra),
1090 NULL,
1091 mkU32(0x4),
1092 NULL,
1093 narrowTo(Ity_I32,
1094 getIReg(rb)))));
1095 assign(t2, unop(Iop_32Sto64, mkexpr(t0)));
1096 MARK_REG_WB(rd, t2);
1097 break;
1098 case 98:
1099 /* Fall-through */
1100 case 99:
1101 /* Fall-through */
1102 case 100:
1103 use_dirty_helper = 1;
1104 break;
1105 case 101: /* "fnop" Ignore */
1106 break;
1107 case 102:
1108 /* Fall-through */
1109 case 103:
1110 /* Fall-through */
1111 case 104:
1112 /* Fall-through */
1113 case 105:
1114 /* Fall-through */
1115 case 106:
1116 /* Fall-through */
1117 case 107:
1118 /* Fall-through */
1119 case 108:
1120 use_dirty_helper = 1;
1121 break;
1122 case 109:
1123 /* Fall-through */
1124 case 110:
1125 /* Fall-through */
1126 case 111:
1127 use_dirty_helper = 1;
1128 break;
1129 case 112: /* "iret" */
1130 next = mkU64(guest_PC_curr_instr + 8);
1131 jumpkind = Ijk_Ret;
1132 break;
1133 case 113: /* "j" */
1134 next = mkU64(imm);
1135 /* set steering address. */
1136 steering_pc = imm;
1137 jumpkind = Ijk_Boring;
1138 break;
1139 case 114:
1140 t2 = newTemp(Ity_I64);
1141 assign(t2, mkU64(guest_PC_curr_instr + 8));
1142 /* set steering address. */
1143 steering_pc = imm;
1144 next = mkU64(imm);
1145 jumpkind = Ijk_Call;
1146 MARK_REG_WB(55, t2);
1147 break;
1148 case 115: /* "jalr" */
1149 /* Fall-through */
1150 case 116: /* "jalrp" */
1151 t1 = newTemp(Ity_I64);
1152 t2 = newTemp(Ity_I64);
1153 assign(t1, getIReg(ra));
1154 assign(t2, mkU64(guest_PC_curr_instr + 8));
1155 next = mkexpr(t1);
1156 jumpkind = Ijk_Call;
1157 MARK_REG_WB(55, t2);
1158 break;
1159 case 117: /* "jr" */
1160 /* Fall-through */
1161 case 118: /* "jrp" */
1162 next = getIReg(ra);
1163 jumpkind = Ijk_Boring;
1164 break;
1165 case 119: /* "ld" */
1166 t2 = newTemp(Ity_I64);
1167 assign(t2, load(Ity_I64, (getIReg(ra))));
1168 MARK_REG_WB(rd, t2);
1169 break;
1170 case 120: /* "ld1s" */
1171 t2 = newTemp(Ity_I64);
1172 assign(t2, unop(Iop_8Sto64,
1173 load(Ity_I8, (getIReg(ra)))));
1174 MARK_REG_WB(rd, t2);
1175 break;
1176 case 121: /* "ld1s_add" */
1177 t1 = newTemp(Ity_I64);
1178 t2 = newTemp(Ity_I64);
1179 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1180 assign(t2, unop(Iop_8Sto64,
1181 load(Ity_I8, (getIReg(ra)))));
1182 MARK_REG_WB(ra, t1);
1183 MARK_REG_WB(rd, t2);
1184 break;
1185 case 122: /* "ld1u" */
1186 t2 = newTemp(Ity_I64);
1187 assign(t2, unop(Iop_8Uto64,
1188 load(Ity_I8, (getIReg(ra)))));
1189 MARK_REG_WB(rd, t2);
1190
1191 break;
1192 case 123: /* "ld1u_add" */
1193 t1 = newTemp(Ity_I64);
1194 t2 = newTemp(Ity_I64);
1195 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1196 assign(t2, unop(Iop_8Uto64,
1197 load(Ity_I8, (getIReg(ra)))));
1198 MARK_REG_WB(ra, t1);
1199 MARK_REG_WB(rd, t2);
1200 break;
1201 case 124: /* "ld2s" */
1202 t2 = newTemp(Ity_I64);
1203 assign(t2, unop(Iop_16Sto64,
1204 load(Ity_I16, getIReg(ra))));
1205 MARK_REG_WB(rd, t2);
1206 break;
1207 case 125: /* "ld2s_add" */
1208 t1 = newTemp(Ity_I64);
1209 t2 = newTemp(Ity_I64);
1210 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1211 assign(t2, unop(Iop_16Sto64,
1212 load(Ity_I16, getIReg(ra))));
1213 MARK_REG_WB(rd, t2);
1214 MARK_REG_WB(ra, t1);
1215 break;
1216 case 126: /* "ld2u" */
1217 t2 = newTemp(Ity_I64);
1218 assign(t2, unop(Iop_16Uto64,
1219 load(Ity_I16, getIReg(ra))));
1220 MARK_REG_WB(rd, t2);
1221 break;
1222 case 127: /* "ld2u_add" */
1223 t1 = newTemp(Ity_I64);
1224 t2 = newTemp(Ity_I64);
1225 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1226 assign(t2, unop(Iop_16Uto64,
1227 load(Ity_I16, getIReg(ra))));
1228 MARK_REG_WB(rd, t2);
1229 MARK_REG_WB(ra, t1);
1230 break;
1231 case 128: /* "ld4s" */
1232 t2 = newTemp(Ity_I64);
1233 assign(t2, unop(Iop_32Sto64,
1234 load(Ity_I32, (getIReg(ra)))));
1235 MARK_REG_WB(rd, t2);
1236 break;
1237 case 129: /* "ld4s_add" */
1238 t2 = newTemp(Ity_I64);
1239 t1 = newTemp(Ity_I64);
1240 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1241 assign(t2, unop(Iop_32Sto64,
1242 load(Ity_I32, (getIReg(ra)))));
1243 MARK_REG_WB(rd, t2);
1244 MARK_REG_WB(ra, t1);
1245 break;
1246 case 130: /* "ld4u" */
1247 t2 = newTemp(Ity_I64);
1248 assign(t2, unop(Iop_32Uto64,
1249 load(Ity_I32, getIReg(ra))));
1250 MARK_REG_WB(rd, t2);
1251 break;
1252 case 131: /* "ld4u_add" */
1253 t1 = newTemp(Ity_I64);
1254 t2 = newTemp(Ity_I64);
1255 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1256 assign(t2, unop(Iop_32Uto64,
1257 load(Ity_I32, getIReg(ra))));
1258 MARK_REG_WB(ra, t1);
1259 MARK_REG_WB(rd, t2);
1260 break;
1261 case 132: /* "ld_add" */
1262 t1 = newTemp(Ity_I64);
1263 t2 = newTemp(Ity_I64);
1264 assign(t1, load(Ity_I64, getIReg(ra)));
1265 assign(t2, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1266 MARK_REG_WB(ra, t2);
1267 MARK_REG_WB(rd, t1);
1268 break;
1269 case 133: /* "ldna" */
1270 t2 = newTemp(Ity_I64);
1271 assign(t2, load(Ity_I64,
1272 binop(Iop_And64,
1273 getIReg(ra),
1274 unop(Iop_Not64,
1275 mkU64(7)))));
1276 MARK_REG_WB(rd, t2);
1277 break;
1278 case 134: /* "ldna_add" */
1279 t1 = newTemp(Ity_I64);
1280 t2 = newTemp(Ity_I64);
1281
1282 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1283 assign(t2, load(Ity_I64,
1284 binop(Iop_And64,
1285 getIReg(ra),
1286 unop(Iop_Not64,
1287 mkU64(7)))));
1288 MARK_REG_WB(ra, t1);
1289 MARK_REG_WB(rd, t2);
1290 break;
1291 case 135: /* "ldnt" */
1292 /* Valgrind IR has no Non-Temp load. Use normal load. */
1293 t2 = newTemp(Ity_I64);
1294 assign(t2, load(Ity_I64, (getIReg(ra))));
1295 MARK_REG_WB(rd, t2);
1296 break;
1297 case 136: /* "ldnt1s" */
1298 t2 = newTemp(Ity_I64);
1299 assign(t2, unop(Iop_8Sto64,
1300 load(Ity_I8, (getIReg(ra)))));
1301 MARK_REG_WB(rd, t2);
1302 break;
1303 case 137: /* "ldnt1s_add" */
1304 t1 = newTemp(Ity_I64);
1305 t2 = newTemp(Ity_I64);
1306 assign(t2, unop(Iop_8Sto64,
1307 load(Ity_I8, (getIReg(ra)))));
1308 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1309 MARK_REG_WB(ra, t1);
1310 MARK_REG_WB(rd, t2);
1311 break;
1312 case 138: /* "ldnt1u" */
1313 t2 = newTemp(Ity_I64);
1314 assign(t2, unop(Iop_8Uto64,
1315 load(Ity_I8, (getIReg(ra)))));
1316 MARK_REG_WB(rd, t2);
1317 break;
1318 case 139: /* "ldnt1u_add" */
1319 t1 = newTemp(Ity_I64);
1320 t2 = newTemp(Ity_I64);
1321
1322 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1323 assign(t2, unop(Iop_8Uto64,
1324 load(Ity_I8, (getIReg(ra)))));
1325
1326 MARK_REG_WB(ra, t1);
1327 MARK_REG_WB(rd, t2);
1328 break;
1329 case 140: /* "ldnt2s" */
1330 t2 = newTemp(Ity_I64);
1331 assign(t2, unop(Iop_16Sto64,
1332 load(Ity_I16, getIReg(ra))));
1333 MARK_REG_WB(rd, t2);
1334 break;
1335 case 141: /* "ldnt2s_add" */
1336 t1 = newTemp(Ity_I64);
1337 t2 = newTemp(Ity_I64);
1338 assign(t2, unop(Iop_16Sto64,
1339 load(Ity_I16, getIReg(ra))));
1340 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1341 MARK_REG_WB(ra, t1);
1342 MARK_REG_WB(rd, t2);
1343 break;
1344 case 142: /* "ldnt2u" */
1345 t2 = newTemp(Ity_I64);
1346 assign(t2, unop(Iop_16Uto64,
1347 load(Ity_I16, getIReg(ra))));
1348 MARK_REG_WB(rd, t2);
1349 break;
1350 case 143: /* "ldnt2u_add" */
1351 t1 = newTemp(Ity_I64);
1352 t2 = newTemp(Ity_I64);
1353 assign(t2, unop(Iop_16Uto64,
1354 load(Ity_I16, getIReg(ra))));
1355 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1356 MARK_REG_WB(ra, t1);
1357 MARK_REG_WB(rd, t2);
1358 break;
1359 case 144: /* "ldnt4s" */
1360 t2 = newTemp(Ity_I64);
1361 assign(t2, unop(Iop_32Sto64,
1362 load(Ity_I32, (getIReg(ra)))));
1363 MARK_REG_WB(rd, t2);
1364 break;
1365 case 145: /* "ldnt4s_add" */
1366 t1 = newTemp(Ity_I64);
1367 t2 = newTemp(Ity_I64);
1368 assign(t2, unop(Iop_32Sto64,
1369 load(Ity_I32, (getIReg(ra)))));
1370 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1371 MARK_REG_WB(rd, t2);
1372 MARK_REG_WB(ra, t1);
1373 break;
1374 case 146: /* "ldnt4u" */
1375 t2 = newTemp(Ity_I64);
1376 assign(t2, unop(Iop_32Uto64,
1377 load(Ity_I32, getIReg(ra))));
1378 MARK_REG_WB(rd, t2);
1379 break;
1380 case 147: /* "ldnt4u_add" */
1381 t1 = newTemp(Ity_I64);
1382 t2 = newTemp(Ity_I64);
1383 assign(t2, unop(Iop_32Uto64,
1384 load(Ity_I32, getIReg(ra))));
1385 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1386 MARK_REG_WB(rd, t2);
1387 MARK_REG_WB(ra, t1);
1388 break;
1389 case 148: /* "ldnt_add" */
1390 t1 = newTemp(Ity_I64);
1391 t2 = newTemp(Ity_I64);
1392 assign(t1, load(Ity_I64, getIReg(ra)));
1393 assign(t2, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
1394 MARK_REG_WB(rd, t1);
1395 MARK_REG_WB(ra, t2);
1396 break;
1397 case 149: /* "lnk" */
1398 t2 = newTemp(Ity_I64);
1399 assign(t2, mkU64(guest_PC_curr_instr + 8));
1400 MARK_REG_WB(rd, t2);
1401 break;
1402 case 150: /* "mf" */
1403 use_dirty_helper = 1;
1404 break;
1405 case 151: /* "mfspr" */
1406 t2 = newTemp(Ity_I64);
1407 if (imm == 0x2780) { // Get Cmpexch value
1408 assign(t2, getIReg(70));
1409 MARK_REG_WB(rd, t2);
1410 } else if (imm == 0x2580) { // Get EX_CONTEXT_0_0
1411 assign(t2, getIReg(576 / 8));
1412 MARK_REG_WB(rd, t2);
1413 } else if (imm == 0x2581) { // Get EX_CONTEXT_0_1
1414 assign(t2, getIReg(584 / 8));
1415 MARK_REG_WB(rd, t2);
1416 } else
1417 use_dirty_helper = 1;
1418 break;
1419 case 152: /* "mm" */
1420 use_dirty_helper = 1;
1421 break;
1422 case 153: /* "mnz" */
1423 t2 = newTemp(Ity_I64);
1424 assign(t2, binop(Iop_And64,
1425 unop(Iop_1Sto64, binop(Iop_CmpNE64,
1426 getIReg(ra),
1427 mkU64(0))),
1428 getIReg(rb)));
1429 MARK_REG_WB(rd, t2);
1430 break;
1431 case 154: /* "mtspr imm, ra" */
1432 if (imm == 0x2780) // Set Cmpexch value
1433 putIReg(70, getIReg(ra));
1434 else if (imm == 0x2580) // set EX_CONTEXT_0_0
1435 putIReg(576/8, getIReg(ra));
1436 else if (imm == 0x2581) // set EX_CONTEXT_0_1
1437 putIReg(584/8, getIReg(ra));
1438 else
1439 use_dirty_helper = 1;
1440 break;
1441 case 155: /* "mul_hs_hs" */
1442 t2 = newTemp(Ity_I64);
1443 assign(t2, binop(Iop_MullS32,
1444 unop(Iop_64to32,
1445 binop(Iop_Shr64,
1446 getIReg(ra),
1447 mkU8(32))),
1448 unop(Iop_64to32,
1449 binop(Iop_Shr64,
1450 getIReg(rb),
1451 mkU8(32)))));
1452 MARK_REG_WB(rd, t2);
1453 break;
1454 case 156: /* "mul_hs_hu" */
1455 t0 = newTemp(Ity_I64);
1456 t1 = newTemp(Ity_I64);
1457 t2 = newTemp(Ity_I64);
1458 t3 = newTemp(Ity_I64);
1459
1460 assign(t0, unop(Iop_32Sto64,
1461 unop(Iop_64to32,
1462 binop(Iop_Shr64, getIReg(ra), mkU8(32)))));
1463 assign(t1, binop(Iop_MullU32,
1464 unop(Iop_64to32, mkexpr(t0)),
1465 unop(Iop_64to32, binop(Iop_Shr64, getIReg(rb), mkU8(32)))));
1466 assign(t3, binop(Iop_MullU32,
1467 unop(Iop_64to32, binop(Iop_Shr64,
1468 mkexpr(t0),
1469 mkU8(32))),
1470 unop(Iop_64to32, binop(Iop_Shr64, getIReg(rb), mkU8(32)))));
1471 assign(t2, binop(Iop_Add64,
1472 mkexpr(t1),
1473 binop(Iop_Shl64,
1474 mkexpr(t3),
1475 mkU8(32))));
1476 MARK_REG_WB(rd, t2);
1477 break;
1478 case 157: /* "mul_hs_ls" */
1479 t2 = newTemp(Ity_I64);
1480 assign(t2, binop(Iop_MullS32,
1481 unop(Iop_64to32,
1482 binop(Iop_Shr64,
1483 getIReg(ra),
1484 mkU8(32))),
1485 unop(Iop_64to32,
1486 getIReg(rb))));
1487 MARK_REG_WB(rd, t2);
1488 break;
1489 case 158: /* "mul_hs_lu" */
1490 t0 = newTemp(Ity_I64);
1491 t1 = newTemp(Ity_I64);
1492 t2 = newTemp(Ity_I64);
1493 t3 = newTemp(Ity_I64);
1494
1495 assign(t0, unop(Iop_32Sto64,
1496 unop(Iop_64to32,
1497 binop(Iop_Shr64, getIReg(ra), mkU8(32)))));
1498 assign(t1, binop(Iop_MullU32,
1499 unop(Iop_64to32, mkexpr(t0)),
1500 unop(Iop_64to32, getIReg(rb))));
1501 assign(t3, binop(Iop_MullU32,
1502 unop(Iop_64to32, binop(Iop_Shr64,
1503 mkexpr(t0),
1504 mkU8(32))),
1505 unop(Iop_64to32, getIReg(rb))));
1506 assign(t2, binop(Iop_Add64,
1507 mkexpr(t1),
1508 binop(Iop_Shl64,
1509 mkexpr(t3),
1510 mkU8(32))));
1511 MARK_REG_WB(rd, t2);
1512 break;
1513 case 159: /* "mul_hu_hu" */
1514 t2 = newTemp(Ity_I64);
1515 assign(t2, binop(Iop_MullU32,
1516 unop(Iop_64to32,
1517 binop(Iop_Shr64,
1518 getIReg(ra),
1519 mkU8(32))),
1520 unop(Iop_64to32,
1521 binop(Iop_Shr64,
1522 getIReg(rb),
1523 mkU8(32)))));
1524 MARK_REG_WB(rd, t2);
1525 break;
1526 case 160: /* "mul_hu_ls" */
1527 t0 = newTemp(Ity_I64);
1528 t1 = newTemp(Ity_I64);
1529 t2 = newTemp(Ity_I64);
1530 t3 = newTemp(Ity_I64);
1531
1532 assign(t0, unop(Iop_32Sto64,
1533 unop(Iop_64to32,
1534 getIReg(ra))));
1535
1536 assign(t1, binop(Iop_MullU32,
1537 unop(Iop_64to32, mkexpr(t0)),
1538 unop(Iop_64to32, binop(Iop_Shr64, getIReg(rb), mkU8(32)))));
1539 assign(t3, binop(Iop_MullU32,
1540 unop(Iop_64to32, binop(Iop_Shr64,
1541 mkexpr(t0),
1542 mkU8(32))),
1543 unop(Iop_64to32, binop(Iop_Shr64, getIReg(rb), mkU8(32)))));
1544 assign(t2, binop(Iop_Add64,
1545 mkexpr(t1),
1546 binop(Iop_Shl64,
1547 mkexpr(t3),
1548 mkU8(32))));
1549 MARK_REG_WB(rd, t2);
1550 break;
1551 case 161: /* "mul_hu_lu" */
1552 t2 = newTemp(Ity_I64);
1553 assign(t2, binop(Iop_MullU32,
1554 unop(Iop_64to32,
1555 binop(Iop_Shr64,
1556 getIReg(ra),
1557 mkU8(32))),
1558 unop(Iop_64to32,
1559 getIReg(rb))));
1560 MARK_REG_WB(rd, t2);
1561 break;
1562 case 162: /* "mul_ls_ls" */
1563 t2 = newTemp(Ity_I64);
1564 assign(t2, binop(Iop_MullS32,
1565 unop(Iop_64to32, getIReg(ra)),
1566 unop(Iop_64to32, getIReg(rb))));
1567 MARK_REG_WB(rd, t2);
1568 break;
1569 case 163: /* "mul_ls_lu" */
1570 t0 = newTemp(Ity_I64);
1571 t1 = newTemp(Ity_I64);
1572 t2 = newTemp(Ity_I64);
1573 t3 = newTemp(Ity_I64);
1574
1575 assign(t0, unop(Iop_32Sto64,
1576 unop(Iop_64to32, getIReg(ra))));
1577 assign(t1, binop(Iop_MullU32,
1578 unop(Iop_64to32, mkexpr(t0)),
1579 unop(Iop_64to32, getIReg(rb))));
1580 assign(t3, binop(Iop_MullU32,
1581 unop(Iop_64to32, binop(Iop_Shr64,
1582 mkexpr(t0),
1583 mkU8(32))),
1584 unop(Iop_64to32, getIReg(rb))));
1585 assign(t2, binop(Iop_Add64,
1586 mkexpr(t1),
1587 binop(Iop_Shl64,
1588 mkexpr(t3),
1589 mkU8(32))));
1590 MARK_REG_WB(rd, t2);
1591 break;
1592 case 164: /* "mul_lu_lu" */
1593 t2 = newTemp(Ity_I64);
1594 assign(t2, binop(Iop_MullU32,
1595 unop(Iop_64to32, getIReg(ra)),
1596 unop(Iop_64to32, getIReg(rb))));
1597 MARK_REG_WB(rd, t2);
1598 break;
1599 case 165: /* "mula_hs_hs" */
1600 t0 = newTemp(Ity_I64);
1601 t2 = newTemp(Ity_I64);
1602
1603 assign(t0, binop(Iop_MullS32,
1604 unop(Iop_64to32, binop(Iop_Shr64,
1605 getIReg(ra), mkU8(32))),
1606 unop(Iop_64to32, binop(Iop_Shr64,
1607 getIReg(rb), mkU8(32)))));
1608 assign(t2, binop(Iop_Add64, getIReg(rd), mkexpr(t0)));
1609 MARK_REG_WB(rd, t2);
1610 break;
1611 case 166: /* "mula_hs_hu" */
1612 t0 = newTemp(Ity_I64);
1613 t1 = newTemp(Ity_I64);
1614 t2 = newTemp(Ity_I64);
1615 t3 = newTemp(Ity_I64);
1616 t4 = newTemp(Ity_I64);
1617 assign(t0, unop(Iop_32Sto64,
1618 unop(Iop_64to32,
1619 binop(Iop_Shr64, getIReg(ra), mkU8(32)))));
1620 assign(t1, binop(Iop_MullU32,
1621 unop(Iop_64to32, mkexpr(t0)),
1622 unop(Iop_64to32, binop(Iop_Shr64,
1623 getIReg(rb), mkU8(32)))));
1624 assign(t3, binop(Iop_MullU32,
1625 unop(Iop_64to32, binop(Iop_Shr64,
1626 mkexpr(t0),
1627 mkU8(32))),
1628 unop(Iop_64to32, binop(Iop_Shr64,
1629 getIReg(rb), mkU8(32)))));
1630 assign(t2, binop(Iop_Add64,
1631 mkexpr(t1),
1632 binop(Iop_Shl64,
1633 mkexpr(t3),
1634 mkU8(32))));
1635 assign(t4, binop(Iop_Add64, getIReg(rd), mkexpr(t2)));
1636 MARK_REG_WB(rd, t4);
1637 break;
1638 case 167: /* "mula_hs_ls" */
1639 t2 = newTemp(Ity_I64);
1640 t4 = newTemp(Ity_I64);
1641 assign(t2, binop(Iop_MullS32,
1642 unop(Iop_64to32,
1643 binop(Iop_Shr64,
1644 getIReg(ra),
1645 mkU8(32))),
1646 unop(Iop_64to32,
1647 getIReg(rb))));
1648 assign(t4, binop(Iop_Add64, getIReg(rd), mkexpr(t2)));
1649 MARK_REG_WB(rd, t4);
1650 break;
1651 case 168: /* "mula_hs_lu" */
1652 t0 = newTemp(Ity_I64);
1653 t1 = newTemp(Ity_I64);
1654 t2 = newTemp(Ity_I64);
1655 t3 = newTemp(Ity_I64);
1656 t4 = newTemp(Ity_I64);
1657 assign(t0, unop(Iop_32Sto64,
1658 unop(Iop_64to32,
1659 binop(Iop_Shr64, getIReg(ra), mkU8(32)))));
1660 assign(t1, binop(Iop_MullU32,
1661 unop(Iop_64to32, mkexpr(t0)),
1662 unop(Iop_64to32, getIReg(rb))));
1663 assign(t3, binop(Iop_MullU32,
1664 unop(Iop_64to32, binop(Iop_Shr64,
1665 mkexpr(t0),
1666 mkU8(32))),
1667 unop(Iop_64to32, getIReg(rb))));
1668 assign(t2, binop(Iop_Add64,
1669 mkexpr(t1),
1670 binop(Iop_Shl64,
1671 mkexpr(t3),
1672 mkU8(32))));
1673 assign(t4, binop(Iop_Add64, getIReg(rd), mkexpr(t2)));
1674 MARK_REG_WB(rd, t4);
1675 break;
1676 case 169: /* "mula_hu_hu" */
1677 use_dirty_helper = 1;
1678 break;
1679 case 170: /* "mula_hu_ls" */
1680 use_dirty_helper = 1;
1681 break;
1682 case 171: /* "mula_hu_lu" */
1683 t2 = newTemp(Ity_I64);
1684 assign(t2, binop(Iop_Add64,
1685 binop(Iop_MullU32,
1686 unop(Iop_64to32,
1687 binop(Iop_Shr64,
1688 getIReg(ra),
1689 mkU8(32))),
1690 unop(Iop_64to32,
1691 getIReg(rb))),
1692 getIReg(rd)));
1693 MARK_REG_WB(rd, t2);
1694 break;
1695 case 172: /* "mula_ls_ls" */
1696 t2 = newTemp(Ity_I64);
1697 assign(t2, binop(Iop_Add64,
1698 getIReg(rd),
1699 binop(Iop_MullS32,
1700 unop(Iop_64to32, getIReg(ra)),
1701 unop(Iop_64to32, getIReg(rb)))));
1702 MARK_REG_WB(rd, t2);
1703 break;
1704 case 173: /* "mula_ls_lu" */
1705 t0 = newTemp(Ity_I64);
1706 t1 = newTemp(Ity_I64);
1707 t2 = newTemp(Ity_I64);
1708 t3 = newTemp(Ity_I64);
1709
1710 assign(t0, unop(Iop_32Sto64,
1711 unop(Iop_64to32, getIReg(ra))));
1712 assign(t1, binop(Iop_MullU32,
1713 unop(Iop_64to32, mkexpr(t0)),
1714 unop(Iop_64to32, getIReg(rb))));
1715 assign(t3, binop(Iop_MullU32,
1716 unop(Iop_64to32, binop(Iop_Shr64,
1717 mkexpr(t0),
1718 mkU8(32))),
1719 unop(Iop_64to32, getIReg(rb))));
1720 assign(t2, binop(Iop_Add64,
1721 getIReg(rd),
1722 binop(Iop_Add64,
1723 mkexpr(t1),
1724 binop(Iop_Shl64,
1725 mkexpr(t3),
1726 mkU8(32)))));
1727 MARK_REG_WB(rd, t2);
1728 break;
1729 case 174: /* "mula_lu_lu" */
1730 t2 = newTemp(Ity_I64);
1731 assign(t2, binop(Iop_Add64,
1732 binop(Iop_MullU32,
1733 unop(Iop_64to32,
1734 getIReg(ra)),
1735 unop(Iop_64to32,
1736 getIReg(rb))),
1737 getIReg(rd)));
1738 MARK_REG_WB(rd, t2);
1739 break;
1740 case 175: /* "mulax" */
1741 t2 = newTemp(Ity_I64);
1742 assign(t2, unop(Iop_32Sto64,
1743 unop(Iop_64to32,
1744 binop(Iop_Add64,
1745 getIReg(rd),
1746 binop(Iop_MullU32,
1747 narrowTo(Ity_I32, getIReg(ra)),
1748 narrowTo(Ity_I32, getIReg(rb)))))));
1749 MARK_REG_WB(rd, t2);
1750 break;
1751 case 176: /* "mulx" */
1752 t2 = newTemp(Ity_I64);
1753 assign(t2, unop(Iop_32Sto64,
1754 unop(Iop_64to32,
1755 binop(Iop_MullU32,
1756 narrowTo(Ity_I32, getIReg(ra)),
1757 narrowTo(Ity_I32, getIReg(rb))))));
1758 MARK_REG_WB(rd, t2);
1759 break;
1760 case 177: /* "mz" */
1761 t2 = newTemp(Ity_I64);
1762 assign(t2, binop(Iop_And64,
1763 unop(Iop_1Sto64, binop(Iop_CmpEQ64,
1764 getIReg(ra),
1765 mkU64(0))),
1766 getIReg(rb)));
1767 MARK_REG_WB(rd, t2);
1768 break;
1769 case 178: /* "nap" */
1770 break;
1771 case 179: /* "nop" */
1772 break;
1773 case 180: /* "nor" */
1774 t2 = newTemp(Ity_I64);
1775 assign(t2, unop(Iop_Not64,
1776 binop(Iop_Or64,
1777 getIReg(ra),
1778 getIReg(rb))));
1779 MARK_REG_WB(rd, t2);
1780 break;
1781 case 181: /* "or" */
1782 t2 = newTemp(Ity_I64);
1783 assign(t2, binop(Iop_Or64,
1784 getIReg(ra),
1785 getIReg(rb)));
1786 MARK_REG_WB(rd, t2);
1787 break;
1788 case 182: /* "ori" */
1789 t2 = newTemp(Ity_I64);
1790 assign(t2, binop(Iop_Or64,
1791 getIReg(ra),
1792 mkU64(imm)));
1793 MARK_REG_WB(rd, t2);
1794 break;
1795 case 183:
1796 /* Fall-through */
1797 case 184:
1798 /* Fall-through */
1799 case 185:
1800 use_dirty_helper = 1;
1801 break;
1802 case 186: /* "rotl" */
1803 t0 = newTemp(Ity_I64);
1804 t1 = newTemp(Ity_I64);
1805 t2 = newTemp(Ity_I64);
1806 assign(t0, binop(Iop_Shl64,
1807 getIReg(ra),
1808 unop(Iop_64to8, getIReg(rb))));
1809 assign(t1, binop(Iop_Shr64,
1810 getIReg(ra),
1811 unop(Iop_64to8, binop(Iop_Sub64,
1812 mkU64(0),
1813 getIReg(rb)))));
1814 assign(t2, binop(Iop_Or64, mkexpr(t0), mkexpr(t1)));
1815 MARK_REG_WB(rd, t2);
1816 break;
1817 case 187: /* "rotli" */
1818 t0 = newTemp(Ity_I64);
1819 t1 = newTemp(Ity_I64);
1820 t2 = newTemp(Ity_I64);
1821 assign(t0, binop(Iop_Shl64,
1822 getIReg(ra),
1823 mkU8(imm)));
1824 assign(t1, binop(Iop_Shr64,
1825 getIReg(ra),
1826 mkU8(0 - imm)));
1827 assign(t2, binop(Iop_Or64, mkexpr(t0), mkexpr(t1)));
1828 MARK_REG_WB(rd, t2);
1829 break;
1830 case 188: /* "shl" */
1831 t2 = newTemp(Ity_I64);
1832 assign(t2, binop(Iop_Shl64,
1833 getIReg(ra),
1834 unop(Iop_64to8, getIReg(rb))));
1835 MARK_REG_WB(rd, t2);
1836
1837 break;
1838 case 189: /* "shl16insli" */
1839 t2 = newTemp(Ity_I64);
1840 t3 = newTemp(Ity_I64);
1841 assign(t3, binop(Iop_Shl64, getIReg(ra), mkU8(16)));
1842 imm &= 0xFFFFULL;
1843 if (imm & 0x8000)
1844 {
1845 t4 = newTemp(Ity_I64);
1846 assign(t4, mkU64(imm));
1847 assign(t2, binop(Iop_Add64, mkexpr(t3), mkexpr(t4)));
1848 }
1849 else
1850 {
1851 assign(t2, binop(Iop_Add64, mkexpr(t3), mkU64(imm)));
1852 }
1853 MARK_REG_WB(rd, t2);
1854
1855 break;
1856 case 190: /* "shl1add" */
1857 t2 = newTemp(Ity_I64);
1858 assign(t2, binop(Iop_Add64,
1859 binop(Iop_Shl64,
1860 getIReg(ra), mkU8(1)),
1861 getIReg(rb)));
1862
1863 MARK_REG_WB(rd, t2);
1864 break;
1865 case 191: /* "shl1addx" */
1866 t2 = newTemp(Ity_I64);
1867 assign(t2,
1868 unop(Iop_32Sto64,
1869 unop(Iop_64to32,
1870 binop(Iop_Add64,
1871 binop(Iop_Shl64,
1872 getIReg(ra), mkU8(1)),
1873 getIReg(rb)))));
1874 MARK_REG_WB(rd, t2);
1875 break;
1876 case 192: /* "shl2add" */
1877 t2 = newTemp(Ity_I64);
1878 assign(t2, binop(Iop_Add64,
1879 binop(Iop_Shl64,
1880 getIReg(ra), mkU8(2)),
1881 getIReg(rb)));
1882
1883 MARK_REG_WB(rd, t2);
1884
1885 break;
1886 case 193: /* "shl2addx" */
1887 t2 = newTemp(Ity_I64);
1888 assign(t2,
1889 unop(Iop_32Sto64,
1890 unop(Iop_64to32,
1891 binop(Iop_Add64,
1892 binop(Iop_Shl64,
1893 getIReg(ra), mkU8(2)),
1894 getIReg(rb)))));
1895 MARK_REG_WB(rd, t2);
1896
1897 break;
1898 case 194: /* "shl3add" */
1899 t2 = newTemp(Ity_I64);
1900 assign(t2, binop(Iop_Add64,
1901 binop(Iop_Shl64,
1902 getIReg(ra), mkU8(3)),
1903 getIReg(rb)));
1904
1905 MARK_REG_WB(rd, t2);
1906 break;
1907 case 195: /* "shl3addx" */
1908 t2 = newTemp(Ity_I64);
1909 assign(t2,
1910 unop(Iop_32Sto64,
1911 unop(Iop_64to32,
1912 binop(Iop_Add64,
1913 binop(Iop_Shl64,
1914 getIReg(ra), mkU8(3)),
1915 getIReg(rb)))));
1916 MARK_REG_WB(rd, t2);
1917 break;
1918 case 196: /* "shli" */
1919 t2 = newTemp(Ity_I64);
1920 assign(t2, binop(Iop_Shl64, getIReg(ra),
1921 mkU8(imm)));
1922 MARK_REG_WB(rd, t2);
1923 break;
1924 case 197: /* "shlx" */
1925 t2 = newTemp(Ity_I64);
1926 assign(t2, unop(Iop_32Sto64,
1927 binop(Iop_Shl32,
1928 narrowTo(Ity_I32, getIReg(ra)),
1929 narrowTo(Ity_I8, getIReg(rb)))));
1930 MARK_REG_WB(rd, t2);
1931 break;
1932 case 198: /* "shlxi" */
1933 t2 = newTemp(Ity_I64);
1934 assign(t2, signExtend(binop(Iop_Shl32,
1935 narrowTo(Ity_I32, getIReg(ra)),
1936 mkU8(imm)),
1937 32));
1938 MARK_REG_WB(rd, t2);
1939 break;
1940 case 199: /* "shrs" */
1941 t2 = newTemp(Ity_I64);
1942 assign(t2, binop(Iop_Sar64, getIReg(ra),
1943 narrowTo(Ity_I8, getIReg(rb))));
1944
1945 MARK_REG_WB(rd, t2);
1946 break;
1947 case 200: /* "shrsi" */
1948 t2 = newTemp(Ity_I64);
1949 assign(t2, binop(Iop_Sar64, getIReg(ra),
1950 mkU8(imm)));
1951
1952 MARK_REG_WB(rd, t2);
1953 break;
1954 case 201: /* "shru" */
1955 t2 = newTemp(Ity_I64);
1956 assign(t2, binop(Iop_Shr64,
1957 getIReg(ra),
1958 narrowTo(Ity_I8, (getIReg(rb)))));
1959
1960 MARK_REG_WB(rd, t2);
1961 break;
1962 case 202: /* "shrui" */
1963 t2 = newTemp(Ity_I64);
1964 assign(t2, binop(Iop_Shr64, getIReg(ra), mkU8(imm)));
1965
1966 MARK_REG_WB(rd, t2);
1967 break;
1968 case 203: /* "shrux" */
1969 t2 = newTemp(Ity_I64);
1970 assign(t2, unop(Iop_32Sto64,
1971 (binop(Iop_Shr32,
1972 narrowTo(Ity_I32, getIReg(ra)),
1973 narrowTo(Ity_I8, getIReg(rb))))));
1974 MARK_REG_WB(rd, t2);
1975 break;
1976 case 204: /* "shruxi" */
1977 t2 = newTemp(Ity_I64);
1978 assign(t2, unop(Iop_32Sto64,
1979 (binop(Iop_Shr32,
1980 narrowTo(Ity_I32, getIReg(ra)),
1981 mkU8(imm)))));
1982 MARK_REG_WB(rd, t2);
1983 break;
1984 case 205: /* "shufflebytes" */
1985 use_dirty_helper = 1;
1986 break;
1987 case 206: /* "st" */
1988 store(getIReg(ra), getIReg(rb));
1989 break;
1990 case 207: /* "st1" */
1991 store(getIReg(ra), narrowTo(Ity_I8, getIReg(rb)));
1992 break;
1993 case 208: /* "st1_add" */
1994 t2 = newTemp(Ity_I64);
1995 store(getIReg(opd[0]), narrowTo(Ity_I8, getIReg(opd[1])));
1996 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
1997 MARK_REG_WB(opd[0], t2);
1998 break;
1999 case 209: /* "st2" */
2000 store(getIReg(ra), narrowTo(Ity_I16, getIReg(rb)));
2001 break;
2002 case 210: /* "st2_add" */
2003 t2 = newTemp(Ity_I64);
2004 store(getIReg(opd[0]), narrowTo(Ity_I16, getIReg(opd[1])));
2005 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
2006 MARK_REG_WB(opd[0], t2);
2007 break;
2008 case 211: /* "st4" */
2009 store(getIReg(ra), narrowTo(Ity_I32, getIReg(rb)));
2010 break;
2011 case 212: /* "st4_add" */
2012 t2 = newTemp(Ity_I64);
2013 store(getIReg(opd[0]), narrowTo(Ity_I32, getIReg(opd[1])));
2014 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
2015 MARK_REG_WB(opd[0], t2);
2016 break;
2017 case 213: /* "st_add" */
2018 t2 = newTemp(Ity_I64);
2019 store(getIReg(opd[0]), getIReg(opd[1]));
2020 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
2021 MARK_REG_WB(opd[0], t2);
2022 break;
2023 case 214: /* "stnt" */
2024 store(getIReg(ra), getIReg(rb));
2025 break;
2026 case 215: /* "stnt1" */
2027 store(getIReg(ra), narrowTo(Ity_I8, getIReg(rb)));
2028 break;
2029 case 216: /* "stnt1_add" */
2030 t2 = newTemp(Ity_I64);
2031 store(getIReg(opd[0]), narrowTo(Ity_I8, getIReg(opd[1])));
2032 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
2033 MARK_REG_WB(opd[0], t2);
2034 break;
2035 case 217: /* "stnt2" */
2036 store(getIReg(ra), narrowTo(Ity_I16, getIReg(rb)));
2037 break;
2038 case 218: /* "stnt2_add" */
2039 t2 = newTemp(Ity_I64);
2040 store(getIReg(opd[0]), narrowTo(Ity_I16, getIReg(opd[1])));
2041 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
2042 MARK_REG_WB(opd[0], t2);
2043 break;
2044 case 219: /* "stnt4" */
2045 store(getIReg(ra), narrowTo(Ity_I32, getIReg(rb)));
2046 break;
2047 case 220: /* "stnt4_add" */
2048 t2 = newTemp(Ity_I64);
2049 store(getIReg(opd[0]), narrowTo(Ity_I32, getIReg(opd[1])));
2050 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
2051 MARK_REG_WB(opd[0], t2);
2052 break;
2053 case 221: /* "stnt_add" */
2054 t2 = newTemp(Ity_I64);
2055 store(getIReg(opd[0]), getIReg(opd[1]));
2056 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
2057 MARK_REG_WB(opd[0], t2);
2058 break;
2059 case 222: /* "sub" */
2060 t2 = newTemp(Ity_I64);
2061 assign(t2, binop(Iop_Sub64, getIReg(ra),
2062 getIReg(rb)));
2063 MARK_REG_WB(rd, t2);
2064 break;
2065 case 223: /* "subx" */
2066 t2 = newTemp(Ity_I64);
2067 assign(t2, unop(Iop_32Sto64,
2068 binop(Iop_Sub32,
2069 narrowTo(Ity_I32, getIReg(ra)),
2070 narrowTo(Ity_I32, getIReg(rb)))));
2071 MARK_REG_WB(rd, t2);
2072 break;
2073 case 224: /* "subxsc" */
2074 use_dirty_helper = 1;
2075 break;
2076 case 225: /* "swint0" */
2077 vex_printf( "\n *** swint0 ***\n");
2078 vassert(0);
2079 break;
2080 case 226: /* "swint1" */
2081 next = mkU64(guest_PC_curr_instr + 8);
2082 jumpkind = Ijk_Sys_syscall;
2083 break;
2084 case 227: /* "swint2" */
2085 vex_printf( "\n *** swint2 ***\n");
2086 vassert(0);
2087 break;
2088 case 228: /* "swint3" */
2089 vex_printf( "\n *** swint3 ***\n");
2090 vassert(0);
2091 break;
2092 case 229:
2093 /* Fall-through */
2094 case 230:
2095 /* Fall-through */
2096 case 231:
2097 /* Fall-through */
2098 case 232:
2099 /* Fall-through */
2100 case 233:
2101 use_dirty_helper = 1;
2102 break;
2103 case 234:
2104 opd[3] = V1EXP(opd[3]);
2105 use_dirty_helper = 1;
2106 break;
2107 case 235:
2108 /* Fall-through */
2109 case 236:
2110 /* Fall-through */
2111 case 237:
2112 use_dirty_helper = 1;
2113 break;
2114 case 238: /* "v1cmpeq" */
2115 t2 = newTemp(Ity_I64);
2116 assign(t2, binop(Iop_CmpEQ8x8, getIReg(ra),
2117 getIReg(rb)));
2118 MARK_REG_WB(rd, t2);
2119 break;
2120 case 239: /* "v1cmpeqi" */
2121 t2 = newTemp(Ity_I64);
2122 assign(t2, binop(Iop_CmpEQ8x8, getIReg(ra),
2123 mkU64(imm)));
2124
2125 MARK_REG_WB(rd, t2);
2126 break;
2127 case 240:
2128 /* Fall-through */
2129 case 241:
2130 /* Fall-through */
2131 case 242:
2132 use_dirty_helper = 1;
2133 break;
2134 case 243:
2135 opd[3] = V1EXP(opd[3]);
2136 use_dirty_helper = 1;
2137 break;
2138 /* (no fall-through: case 243 above ends with break) */
2139 case 244:
2140 use_dirty_helper = 1;
2141 break;
2142 case 245:
2143 opd[3] = V1EXP(opd[3]);
2144 use_dirty_helper = 1;
2145 break;
2146 case 246: /* "v1cmpne" */
2147 t2 = newTemp(Ity_I64);
2148 assign(t2, binop(Iop_CmpEQ8x8,
2149 binop(Iop_CmpEQ8x8, getIReg(ra),
2150 getIReg(rb)),
2151 getIReg(63)));
2152 MARK_REG_WB(rd, t2);
2153 break;
2154 case 247:
2155 /* Fall-through */
2156 case 248:
2157 /* Fall-through */
2158 case 249:
2159 /* Fall-through */
2160 case 250:
2161 /* Fall-through */
2162 case 251:
2163 /* Fall-through */
2164 case 252:
2165 /* Fall-through */
2166 case 253:
2167 /* Fall-through */
2168 case 254:
2169 /* Fall-through */
2170 case 255:
2171 /* Fall-through */
2172 case 256:
2173 /* Fall-through */
2174 case 257:
2175 /* Fall-through */
2176 case 258:
2177 /* Fall-through */
2178 case 259:
2179 use_dirty_helper = 1;
2180 break;
2181 case 260:
2182 opd[3] = V1EXP(opd[3]);
2183 use_dirty_helper = 1;
2184 break;
2185 case 261:
2186 use_dirty_helper = 1;
2187 break;
2188 case 262:
2189 opd[3] = V1EXP(opd[3]);
2190 use_dirty_helper = 1;
2191 break;
2192 case 263:
2193 /* Fall-through */
2194 case 264:
2195 /* Fall-through */
2196 case 265:
2197 /* Fall-through */
2198 case 266:
2199 /* Fall-through */
2200 case 267:
2201 /* Fall-through */
2202 case 268:
2203 /* Fall-through */
2204 case 269:
2205 /* Fall-through */
2206 case 270:
2207 use_dirty_helper = 1;
2208 break;
2209 case 271:
2210 opd[3] = V1EXP(opd[3]);
2211 use_dirty_helper = 1;
2212 break;
2213 case 272:
2214 use_dirty_helper = 1;
2215 break;
2216 case 273:
2217 opd[3] = V1EXP(opd[3]);
2218 use_dirty_helper = 1;
2219 break;
2220 case 274:
2221 use_dirty_helper = 1;
2222 break;
2223 case 275: /* "v1shrui" */
2224 t2 = newTemp(Ity_I64);
2225 assign(t2, binop(Iop_Shr8x8,
2226 getIReg(ra),
2227 mkU64(imm)));
2228 MARK_REG_WB(rd, t2);
2229 break;
2230 case 276:
2231 /* Fall-through */
2232 case 277:
2233 /* Fall-through */
2234 case 278:
2235 use_dirty_helper = 1;
2236 break;
2237 case 279:
2238 opd[3] = V2EXP(opd[3]);
2239 use_dirty_helper = 1;
2240 break;
2241 case 280:
2242 /* Fall-through */
2243 case 281:
2244 /* Fall-through */
2245 case 282:
2246 /* Fall-through */
2247 case 283:
2248 use_dirty_helper = 1;
2249 break;
2250 case 284:
2251 opd[3] = V2EXP(opd[3]);
2252 use_dirty_helper = 1;
2253 break;
2254 case 285:
2255 /* Fall-through */
2256 case 286:
2257 /* Fall-through */
2258 case 287:
2259 use_dirty_helper = 1;
2260 break;
2261 case 288:
2262 opd[3] = V2EXP(opd[3]);
2263 use_dirty_helper = 1;
2264 break;
2265 case 289:
2266 use_dirty_helper = 1;
2267 break;
2268 case 290:
2269 opd[3] = V2EXP(opd[3]);
2270 use_dirty_helper = 1;
2271 break;
2272 case 291:
2273 /* Fall-through */
2274 case 292:
2275 /* Fall-through */
2276 case 293:
2277 /* Fall-through */
2278 case 294:
2279 /* Fall-through */
2280 case 295:
2281 /* Fall-through */
2282 case 296:
2283 use_dirty_helper = 1;
2284 break;
2285 case 297:
2286 opd[3] = V2EXP(opd[3]);
2287 use_dirty_helper = 1;
2288 break;
2289 case 298:
2290 use_dirty_helper = 1;
2291 break;
2292 case 299:
2293 opd[3] = V2EXP(opd[3]);
2294 use_dirty_helper = 1;
2295 break;
2296 case 300:
2297 /* Fall-through */
2298 case 301:
2299 /* Fall-through */
2300 case 302:
2301 /* Fall-through */
2302 case 303:
2303 /* Fall-through */
2304 case 304:
2305 /* Fall-through */
2306 case 305:
2307 /* Fall-through */
2308 case 306:
2309 /* Fall-through */
2310 case 307:
2311 /* Fall-through */
2312 case 308:
2313 /* Fall-through */
2314 case 309:
2315 /* Fall-through */
2316 case 310:
2317 /* Fall-through */
2318 case 311:
2319 /* Fall-through */
2320 case 312:
2321 use_dirty_helper = 1;
2322 break;
2323 case 313:
2324 opd[3] = V2EXP(opd[3]);
2325 use_dirty_helper = 1;
2326 break;
2327 case 314:
2328 /* Fall-through */
2329 case 315:
2330 use_dirty_helper = 1;
2331 break;
2332 case 316:
2333 opd[3] = V2EXP(opd[3]);
2334 use_dirty_helper = 1;
2335 break;
2336 case 317:
2337 use_dirty_helper = 1;
2338 break;
2339 case 318:
2340 opd[3] = V2EXP(opd[3]);
2341 use_dirty_helper = 1;
2342 break;
2343 case 319:
2344 /* Fall-through */
2345 case 320:
2346 /* Fall-through */
2347 case 321:
2348 /* Fall-through */
2349 case 322:
2350 /* Fall-through */
2351 case 323:
2352 use_dirty_helper = 1;
2353 break;
2354 case 324: /* "v4int_l" */
2355 t2 = newTemp(Ity_I64);
2356 assign(t2, binop(Iop_Or64,
2357 binop(Iop_Shl64,
2358 getIReg(ra),
2359 mkU8(32)),
2360 binop(Iop_And64,
2361 getIReg(rb),
2362 mkU64(0xFFFFFFFF))));
2363 MARK_REG_WB(rd, t2);
2364 break;
2365 case 325:
2366 /* Fall-through */
2367 case 326:
2368 /* Fall-through */
2369 case 327:
2370 /* Fall-through */
2371 case 328:
2372 /* Fall-through */
2373 case 329:
2374 /* Fall-through */
2375 case 330:
2376 /* Fall-through */
2377 case 331:
2378 use_dirty_helper = 1;
2379 break;
2380 case 332: /* "wh64" */ /* Ignore store hint */
2381 break;
2382 case 333: /* "xor" */
2383 t2 = newTemp(Ity_I64);
2384 assign(t2, binop(Iop_Xor64,
2385 getIReg(ra),
2386 getIReg(rb)));
2387 MARK_REG_WB(rd, t2);
2388 break;
2389 case 334: /* "xori" */
2390 t2 = newTemp(Ity_I64);
2391 assign(t2, binop(Iop_Xor64,
2392 getIReg(ra),
2393 mkU64(imm)));
2394 MARK_REG_WB(rd, t2);
2395 break;
2396 case 335: /* "(null)" */ /* ignore */
2397 break;
2398 default:
2399
2400 decode_failure:
2401 vex_printf("error: %d\n", (Int)opcode);
2402
2403 /* All decode failures end up here. */
2404 vex_printf("vex tilegx->IR: unhandled instruction: "
2405 "%s 0x%llx 0x%llx 0x%llx 0x%llx\n",
2406 decoded[n].opcode->name,
2407 opd[0], opd[1], opd[2], opd[3]);
2408
2409 /* Tell the dispatcher that this insn cannot be decoded, and so has
2410 not been executed, and (is currently) the next to be executed. */
2411 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc),
2412 mkU64(guest_PC_curr_instr)));
2413 dres.whatNext = Dis_StopHere;
2414 dres.len = 0;
2415 return dres;
2416 }
2417
2418 /* Hook the dirty helper for rare instructions. */
2419 if (use_dirty_helper)
2420 {
2421 Int i = 0;
2422 Int wbc = 0;
2423 IRExpr *opc_oprand[5];
2424
2425 opc_oprand[0] = mkU64(opcode);
2426
2427 /* Get the operand registers or immediate. */
2428 for (i = 0 ; i < 4; i++)
2429 {
2430 opc_oprand[i + 1] = NULL;
2431
2432 if (opd_dst_map & (1ULL << i))
2433 {
2434 tb[wbc] = newTemp(Ity_I64);
2435 wbc++;
2436 opc_oprand[i + 1] = getIReg(opd[i]);
2437 }
2438 else if (opd_imm_map & (1ULL << i))
2439 opc_oprand[i + 1] = mkU64(opd[i]);
2440 else if (opd_src_map & (1ULL << i))
2441 opc_oprand[i + 1] = getIReg(opd[i]);
2442 else
2443 opc_oprand[i + 1] = mkU64(0xfeee);
2444 }
2445
2446 IRExpr **args = mkIRExprVec_5(opc_oprand[0], opc_oprand[1],
2447 opc_oprand[2], opc_oprand[3],
2448 opc_oprand[4]);
2449 IRDirty *genIR = NULL;
2450
2451 switch (wbc) {
2452 case 0:
2453 {
2454 genIR = unsafeIRDirty_0_N (0/*regparms*/,
2455 "tilegx_dirtyhelper_gen",
2456 &tilegx_dirtyhelper_gen,
2457 args);
2458 }
2459 break;
2460 case 1:
2461 {
2462 genIR = unsafeIRDirty_1_N (tb[0],
2463 0/*regparms*/,
2464 "tilegx_dirtyhelper_gen",
2465 &tilegx_dirtyhelper_gen,
2466 args);
2467 }
2468 break;
2469 default:
2470 vex_printf("opc = %d\n", (Int)opcode);
2471 vassert(0);
2472 }
2473
2474 stmt(IRStmt_Dirty(genIR));
2475
2476 wbc = 0;
2477 for (i = 0 ; i < 4; i++)
2478 {
2479 if(opd_dst_map & (1 << i))
2480 {
2481 /* Queue the writeback destination registers. */
2482 MARK_REG_WB(opd[i], tb[wbc]);
2483 wbc++;
2484 }
2485 }
2486 }
2487 }
2488
2489   /* Write back registers for a bundle. Note that all source registers
2490      for all instructions in a bundle must be read before any destination is
2491      written, because this is a VLIW processor. */
2492 for (n = 0; n < rd_wb_index; n++)
2493 putIReg(rd_wb_reg[n], mkexpr(rd_wb_temp[n]));
2494
2495   /* Finally, add the branch IR if applicable; at most one branch per bundle. */
2496 if (bstmt) {
2497 stmt(bstmt);
2498 dres.whatNext = Dis_StopHere;
2499
2500 dres.jk_StopHere = jumpkind;
2501 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc),
2502 mkU64(guest_PC_curr_instr + 8)));
2503 } else if (next) {
2504 if (steering_pc != -1ULL) {
2505 if (resteerOkFn(callback_opaque, steering_pc)) {
2506 dres.whatNext = Dis_ResteerU;
2507 dres.continueAt = steering_pc;
2508 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc),
2509 mkU64(steering_pc)));
2510 } else {
2511 dres.whatNext = Dis_StopHere;
2512 dres.jk_StopHere = jumpkind;
2513 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc),
2514 mkU64(steering_pc)));
2515 }
2516 } else {
2517 dres.whatNext = Dis_StopHere;
2518 dres.jk_StopHere = jumpkind;
2519 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc), next));
2520 }
2521 } else {
2522      /* By default dres.whatNext = Dis_Continue. */
2523 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc),
2524 mkU64(guest_PC_curr_instr + 8)));
2525 }
2526
2527 irsb->jumpkind = Ijk_Boring;
2528 irsb->next = NULL;
2529 dres.len = 8;
2530
2531 decode_success:
2532
2533 return dres;
2534 }
2535
2536 /*------------------------------------------------------------*/
2537 /*--- Top-level fn ---*/
2538 /*------------------------------------------------------------*/
2539
2540 /* Disassemble a single instruction into IR. The instruction
2541 is located in host memory at &guest_code[delta]. */
2542
2543 DisResult
disInstr_TILEGX(IRSB * irsb_IN,Bool (* resteerOkFn)(void *,Addr),Bool resteerCisOk,void * callback_opaque,const UChar * guest_code_IN,Long delta,Addr guest_IP,VexArch guest_arch,const VexArchInfo * archinfo,const VexAbiInfo * abiinfo,VexEndness host_endness_IN,Bool sigill_diag_IN)2544 disInstr_TILEGX ( IRSB* irsb_IN,
2545 Bool (*resteerOkFn) (void *, Addr),
2546 Bool resteerCisOk,
2547 void* callback_opaque,
2548 const UChar* guest_code_IN,
2549 Long delta,
2550 Addr guest_IP,
2551 VexArch guest_arch,
2552 const VexArchInfo* archinfo,
2553 const VexAbiInfo* abiinfo,
2554 VexEndness host_endness_IN,
2555 Bool sigill_diag_IN )
2556 {
2557 DisResult dres;
2558
2559 /* Set globals (see top of this file) */
2560 vassert(guest_arch == VexArchTILEGX);
2561
2562 guest_code = (UChar*)(Addr)guest_code_IN;
2563 irsb = irsb_IN;
2564 host_endness = host_endness_IN;
2565 guest_PC_curr_instr = (Addr64) guest_IP;
2566 guest_PC_bbstart = (Addr64) toUInt(guest_IP - delta);
2567
2568 dres = disInstr_TILEGX_WRK(resteerOkFn, resteerCisOk,
2569 callback_opaque,
2570 delta, archinfo, abiinfo, sigill_diag_IN);
2571
2572 return dres;
2573 }
2574
2575 /*--------------------------------------------------------------------*/
2576 /*--- end guest_tilegx_toIR.c ---*/
2577 /*--------------------------------------------------------------------*/
2578