/*---------------------------------------------------------------*/
/*--- begin                                guest_ppc_helpers.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2013 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "libvex_basictypes.h"
#include "libvex_emnote.h"
#include "libvex_guest_ppc32.h"
#include "libvex_guest_ppc64.h"
#include "libvex_ir.h"
#include "libvex.h"

#include "main_util.h"
#include "main_globals.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_ppc_defs.h"


/* This file contains helper functions for ppc32 and ppc64 guest code.
   Calls to these functions are generated by the back end.  These
   calls are of course in the host machine code and this file will be
   compiled to host machine code, so that all makes sense.

   Only change the signatures of these helper functions very
   carefully.  If you change the signature here, you'll have to change
   the parameters passed to it in the IR calls constructed by
   guest-ppc/toIR.c.
*/


/*---------------------------------------------------------------*/
/*--- Misc integer helpers.                                   ---*/
/*---------------------------------------------------------------*/

/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (non-referentially-transparent) */
/* Horrible hack.  On non-ppc platforms, return 1. */
/* Reads a complete, consistent 64-bit TB value. */
ULong ppcg_dirtyhelper_MFTB ( void )
{
# if defined(__powerpc__)
   ULong res;
   UInt lo, hi1, hi2;
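   /* Read TBU, then TB, then TBU again; if the two TBU reads differ,
      the low 32 bits wrapped between the reads, so retry until a
      consistent 64-bit value is obtained. */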
   while (1) {
      __asm__ __volatile__ ("\n"
         "\tmftbu %0\n"
         "\tmftb %1\n"
         "\tmftbu %2\n"
         : "=r" (hi1), "=r" (lo), "=r" (hi2)
      );
      if (hi1 == hi2) break;
   }
   res = ((ULong)hi1) << 32;
   res |= (ULong)lo;
   return res;
# else
   return 1ULL;
# endif
}


/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (non-referentially transparent) */
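/* Reads SPR 268 (Time Base lower half, TBL) when r269 is zero, or
   SPR 269 (Time Base upper half, TBU) when it is nonzero. */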
UInt ppc32g_dirtyhelper_MFSPR_268_269 ( UInt r269 )
{
# if defined(__powerpc__)
   UInt spr;
   if (r269) {
      __asm__ __volatile__("mfspr %0,269" : "=b"(spr));
   } else {
      __asm__ __volatile__("mfspr %0,268" : "=b"(spr));
   }
   return spr;
# else
   return 0;
# endif
}


/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (I'm not really sure what the side effects are) */
UInt ppc32g_dirtyhelper_MFSPR_287 ( void )
{
# if defined(__powerpc__)
   UInt spr;
   __asm__ __volatile__("mfspr %0,287" : "=b"(spr));
   return spr;
# else
   return 0;
# endif
}


/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (writes guest state) */
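/* Implements the lvsl/lvsr permute-vector computation: copies 16
   consecutive bytes from the 0x00..0x1F table below into the vector
   register at guest-state offset vD_off, starting at byte sh for a
   left shift (lvsl) or at byte 16-sh for a right shift (lvsr). */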
void ppc32g_dirtyhelper_LVS ( VexGuestPPC32State* gst,
                              UInt vD_off, UInt sh, UInt shift_right )
{
   static
   UChar ref[32] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                     0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
                     0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                     0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F };
   U128* pU128_src;
   U128* pU128_dst;

   vassert( vD_off      <= sizeof(VexGuestPPC32State)-8 );
   vassert( sh          <= 15 );
   vassert( shift_right <= 1 );
   if (shift_right)
      sh = 16-sh;
   /* else shift left */

   pU128_src = (U128*)&ref[sh];
   pU128_dst = (U128*)( ((UChar*)gst) + vD_off );

   (*pU128_dst)[0] = (*pU128_src)[0];
   (*pU128_dst)[1] = (*pU128_src)[1];
   (*pU128_dst)[2] = (*pU128_src)[2];
   (*pU128_dst)[3] = (*pU128_src)[3];
}

/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (writes guest state) */
void ppc64g_dirtyhelper_LVS ( VexGuestPPC64State* gst,
                              UInt vD_off, UInt sh, UInt shift_right,
                              UInt endness )
{
   UChar ref[32];
   ULong i;
   Int k;
   /* ref[] used to be a static const array, but this doesn't work on
      ppc64 because VEX doesn't load the TOC pointer for the call here,
      and so we wind up picking up some totally random other data.
      (It's a wonder we don't segfault.)  So, just to be clear, this
      "fix" (vex r2073) is really a kludgearound for the fact that
      VEX's 64-bit ppc code generation doesn't provide a valid TOC
      pointer for helper function calls.  Ick.  (Bug 250038) */
   for (i = 0; i < 32; i++) ref[i] = i;

   U128* pU128_src;
   U128* pU128_dst;

   vassert( vD_off      <= sizeof(VexGuestPPC64State)-8 );
   vassert( sh          <= 15 );
   vassert( shift_right <= 1 );
   if (shift_right)
      sh = 16-sh;
   /* else shift left */

   pU128_src = (U128*)&ref[sh];
   pU128_dst = (U128*)( ((UChar*)gst) + vD_off );

   if ((0x1 & endness) == 0x0) {
      /* Little endian */
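      /* Store the 16 permute bytes into the destination in reverse
         order: dstp[15] receives the first source byte and dstp[0]
         the last. */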
      unsigned char *srcp, *dstp;
      srcp = (unsigned char *)pU128_src;
      dstp = (unsigned char *)pU128_dst;
      for (k = 15; k >= 0; k--, srcp++)
         dstp[k] = *srcp;
   } else {
      (*pU128_dst)[0] = (*pU128_src)[0];
      (*pU128_dst)[1] = (*pU128_src)[1];
      (*pU128_dst)[2] = (*pU128_src)[2];
      (*pU128_dst)[3] = (*pU128_src)[3];
   }
}


/* Helper-function specialiser. */

IRExpr* guest_ppc32_spechelper ( const HChar* function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int n_precedingStmts )
{
   return NULL;
}

IRExpr* guest_ppc64_spechelper ( const HChar* function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int n_precedingStmts )
{
   return NULL;
}


/*----------------------------------------------*/
/*--- The exported fns ..                    ---*/
/*----------------------------------------------*/

/* VISIBLE TO LIBVEX CLIENT */
UInt LibVEX_GuestPPC32_get_CR ( /*IN*/const VexGuestPPC32State* vex_state )
{
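   /* Each 4-bit CR field n is stored split in the guest state:
      guest_CRn_321 holds the field's top three bits (LT/GT/EQ) in bit
      positions 3..1, and guest_CRn_0 holds the bottom (SO) bit.  The
      FIELD macro reassembles field n and places it in its architected
      position, field 0 occupying the most significant nibble.  For
      example, if field 0 is 0b0100 (GT set) and all other fields are
      zero, the result is 0x40000000. */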
# define FIELD(_n)                                    \
      ( ( (UInt)                                      \
           ( (vex_state->guest_CR##_n##_321 & (7<<1)) \
             | (vex_state->guest_CR##_n##_0 & 1)      \
           )                                          \
        )                                             \
        << (4 * (7-(_n)))                             \
      )

   return
      FIELD(0) | FIELD(1) | FIELD(2) | FIELD(3)
      | FIELD(4) | FIELD(5) | FIELD(6) | FIELD(7);

# undef FIELD
}


/* VISIBLE TO LIBVEX CLIENT */
/* Note: %CR is 32 bits even for ppc64 */
UInt LibVEX_GuestPPC64_get_CR ( /*IN*/const VexGuestPPC64State* vex_state )
{
# define FIELD(_n)                                    \
      ( ( (UInt)                                      \
           ( (vex_state->guest_CR##_n##_321 & (7<<1)) \
             | (vex_state->guest_CR##_n##_0 & 1)      \
           )                                          \
        )                                             \
        << (4 * (7-(_n)))                             \
      )

   return
      FIELD(0) | FIELD(1) | FIELD(2) | FIELD(3)
      | FIELD(4) | FIELD(5) | FIELD(6) | FIELD(7);

# undef FIELD
}


/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC32_put_CR ( UInt cr_native,
                                /*OUT*/VexGuestPPC32State* vex_state )
{
   UInt t;

# define FIELD(_n)                                             \
      do {                                                     \
         t = cr_native >> (4*(7-(_n)));                        \
         vex_state->guest_CR##_n##_0   = toUChar(t & 1);       \
         vex_state->guest_CR##_n##_321 = toUChar(t & (7<<1));  \
      } while (0)

   FIELD(0);
   FIELD(1);
   FIELD(2);
   FIELD(3);
   FIELD(4);
   FIELD(5);
   FIELD(6);
   FIELD(7);

# undef FIELD
}


/* VISIBLE TO LIBVEX CLIENT */
/* Note: %CR is 32 bits even for ppc64 */
void LibVEX_GuestPPC64_put_CR ( UInt cr_native,
                                /*OUT*/VexGuestPPC64State* vex_state )
{
   UInt t;

# define FIELD(_n)                                             \
      do {                                                     \
         t = cr_native >> (4*(7-(_n)));                        \
         vex_state->guest_CR##_n##_0   = toUChar(t & 1);       \
         vex_state->guest_CR##_n##_321 = toUChar(t & (7<<1));  \
      } while (0)

   FIELD(0);
   FIELD(1);
   FIELD(2);
   FIELD(3);
   FIELD(4);
   FIELD(5);
   FIELD(6);
   FIELD(7);

# undef FIELD
}


/* VISIBLE TO LIBVEX CLIENT */
UInt LibVEX_GuestPPC32_get_XER ( /*IN*/const VexGuestPPC32State* vex_state )
{
   UInt w = 0;
   w |= ( ((UInt)vex_state->guest_XER_BC) & 0xFF );
   w |= ( (((UInt)vex_state->guest_XER_SO) & 0x1) << 31 );
   w |= ( (((UInt)vex_state->guest_XER_OV) & 0x1) << 30 );
   w |= ( (((UInt)vex_state->guest_XER_CA) & 0x1) << 29 );
   return w;
}


/* VISIBLE TO LIBVEX CLIENT */
/* Note: %XER is 32 bits even for ppc64 */
UInt LibVEX_GuestPPC64_get_XER ( /*IN*/const VexGuestPPC64State* vex_state )
{
   UInt w = 0;
   w |= ( ((UInt)vex_state->guest_XER_BC) & 0xFF );
   w |= ( (((UInt)vex_state->guest_XER_SO) & 0x1) << 31 );
   w |= ( (((UInt)vex_state->guest_XER_OV) & 0x1) << 30 );
   w |= ( (((UInt)vex_state->guest_XER_CA) & 0x1) << 29 );
   return w;
}


/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC32_put_XER ( UInt xer_native,
                                 /*OUT*/VexGuestPPC32State* vex_state )
{
   vex_state->guest_XER_BC = toUChar(xer_native & 0xFF);
   vex_state->guest_XER_SO = toUChar((xer_native >> 31) & 0x1);
   vex_state->guest_XER_OV = toUChar((xer_native >> 30) & 0x1);
   vex_state->guest_XER_CA = toUChar((xer_native >> 29) & 0x1);
}

/* VISIBLE TO LIBVEX CLIENT */
/* Note: %XER is 32 bits even for ppc64 */
void LibVEX_GuestPPC64_put_XER ( UInt xer_native,
                                 /*OUT*/VexGuestPPC64State* vex_state )
{
   vex_state->guest_XER_BC = toUChar(xer_native & 0xFF);
   vex_state->guest_XER_SO = toUChar((xer_native >> 31) & 0x1);
   vex_state->guest_XER_OV = toUChar((xer_native >> 30) & 0x1);
   vex_state->guest_XER_CA = toUChar((xer_native >> 29) & 0x1);
}

/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC32_initialise ( /*OUT*/VexGuestPPC32State* vex_state )
{
   Int i;
   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER  = 0;
   vex_state->pad3 = 0;
   vex_state->pad4 = 0;

   vex_state->guest_GPR0  = 0;
   vex_state->guest_GPR1  = 0;
   vex_state->guest_GPR2  = 0;
   vex_state->guest_GPR3  = 0;
   vex_state->guest_GPR4  = 0;
   vex_state->guest_GPR5  = 0;
   vex_state->guest_GPR6  = 0;
   vex_state->guest_GPR7  = 0;
   vex_state->guest_GPR8  = 0;
   vex_state->guest_GPR9  = 0;
   vex_state->guest_GPR10 = 0;
   vex_state->guest_GPR11 = 0;
   vex_state->guest_GPR12 = 0;
   vex_state->guest_GPR13 = 0;
   vex_state->guest_GPR14 = 0;
   vex_state->guest_GPR15 = 0;
   vex_state->guest_GPR16 = 0;
   vex_state->guest_GPR17 = 0;
   vex_state->guest_GPR18 = 0;
   vex_state->guest_GPR19 = 0;
   vex_state->guest_GPR20 = 0;
   vex_state->guest_GPR21 = 0;
   vex_state->guest_GPR22 = 0;
   vex_state->guest_GPR23 = 0;
   vex_state->guest_GPR24 = 0;
   vex_state->guest_GPR25 = 0;
   vex_state->guest_GPR26 = 0;
   vex_state->guest_GPR27 = 0;
   vex_state->guest_GPR28 = 0;
   vex_state->guest_GPR29 = 0;
   vex_state->guest_GPR30 = 0;
   vex_state->guest_GPR31 = 0;

   /* Initialise the vector state. */
# define VECZERO(_vr) _vr[0]=_vr[1]=_vr[2]=_vr[3] = 0;

   VECZERO(vex_state->guest_VSR0 );
   VECZERO(vex_state->guest_VSR1 );
   VECZERO(vex_state->guest_VSR2 );
   VECZERO(vex_state->guest_VSR3 );
   VECZERO(vex_state->guest_VSR4 );
   VECZERO(vex_state->guest_VSR5 );
   VECZERO(vex_state->guest_VSR6 );
   VECZERO(vex_state->guest_VSR7 );
   VECZERO(vex_state->guest_VSR8 );
   VECZERO(vex_state->guest_VSR9 );
   VECZERO(vex_state->guest_VSR10);
   VECZERO(vex_state->guest_VSR11);
   VECZERO(vex_state->guest_VSR12);
   VECZERO(vex_state->guest_VSR13);
   VECZERO(vex_state->guest_VSR14);
   VECZERO(vex_state->guest_VSR15);
   VECZERO(vex_state->guest_VSR16);
   VECZERO(vex_state->guest_VSR17);
   VECZERO(vex_state->guest_VSR18);
   VECZERO(vex_state->guest_VSR19);
   VECZERO(vex_state->guest_VSR20);
   VECZERO(vex_state->guest_VSR21);
   VECZERO(vex_state->guest_VSR22);
   VECZERO(vex_state->guest_VSR23);
   VECZERO(vex_state->guest_VSR24);
   VECZERO(vex_state->guest_VSR25);
   VECZERO(vex_state->guest_VSR26);
   VECZERO(vex_state->guest_VSR27);
   VECZERO(vex_state->guest_VSR28);
   VECZERO(vex_state->guest_VSR29);
   VECZERO(vex_state->guest_VSR30);
   VECZERO(vex_state->guest_VSR31);
   VECZERO(vex_state->guest_VSR32);
   VECZERO(vex_state->guest_VSR33);
   VECZERO(vex_state->guest_VSR34);
   VECZERO(vex_state->guest_VSR35);
   VECZERO(vex_state->guest_VSR36);
   VECZERO(vex_state->guest_VSR37);
   VECZERO(vex_state->guest_VSR38);
   VECZERO(vex_state->guest_VSR39);
   VECZERO(vex_state->guest_VSR40);
   VECZERO(vex_state->guest_VSR41);
   VECZERO(vex_state->guest_VSR42);
   VECZERO(vex_state->guest_VSR43);
   VECZERO(vex_state->guest_VSR44);
   VECZERO(vex_state->guest_VSR45);
   VECZERO(vex_state->guest_VSR46);
   VECZERO(vex_state->guest_VSR47);
   VECZERO(vex_state->guest_VSR48);
   VECZERO(vex_state->guest_VSR49);
   VECZERO(vex_state->guest_VSR50);
   VECZERO(vex_state->guest_VSR51);
   VECZERO(vex_state->guest_VSR52);
   VECZERO(vex_state->guest_VSR53);
   VECZERO(vex_state->guest_VSR54);
   VECZERO(vex_state->guest_VSR55);
   VECZERO(vex_state->guest_VSR56);
   VECZERO(vex_state->guest_VSR57);
   VECZERO(vex_state->guest_VSR58);
   VECZERO(vex_state->guest_VSR59);
   VECZERO(vex_state->guest_VSR60);
   VECZERO(vex_state->guest_VSR61);
   VECZERO(vex_state->guest_VSR62);
   VECZERO(vex_state->guest_VSR63);

# undef VECZERO

   vex_state->guest_CIA = 0;
   vex_state->guest_LR  = 0;
   vex_state->guest_CTR = 0;

   vex_state->guest_XER_SO = 0;
   vex_state->guest_XER_OV = 0;
   vex_state->guest_XER_CA = 0;
   vex_state->guest_XER_BC = 0;

   vex_state->guest_CR0_321 = 0;
   vex_state->guest_CR0_0   = 0;
   vex_state->guest_CR1_321 = 0;
   vex_state->guest_CR1_0   = 0;
   vex_state->guest_CR2_321 = 0;
   vex_state->guest_CR2_0   = 0;
   vex_state->guest_CR3_321 = 0;
   vex_state->guest_CR3_0   = 0;
   vex_state->guest_CR4_321 = 0;
   vex_state->guest_CR4_0   = 0;
   vex_state->guest_CR5_321 = 0;
   vex_state->guest_CR5_0   = 0;
   vex_state->guest_CR6_321 = 0;
   vex_state->guest_CR6_0   = 0;
   vex_state->guest_CR7_321 = 0;
   vex_state->guest_CR7_0   = 0;

   vex_state->guest_FPROUND  = PPCrm_NEAREST;
   vex_state->guest_DFPROUND = PPCrm_NEAREST;
   vex_state->pad1 = 0;
   vex_state->pad2 = 0;

   vex_state->guest_VRSAVE = 0;

   vex_state->guest_VSCR = 0x0;  // Non-Java mode = 0

   vex_state->guest_EMNOTE = EmNote_NONE;

   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;

   vex_state->guest_NRADDR      = 0;
   vex_state->guest_NRADDR_GPR2 = 0;

   vex_state->guest_REDIR_SP = -1;
   for (i = 0; i < VEX_GUEST_PPC32_REDIR_STACK_SIZE; i++)
      vex_state->guest_REDIR_STACK[i] = 0;

   vex_state->guest_IP_AT_SYSCALL = 0;
   vex_state->guest_SPRG3_RO = 0;

   vex_state->padding1 = 0;
   vex_state->padding2 = 0;
}


/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC64_initialise ( /*OUT*/VexGuestPPC64State* vex_state )
{
   Int i;
   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER  = 0;
   vex_state->pad0 = 0;
   vex_state->guest_GPR0  = 0;
   vex_state->guest_GPR1  = 0;
   vex_state->guest_GPR2  = 0;
   vex_state->guest_GPR3  = 0;
   vex_state->guest_GPR4  = 0;
   vex_state->guest_GPR5  = 0;
   vex_state->guest_GPR6  = 0;
   vex_state->guest_GPR7  = 0;
   vex_state->guest_GPR8  = 0;
   vex_state->guest_GPR9  = 0;
   vex_state->guest_GPR10 = 0;
   vex_state->guest_GPR11 = 0;
   vex_state->guest_GPR12 = 0;
   vex_state->guest_GPR13 = 0;
   vex_state->guest_GPR14 = 0;
   vex_state->guest_GPR15 = 0;
   vex_state->guest_GPR16 = 0;
   vex_state->guest_GPR17 = 0;
   vex_state->guest_GPR18 = 0;
   vex_state->guest_GPR19 = 0;
   vex_state->guest_GPR20 = 0;
   vex_state->guest_GPR21 = 0;
   vex_state->guest_GPR22 = 0;
   vex_state->guest_GPR23 = 0;
   vex_state->guest_GPR24 = 0;
   vex_state->guest_GPR25 = 0;
   vex_state->guest_GPR26 = 0;
   vex_state->guest_GPR27 = 0;
   vex_state->guest_GPR28 = 0;
   vex_state->guest_GPR29 = 0;
   vex_state->guest_GPR30 = 0;
   vex_state->guest_GPR31 = 0;

   /* Initialise the vector state. */
# define VECZERO(_vr) _vr[0]=_vr[1]=_vr[2]=_vr[3] = 0;

   VECZERO(vex_state->guest_VSR0 );
   VECZERO(vex_state->guest_VSR1 );
   VECZERO(vex_state->guest_VSR2 );
   VECZERO(vex_state->guest_VSR3 );
   VECZERO(vex_state->guest_VSR4 );
   VECZERO(vex_state->guest_VSR5 );
   VECZERO(vex_state->guest_VSR6 );
   VECZERO(vex_state->guest_VSR7 );
   VECZERO(vex_state->guest_VSR8 );
   VECZERO(vex_state->guest_VSR9 );
   VECZERO(vex_state->guest_VSR10);
   VECZERO(vex_state->guest_VSR11);
   VECZERO(vex_state->guest_VSR12);
   VECZERO(vex_state->guest_VSR13);
   VECZERO(vex_state->guest_VSR14);
   VECZERO(vex_state->guest_VSR15);
   VECZERO(vex_state->guest_VSR16);
   VECZERO(vex_state->guest_VSR17);
   VECZERO(vex_state->guest_VSR18);
   VECZERO(vex_state->guest_VSR19);
   VECZERO(vex_state->guest_VSR20);
   VECZERO(vex_state->guest_VSR21);
   VECZERO(vex_state->guest_VSR22);
   VECZERO(vex_state->guest_VSR23);
   VECZERO(vex_state->guest_VSR24);
   VECZERO(vex_state->guest_VSR25);
   VECZERO(vex_state->guest_VSR26);
   VECZERO(vex_state->guest_VSR27);
   VECZERO(vex_state->guest_VSR28);
   VECZERO(vex_state->guest_VSR29);
   VECZERO(vex_state->guest_VSR30);
   VECZERO(vex_state->guest_VSR31);
   VECZERO(vex_state->guest_VSR32);
   VECZERO(vex_state->guest_VSR33);
   VECZERO(vex_state->guest_VSR34);
   VECZERO(vex_state->guest_VSR35);
   VECZERO(vex_state->guest_VSR36);
   VECZERO(vex_state->guest_VSR37);
   VECZERO(vex_state->guest_VSR38);
   VECZERO(vex_state->guest_VSR39);
   VECZERO(vex_state->guest_VSR40);
   VECZERO(vex_state->guest_VSR41);
   VECZERO(vex_state->guest_VSR42);
   VECZERO(vex_state->guest_VSR43);
   VECZERO(vex_state->guest_VSR44);
   VECZERO(vex_state->guest_VSR45);
   VECZERO(vex_state->guest_VSR46);
   VECZERO(vex_state->guest_VSR47);
   VECZERO(vex_state->guest_VSR48);
   VECZERO(vex_state->guest_VSR49);
   VECZERO(vex_state->guest_VSR50);
   VECZERO(vex_state->guest_VSR51);
   VECZERO(vex_state->guest_VSR52);
   VECZERO(vex_state->guest_VSR53);
   VECZERO(vex_state->guest_VSR54);
   VECZERO(vex_state->guest_VSR55);
   VECZERO(vex_state->guest_VSR56);
   VECZERO(vex_state->guest_VSR57);
   VECZERO(vex_state->guest_VSR58);
   VECZERO(vex_state->guest_VSR59);
   VECZERO(vex_state->guest_VSR60);
   VECZERO(vex_state->guest_VSR61);
   VECZERO(vex_state->guest_VSR62);
   VECZERO(vex_state->guest_VSR63);

# undef VECZERO

   vex_state->guest_CIA = 0;
   vex_state->guest_LR  = 0;
   vex_state->guest_CTR = 0;

   vex_state->guest_XER_SO = 0;
   vex_state->guest_XER_OV = 0;
   vex_state->guest_XER_CA = 0;
   vex_state->guest_XER_BC = 0;

   vex_state->guest_CR0_321 = 0;
   vex_state->guest_CR0_0   = 0;
   vex_state->guest_CR1_321 = 0;
   vex_state->guest_CR1_0   = 0;
   vex_state->guest_CR2_321 = 0;
   vex_state->guest_CR2_0   = 0;
   vex_state->guest_CR3_321 = 0;
   vex_state->guest_CR3_0   = 0;
   vex_state->guest_CR4_321 = 0;
   vex_state->guest_CR4_0   = 0;
   vex_state->guest_CR5_321 = 0;
   vex_state->guest_CR5_0   = 0;
   vex_state->guest_CR6_321 = 0;
   vex_state->guest_CR6_0   = 0;
   vex_state->guest_CR7_321 = 0;
   vex_state->guest_CR7_0   = 0;

   vex_state->guest_FPROUND  = PPCrm_NEAREST;
   vex_state->guest_DFPROUND = PPCrm_NEAREST;
   vex_state->pad1 = 0;
   vex_state->pad2 = 0;

   vex_state->guest_VRSAVE = 0;

   vex_state->guest_VSCR = 0x0;  // Non-Java mode = 0

   vex_state->guest_EMNOTE = EmNote_NONE;

   vex_state->padding = 0;

   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;

   vex_state->guest_NRADDR      = 0;
   vex_state->guest_NRADDR_GPR2 = 0;

   vex_state->guest_REDIR_SP = -1;
   for (i = 0; i < VEX_GUEST_PPC64_REDIR_STACK_SIZE; i++)
      vex_state->guest_REDIR_STACK[i] = 0;

   vex_state->guest_IP_AT_SYSCALL = 0;
   vex_state->guest_SPRG3_RO = 0;
   vex_state->guest_TFHAR  = 0;
   vex_state->guest_TFIAR  = 0;
   vex_state->guest_TEXASR = 0;
}


/*-----------------------------------------------------------*/
/*--- Describing the ppc guest state, for the benefit     ---*/
/*--- of iropt and instrumenters.                         ---*/
/*-----------------------------------------------------------*/

/* Figure out if any part of the guest state contained in minoff
   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).

   By default we enforce precise exns for guest R1 (stack pointer),
   CIA (current insn address) and LR (link register).  These are the
   minimum needed to extract correct stack backtraces from ppc
   code. [[NB: not sure if keeping LR up to date is actually
   necessary.]]

   Only R1 is needed in mode VexRegUpdSpAtMemAccess.
*/
Bool guest_ppc32_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   Int lr_min  = offsetof(VexGuestPPC32State, guest_LR);
   Int lr_max  = lr_min + 4 - 1;
   Int r1_min  = offsetof(VexGuestPPC32State, guest_GPR1);
   Int r1_max  = r1_min + 4 - 1;
   Int cia_min = offsetof(VexGuestPPC32State, guest_CIA);
   Int cia_max = cia_min + 4 - 1;

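   /* Each test below checks whether [minoff, maxoff] is disjoint from
      a register's byte range; the two ranges overlap unless one ends
      before the other begins. */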
   if (maxoff < r1_min || minoff > r1_max) {
      /* no overlap with R1 */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False;  // We only need to check stack pointer.
   } else {
      return True;
   }

   if (maxoff < lr_min || minoff > lr_max) {
      /* no overlap with LR */
   } else {
      return True;
   }

   if (maxoff < cia_min || minoff > cia_max) {
      /* no overlap with CIA */
   } else {
      return True;
   }

   return False;
}

Bool guest_ppc64_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   /* Given that R2 is a Big Deal in the ELF ppc64 ABI, it seems
      prudent to be conservative with it, even though thus far there
      is no evidence to suggest that it actually needs to be kept up
      to date wrt possible exceptions. */
   Int lr_min  = offsetof(VexGuestPPC64State, guest_LR);
   Int lr_max  = lr_min + 8 - 1;
   Int r1_min  = offsetof(VexGuestPPC64State, guest_GPR1);
   Int r1_max  = r1_min + 8 - 1;
   Int r2_min  = offsetof(VexGuestPPC64State, guest_GPR2);
   Int r2_max  = r2_min + 8 - 1;
   Int cia_min = offsetof(VexGuestPPC64State, guest_CIA);
   Int cia_max = cia_min + 8 - 1;

   if (maxoff < r1_min || minoff > r1_max) {
      /* no overlap with R1 */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False;  // We only need to check stack pointer.
   } else {
      return True;
   }

   if (maxoff < lr_min || minoff > lr_max) {
      /* no overlap with LR */
   } else {
      return True;
   }

   if (maxoff < r2_min || minoff > r2_max) {
      /* no overlap with R2 */
   } else {
      return True;
   }

   if (maxoff < cia_min || minoff > cia_max) {
      /* no overlap with CIA */
   } else {
      return True;
   }

   return False;
}


#define ALWAYSDEFD32(field)                       \
    { offsetof(VexGuestPPC32State, field),        \
      (sizeof ((VexGuestPPC32State*)0)->field) }

VexGuestLayout
   ppc32Guest_layout
   = {
        /* Total size of the guest state, in bytes. */
        .total_sizeB = sizeof(VexGuestPPC32State),

        /* Describe the stack pointer. */
        .offset_SP = offsetof(VexGuestPPC32State,guest_GPR1),
        .sizeof_SP = 4,

        /* Describe the frame pointer. */
        .offset_FP = offsetof(VexGuestPPC32State,guest_GPR1),
        .sizeof_FP = 4,

        /* Describe the instruction pointer. */
        .offset_IP = offsetof(VexGuestPPC32State,guest_CIA),
        .sizeof_IP = 4,

        /* Describe any sections to be regarded by Memcheck as
           'always-defined'. */
        .n_alwaysDefd = 11,

        .alwaysDefd
        = { /*  0 */ ALWAYSDEFD32(guest_CIA),
            /*  1 */ ALWAYSDEFD32(guest_EMNOTE),
            /*  2 */ ALWAYSDEFD32(guest_CMSTART),
            /*  3 */ ALWAYSDEFD32(guest_CMLEN),
            /*  4 */ ALWAYSDEFD32(guest_VSCR),
            /*  5 */ ALWAYSDEFD32(guest_FPROUND),
            /*  6 */ ALWAYSDEFD32(guest_NRADDR),
            /*  7 */ ALWAYSDEFD32(guest_NRADDR_GPR2),
            /*  8 */ ALWAYSDEFD32(guest_REDIR_SP),
            /*  9 */ ALWAYSDEFD32(guest_REDIR_STACK),
            /* 10 */ ALWAYSDEFD32(guest_IP_AT_SYSCALL)
          }
     };

#define ALWAYSDEFD64(field)                       \
    { offsetof(VexGuestPPC64State, field),        \
      (sizeof ((VexGuestPPC64State*)0)->field) }

VexGuestLayout
   ppc64Guest_layout
   = {
        /* Total size of the guest state, in bytes. */
        .total_sizeB = sizeof(VexGuestPPC64State),

        /* Describe the stack pointer. */
        .offset_SP = offsetof(VexGuestPPC64State,guest_GPR1),
        .sizeof_SP = 8,

        /* Describe the frame pointer. */
        .offset_FP = offsetof(VexGuestPPC64State,guest_GPR1),
        .sizeof_FP = 8,

        /* Describe the instruction pointer. */
        .offset_IP = offsetof(VexGuestPPC64State,guest_CIA),
        .sizeof_IP = 8,

        /* Describe any sections to be regarded by Memcheck as
           'always-defined'. */
        .n_alwaysDefd = 11,

        .alwaysDefd
        = { /*  0 */ ALWAYSDEFD64(guest_CIA),
            /*  1 */ ALWAYSDEFD64(guest_EMNOTE),
            /*  2 */ ALWAYSDEFD64(guest_CMSTART),
            /*  3 */ ALWAYSDEFD64(guest_CMLEN),
            /*  4 */ ALWAYSDEFD64(guest_VSCR),
            /*  5 */ ALWAYSDEFD64(guest_FPROUND),
            /*  6 */ ALWAYSDEFD64(guest_NRADDR),
            /*  7 */ ALWAYSDEFD64(guest_NRADDR_GPR2),
            /*  8 */ ALWAYSDEFD64(guest_REDIR_SP),
            /*  9 */ ALWAYSDEFD64(guest_REDIR_STACK),
            /* 10 */ ALWAYSDEFD64(guest_IP_AT_SYSCALL)
          }
     };

/*---------------------------------------------------------------*/
/*--- end                                  guest_ppc_helpers.c ---*/
/*---------------------------------------------------------------*/