/*---------------------------------------------------------------*/
/*--- begin                               guest_mips_helpers.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2010-2013 RT-RK
      mips-valgrind@rt-rk.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "libvex_basictypes.h"
#include "libvex_emnote.h"
#include "libvex_guest_mips32.h"
#include "libvex_guest_mips64.h"
#include "libvex_ir.h"
#include "libvex.h"

#include "main_util.h"
#include "main_globals.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_mips_defs.h"

/* This file contains helper functions for mips guest code.  Calls to
   these functions are generated by the back end.
*/
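/* Roughly, the helpers here fall into three groups: guest state
   initialisers and layout descriptions (LibVEX_GuestMIPS32/64_initialise,
   mips32/64Guest_layout), dirty helpers that read coprocessor-0 and
   hardware registers on a real MIPS host (mfc0/dmfc0/rdhwr), and dirty
   helpers that recompute the FCSR after floating-point operations. */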
46
#define ALWAYSDEFD32(field)                           \
    { offsetof(VexGuestMIPS32State, field),           \
      (sizeof ((VexGuestMIPS32State*)0)->field) }

#define ALWAYSDEFD64(field)                           \
    { offsetof(VexGuestMIPS64State, field),           \
      (sizeof ((VexGuestMIPS64State*)0)->field) }
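/* For example, ALWAYSDEFD32(guest_r0) expands to
      { offsetof(VexGuestMIPS32State, guest_r0),
        (sizeof ((VexGuestMIPS32State*)0)->guest_r0) }
   i.e. an { offset, size } descriptor for a piece of guest state that
   Memcheck may regard as always defined; see the .alwaysDefd tables in
   the VexGuestLayout structures below. */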
54
IRExpr *guest_mips32_spechelper(const HChar * function_name, IRExpr ** args,
                                IRStmt ** precedingStmts, Int n_precedingStmts)
{
   return NULL;
}

IRExpr *guest_mips64_spechelper ( const HChar * function_name, IRExpr ** args,
                                  IRStmt ** precedingStmts,
                                  Int n_precedingStmts )
{
   return NULL;
}
67
/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestMIPS32_initialise( /*OUT*/ VexGuestMIPS32State * vex_state)
{
71 vex_state->guest_r0 = 0; /* Hardwired to 0 */
72 vex_state->guest_r1 = 0; /* Assembler temporary */
73 vex_state->guest_r2 = 0; /* Values for function returns ... */
74 vex_state->guest_r3 = 0; /* ...and expression evaluation */
75 vex_state->guest_r4 = 0; /* Function arguments */
76 vex_state->guest_r5 = 0;
77 vex_state->guest_r6 = 0;
78 vex_state->guest_r7 = 0;
79 vex_state->guest_r8 = 0; /* Temporaries */
80 vex_state->guest_r9 = 0;
81 vex_state->guest_r10 = 0;
82 vex_state->guest_r11 = 0;
83 vex_state->guest_r12 = 0;
84 vex_state->guest_r13 = 0;
85 vex_state->guest_r14 = 0;
86 vex_state->guest_r15 = 0;
87 vex_state->guest_r16 = 0; /* Saved temporaries */
88 vex_state->guest_r17 = 0;
89 vex_state->guest_r18 = 0;
90 vex_state->guest_r19 = 0;
91 vex_state->guest_r20 = 0;
92 vex_state->guest_r21 = 0;
93 vex_state->guest_r22 = 0;
94 vex_state->guest_r23 = 0;
95 vex_state->guest_r24 = 0; /* Temporaries */
96 vex_state->guest_r25 = 0;
97 vex_state->guest_r26 = 0; /* Reserved for OS kernel */
98 vex_state->guest_r27 = 0;
99 vex_state->guest_r28 = 0; /* Global pointer */
100 vex_state->guest_r29 = 0; /* Stack pointer */
101 vex_state->guest_r30 = 0; /* Frame pointer */
102 vex_state->guest_r31 = 0; /* Return address */
103 vex_state->guest_PC = 0; /* Program counter */
104 vex_state->guest_HI = 0; /* Multiply and divide register higher result */
105 vex_state->guest_LO = 0; /* Multiply and divide register lower result */
106
   /* FPU Registers */
   vex_state->guest_f0 = 0x7ff800007ff80000ULL;   /* Floating point GP registers */
109 vex_state->guest_f1 = 0x7ff800007ff80000ULL;
110 vex_state->guest_f2 = 0x7ff800007ff80000ULL;
111 vex_state->guest_f3 = 0x7ff800007ff80000ULL;
112 vex_state->guest_f4 = 0x7ff800007ff80000ULL;
113 vex_state->guest_f5 = 0x7ff800007ff80000ULL;
114 vex_state->guest_f6 = 0x7ff800007ff80000ULL;
115 vex_state->guest_f7 = 0x7ff800007ff80000ULL;
116 vex_state->guest_f8 = 0x7ff800007ff80000ULL;
117 vex_state->guest_f9 = 0x7ff800007ff80000ULL;
118 vex_state->guest_f10 = 0x7ff800007ff80000ULL;
119 vex_state->guest_f11 = 0x7ff800007ff80000ULL;
120 vex_state->guest_f12 = 0x7ff800007ff80000ULL;
121 vex_state->guest_f13 = 0x7ff800007ff80000ULL;
122 vex_state->guest_f14 = 0x7ff800007ff80000ULL;
123 vex_state->guest_f15 = 0x7ff800007ff80000ULL;
124 vex_state->guest_f16 = 0x7ff800007ff80000ULL;
125 vex_state->guest_f17 = 0x7ff800007ff80000ULL;
126 vex_state->guest_f18 = 0x7ff800007ff80000ULL;
127 vex_state->guest_f19 = 0x7ff800007ff80000ULL;
128 vex_state->guest_f20 = 0x7ff800007ff80000ULL;
129 vex_state->guest_f21 = 0x7ff800007ff80000ULL;
130 vex_state->guest_f22 = 0x7ff800007ff80000ULL;
131 vex_state->guest_f23 = 0x7ff800007ff80000ULL;
132 vex_state->guest_f24 = 0x7ff800007ff80000ULL;
133 vex_state->guest_f25 = 0x7ff800007ff80000ULL;
134 vex_state->guest_f26 = 0x7ff800007ff80000ULL;
135 vex_state->guest_f27 = 0x7ff800007ff80000ULL;
136 vex_state->guest_f28 = 0x7ff800007ff80000ULL;
137 vex_state->guest_f29 = 0x7ff800007ff80000ULL;
138 vex_state->guest_f30 = 0x7ff800007ff80000ULL;
139 vex_state->guest_f31 = 0x7ff800007ff80000ULL;
140
141 vex_state->guest_FIR = 0; /* FP implementation and revision register */
142 vex_state->guest_FCCR = 0; /* FP condition codes register */
143 vex_state->guest_FEXR = 0; /* FP exceptions register */
144 vex_state->guest_FENR = 0; /* FP enables register */
145 vex_state->guest_FCSR = 0; /* FP control/status register */
146 vex_state->guest_ULR = 0; /* TLS */
147
148 /* Various pseudo-regs mandated by Vex or Valgrind. */
149 /* Emulation notes */
150 vex_state->guest_EMNOTE = 0;
151
152 /* For clflush: record start and length of area to invalidate */
153 vex_state->guest_CMSTART = 0;
154 vex_state->guest_CMLEN = 0;
155 vex_state->host_EvC_COUNTER = 0;
156 vex_state->host_EvC_FAILADDR = 0;
157
158 /* Used to record the unredirected guest address at the start of
159 a translation whose start has been redirected. By reading
160 this pseudo-register shortly afterwards, the translation can
161 find out what the corresponding no-redirection address was.
162 Note, this is only set for wrap-style redirects, not for
163 replace-style ones. */
164 vex_state->guest_NRADDR = 0;
165
166 vex_state->guest_COND = 0;
167
168 /* MIPS32 DSP ASE(r2) specific registers */
169 vex_state->guest_DSPControl = 0; /* DSPControl register */
170 vex_state->guest_ac0 = 0; /* Accumulator 0 */
171 vex_state->guest_ac1 = 0; /* Accumulator 1 */
172 vex_state->guest_ac2 = 0; /* Accumulator 2 */
173 vex_state->guest_ac3 = 0; /* Accumulator 3 */
174 }
175
void LibVEX_GuestMIPS64_initialise ( /*OUT*/ VexGuestMIPS64State * vex_state )
{
178 vex_state->guest_r0 = 0; /* Hardwired to 0 */
179 vex_state->guest_r1 = 0; /* Assembler temporary */
180 vex_state->guest_r2 = 0; /* Values for function returns ... */
181 vex_state->guest_r3 = 0;
182 vex_state->guest_r4 = 0; /* Function arguments */
183 vex_state->guest_r5 = 0;
184 vex_state->guest_r6 = 0;
185 vex_state->guest_r7 = 0;
186 vex_state->guest_r8 = 0;
187 vex_state->guest_r9 = 0;
188 vex_state->guest_r10 = 0;
189 vex_state->guest_r11 = 0;
190 vex_state->guest_r12 = 0; /* Temporaries */
191 vex_state->guest_r13 = 0;
192 vex_state->guest_r14 = 0;
193 vex_state->guest_r15 = 0;
194 vex_state->guest_r16 = 0; /* Saved temporaries */
195 vex_state->guest_r17 = 0;
196 vex_state->guest_r18 = 0;
197 vex_state->guest_r19 = 0;
198 vex_state->guest_r20 = 0;
199 vex_state->guest_r21 = 0;
200 vex_state->guest_r22 = 0;
201 vex_state->guest_r23 = 0;
202 vex_state->guest_r24 = 0; /* Temporaries */
203 vex_state->guest_r25 = 0;
204 vex_state->guest_r26 = 0; /* Reserved for OS kernel */
205 vex_state->guest_r27 = 0;
206 vex_state->guest_r28 = 0; /* Global pointer */
207 vex_state->guest_r29 = 0; /* Stack pointer */
208 vex_state->guest_r30 = 0; /* Frame pointer */
209 vex_state->guest_r31 = 0; /* Return address */
210 vex_state->guest_PC = 0; /* Program counter */
211 vex_state->guest_HI = 0; /* Multiply and divide register higher result */
212 vex_state->guest_LO = 0; /* Multiply and divide register lower result */
213
   /* FPU Registers */
   vex_state->guest_f0 = 0x7ff800007ff80000ULL;   /* Floating point registers */
216 vex_state->guest_f1 = 0x7ff800007ff80000ULL;
217 vex_state->guest_f2 = 0x7ff800007ff80000ULL;
218 vex_state->guest_f3 = 0x7ff800007ff80000ULL;
219 vex_state->guest_f4 = 0x7ff800007ff80000ULL;
220 vex_state->guest_f5 = 0x7ff800007ff80000ULL;
221 vex_state->guest_f6 = 0x7ff800007ff80000ULL;
222 vex_state->guest_f7 = 0x7ff800007ff80000ULL;
223 vex_state->guest_f8 = 0x7ff800007ff80000ULL;
224 vex_state->guest_f9 = 0x7ff800007ff80000ULL;
225 vex_state->guest_f10 = 0x7ff800007ff80000ULL;
226 vex_state->guest_f11 = 0x7ff800007ff80000ULL;
227 vex_state->guest_f12 = 0x7ff800007ff80000ULL;
228 vex_state->guest_f13 = 0x7ff800007ff80000ULL;
229 vex_state->guest_f14 = 0x7ff800007ff80000ULL;
230 vex_state->guest_f15 = 0x7ff800007ff80000ULL;
231 vex_state->guest_f16 = 0x7ff800007ff80000ULL;
232 vex_state->guest_f17 = 0x7ff800007ff80000ULL;
233 vex_state->guest_f18 = 0x7ff800007ff80000ULL;
234 vex_state->guest_f19 = 0x7ff800007ff80000ULL;
235 vex_state->guest_f20 = 0x7ff800007ff80000ULL;
236 vex_state->guest_f21 = 0x7ff800007ff80000ULL;
237 vex_state->guest_f22 = 0x7ff800007ff80000ULL;
238 vex_state->guest_f23 = 0x7ff800007ff80000ULL;
239 vex_state->guest_f24 = 0x7ff800007ff80000ULL;
240 vex_state->guest_f25 = 0x7ff800007ff80000ULL;
241 vex_state->guest_f26 = 0x7ff800007ff80000ULL;
242 vex_state->guest_f27 = 0x7ff800007ff80000ULL;
243 vex_state->guest_f28 = 0x7ff800007ff80000ULL;
244 vex_state->guest_f29 = 0x7ff800007ff80000ULL;
245 vex_state->guest_f30 = 0x7ff800007ff80000ULL;
246 vex_state->guest_f31 = 0x7ff800007ff80000ULL;
247
248 vex_state->guest_FIR = 0; /* FP implementation and revision register */
249 vex_state->guest_FCCR = 0; /* FP condition codes register */
250 vex_state->guest_FEXR = 0; /* FP exceptions register */
251 vex_state->guest_FENR = 0; /* FP enables register */
252 vex_state->guest_FCSR = 0; /* FP control/status register */
253
254 vex_state->guest_ULR = 0;
255
256 /* Various pseudo-regs mandated by Vex or Valgrind. */
257 /* Emulation notes */
258 vex_state->guest_EMNOTE = 0;
259
260 /* For clflush: record start and length of area to invalidate */
261 vex_state->guest_CMSTART = 0;
262 vex_state->guest_CMLEN = 0;
263 vex_state->host_EvC_COUNTER = 0;
264 vex_state->host_EvC_FAILADDR = 0;
265
266 /* Used to record the unredirected guest address at the start of
267 a translation whose start has been redirected. By reading
268 this pseudo-register shortly afterwards, the translation can
269 find out what the corresponding no-redirection address was.
270 Note, this is only set for wrap-style redirects, not for
271 replace-style ones. */
272 vex_state->guest_NRADDR = 0;
273
274 vex_state->guest_COND = 0;
275 }
276
277 /*-----------------------------------------------------------*/
278 /*--- Describing the mips guest state, for the benefit ---*/
279 /*--- of iropt and instrumenters. ---*/
280 /*-----------------------------------------------------------*/
281
282 /* Figure out if any part of the guest state contained in minoff
283 .. maxoff requires precise memory exceptions. If in doubt return
284 True (but this generates significantly slower code).
285
   We enforce precise exns for guest SP, PC and FP (r29, PC and r30).

   Only SP is needed in mode VexRegUpdSpAtMemAccess.
289 */
Bool guest_mips32_state_requires_precise_mem_exns (
      Int minoff, Int maxoff, VexRegisterUpdates pxControl
)
{
294 Int sp_min = offsetof(VexGuestMIPS32State, guest_r29);
295 Int sp_max = sp_min + 4 - 1;
296 Int pc_min = offsetof(VexGuestMIPS32State, guest_PC);
297 Int pc_max = pc_min + 4 - 1;
298
299 if (maxoff < sp_min || minoff > sp_max) {
300 /* no overlap with sp */
301 if (pxControl == VexRegUpdSpAtMemAccess)
302 return False; /* We only need to check stack pointer. */
303 } else {
304 return True;
305 }
306
307 if (maxoff < pc_min || minoff > pc_max) {
308 /* no overlap with pc */
309 } else {
310 return True;
311 }
312
   /* We appear to need precise updates of the frame pointer (r30) in order
      to get proper stacktraces from non-optimised code. */
315 Int fp_min = offsetof(VexGuestMIPS32State, guest_r30);
316 Int fp_max = fp_min + 4 - 1;
317
318 if (maxoff < fp_min || minoff > fp_max) {
319 /* no overlap with fp */
320 } else {
321 return True;
322 }
323
324 return False;
325 }
326
Bool guest_mips64_state_requires_precise_mem_exns (
      Int minoff, Int maxoff, VexRegisterUpdates pxControl
)
{
331 Int sp_min = offsetof(VexGuestMIPS64State, guest_r29);
332 Int sp_max = sp_min + 8 - 1;
333 Int pc_min = offsetof(VexGuestMIPS64State, guest_PC);
334 Int pc_max = pc_min + 8 - 1;
335
336 if ( maxoff < sp_min || minoff > sp_max ) {
337 /* no overlap with sp */
338 if (pxControl == VexRegUpdSpAtMemAccess)
339 return False; /* We only need to check stack pointer. */
340 } else {
341 return True;
342 }
343
344 if ( maxoff < pc_min || minoff > pc_max ) {
345 /* no overlap with pc */
346 } else {
347 return True;
348 }
349
350 Int fp_min = offsetof(VexGuestMIPS64State, guest_r30);
351 Int fp_max = fp_min + 8 - 1;
352
353 if ( maxoff < fp_min || minoff > fp_max ) {
354 /* no overlap with fp */
355 } else {
356 return True;
357 }
358
359 return False;
360 }
361
362 VexGuestLayout mips32Guest_layout = {
363 /* Total size of the guest state, in bytes. */
364 .total_sizeB = sizeof(VexGuestMIPS32State),
365 /* Describe the stack pointer. */
366 .offset_SP = offsetof(VexGuestMIPS32State, guest_r29),
367 .sizeof_SP = 4,
368 /* Describe the frame pointer. */
369 .offset_FP = offsetof(VexGuestMIPS32State, guest_r30),
370 .sizeof_FP = 4,
371 /* Describe the instruction pointer. */
372 .offset_IP = offsetof(VexGuestMIPS32State, guest_PC),
373 .sizeof_IP = 4,
374 /* Describe any sections to be regarded by Memcheck as
375 'always-defined'. */
376 .n_alwaysDefd = 8,
377 /* ? :( */
378 .alwaysDefd = {
379 /* 0 */ ALWAYSDEFD32(guest_r0),
380 /* 1 */ ALWAYSDEFD32(guest_r1),
381 /* 2 */ ALWAYSDEFD32(guest_EMNOTE),
382 /* 3 */ ALWAYSDEFD32(guest_CMSTART),
383 /* 4 */ ALWAYSDEFD32(guest_CMLEN),
384 /* 5 */ ALWAYSDEFD32(guest_r29),
385 /* 6 */ ALWAYSDEFD32(guest_r31),
386 /* 7 */ ALWAYSDEFD32(guest_ULR)
387 }
388 };
389
390 VexGuestLayout mips64Guest_layout = {
391 /* Total size of the guest state, in bytes. */
392 .total_sizeB = sizeof(VexGuestMIPS64State),
393 /* Describe the stack pointer. */
394 .offset_SP = offsetof(VexGuestMIPS64State, guest_r29),
395 .sizeof_SP = 8,
396 /* Describe the frame pointer. */
397 .offset_FP = offsetof(VexGuestMIPS64State, guest_r30),
398 .sizeof_FP = 8,
399 /* Describe the instruction pointer. */
400 .offset_IP = offsetof(VexGuestMIPS64State, guest_PC),
401 .sizeof_IP = 8,
402 /* Describe any sections to be regarded by Memcheck as
403 'always-defined'. */
404 .n_alwaysDefd = 7,
405 /* ? :( */
406 .alwaysDefd = {
407 /* 0 */ ALWAYSDEFD64 (guest_r0),
408 /* 1 */ ALWAYSDEFD64 (guest_EMNOTE),
409 /* 2 */ ALWAYSDEFD64 (guest_CMSTART),
410 /* 3 */ ALWAYSDEFD64 (guest_CMLEN),
411 /* 4 */ ALWAYSDEFD64 (guest_r29),
412 /* 5 */ ALWAYSDEFD64 (guest_r31),
413 /* 6 */ ALWAYSDEFD64 (guest_ULR)
414 }
415 };
416
#define ASM_VOLATILE_CASE(rd, sel) \
   case rd: \
      asm volatile ("mfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); \
      break;
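
/* Read coprocessor-0 register (rd, sel) on the host and return its value.
   Because 'rd' and 'sel' must be immediates in the mfc0 encoding, every
   (rd, sel) combination gets its own case via ASM_VOLATILE_CASE; for
   example ASM_VOLATILE_CASE(12, 0) expands to
      case 12: asm volatile ("mfc0 %0, $12, 0\n\t" :"=r" (x) ); break;
   On non-MIPS (or pre-r2) builds the helper simply returns 0. */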
UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel)
{
   UInt x = 0;
425 #if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
426 switch (sel) {
427 case 0:
428 /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
429 switch (rd) {
430 ASM_VOLATILE_CASE(0, 0);
431 ASM_VOLATILE_CASE(1, 0);
432 ASM_VOLATILE_CASE(2, 0);
433 ASM_VOLATILE_CASE(3, 0);
434 ASM_VOLATILE_CASE(4, 0);
435 ASM_VOLATILE_CASE(5, 0);
436 ASM_VOLATILE_CASE(6, 0);
437 ASM_VOLATILE_CASE(7, 0);
438 ASM_VOLATILE_CASE(8, 0);
439 ASM_VOLATILE_CASE(9, 0);
440 ASM_VOLATILE_CASE(10, 0);
441 ASM_VOLATILE_CASE(11, 0);
442 ASM_VOLATILE_CASE(12, 0);
443 ASM_VOLATILE_CASE(13, 0);
444 ASM_VOLATILE_CASE(14, 0);
445 ASM_VOLATILE_CASE(15, 0);
446 ASM_VOLATILE_CASE(16, 0);
447 ASM_VOLATILE_CASE(17, 0);
448 ASM_VOLATILE_CASE(18, 0);
449 ASM_VOLATILE_CASE(19, 0);
450 ASM_VOLATILE_CASE(20, 0);
451 ASM_VOLATILE_CASE(21, 0);
452 ASM_VOLATILE_CASE(22, 0);
453 ASM_VOLATILE_CASE(23, 0);
454 ASM_VOLATILE_CASE(24, 0);
455 ASM_VOLATILE_CASE(25, 0);
456 ASM_VOLATILE_CASE(26, 0);
457 ASM_VOLATILE_CASE(27, 0);
458 ASM_VOLATILE_CASE(28, 0);
459 ASM_VOLATILE_CASE(29, 0);
460 ASM_VOLATILE_CASE(30, 0);
461 ASM_VOLATILE_CASE(31, 0);
462 default:
463 break;
464 }
465 break;
466 case 1:
467 /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
468 switch (rd) {
469 ASM_VOLATILE_CASE(0, 1);
470 ASM_VOLATILE_CASE(1, 1);
471 ASM_VOLATILE_CASE(2, 1);
472 ASM_VOLATILE_CASE(3, 1);
473 ASM_VOLATILE_CASE(4, 1);
474 ASM_VOLATILE_CASE(5, 1);
475 ASM_VOLATILE_CASE(6, 1);
476 ASM_VOLATILE_CASE(7, 1);
477 ASM_VOLATILE_CASE(8, 1);
478 ASM_VOLATILE_CASE(9, 1);
479 ASM_VOLATILE_CASE(10, 1);
480 ASM_VOLATILE_CASE(11, 1);
481 ASM_VOLATILE_CASE(12, 1);
482 ASM_VOLATILE_CASE(13, 1);
483 ASM_VOLATILE_CASE(14, 1);
484 ASM_VOLATILE_CASE(15, 1);
485 ASM_VOLATILE_CASE(16, 1);
486 ASM_VOLATILE_CASE(17, 1);
487 ASM_VOLATILE_CASE(18, 1);
488 ASM_VOLATILE_CASE(19, 1);
489 ASM_VOLATILE_CASE(20, 1);
490 ASM_VOLATILE_CASE(21, 1);
491 ASM_VOLATILE_CASE(22, 1);
492 ASM_VOLATILE_CASE(23, 1);
493 ASM_VOLATILE_CASE(24, 1);
494 ASM_VOLATILE_CASE(25, 1);
495 ASM_VOLATILE_CASE(26, 1);
496 ASM_VOLATILE_CASE(27, 1);
497 ASM_VOLATILE_CASE(28, 1);
498 ASM_VOLATILE_CASE(29, 1);
499 ASM_VOLATILE_CASE(30, 1);
500 ASM_VOLATILE_CASE(31, 1);
501 default:
502 break;
503 }
504 break;
505 case 2:
506 /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
507 switch (rd) {
508 ASM_VOLATILE_CASE(0, 2);
509 ASM_VOLATILE_CASE(1, 2);
510 ASM_VOLATILE_CASE(2, 2);
         ASM_VOLATILE_CASE(3, 2);
512 ASM_VOLATILE_CASE(4, 2);
513 ASM_VOLATILE_CASE(5, 2);
514 ASM_VOLATILE_CASE(6, 2);
515 ASM_VOLATILE_CASE(7, 2);
516 ASM_VOLATILE_CASE(8, 2);
517 ASM_VOLATILE_CASE(9, 2);
518 ASM_VOLATILE_CASE(10, 2);
519 ASM_VOLATILE_CASE(11, 2);
520 ASM_VOLATILE_CASE(12, 2);
521 ASM_VOLATILE_CASE(13, 2);
522 ASM_VOLATILE_CASE(14, 2);
523 ASM_VOLATILE_CASE(15, 2);
524 ASM_VOLATILE_CASE(16, 2);
525 ASM_VOLATILE_CASE(17, 2);
526 ASM_VOLATILE_CASE(18, 2);
527 ASM_VOLATILE_CASE(19, 2);
528 ASM_VOLATILE_CASE(20, 2);
529 ASM_VOLATILE_CASE(21, 2);
530 ASM_VOLATILE_CASE(22, 2);
531 ASM_VOLATILE_CASE(23, 2);
532 ASM_VOLATILE_CASE(24, 2);
533 ASM_VOLATILE_CASE(25, 2);
534 ASM_VOLATILE_CASE(26, 2);
535 ASM_VOLATILE_CASE(27, 2);
536 ASM_VOLATILE_CASE(28, 2);
537 ASM_VOLATILE_CASE(29, 2);
538 ASM_VOLATILE_CASE(30, 2);
539 ASM_VOLATILE_CASE(31, 2);
540 default:
541 break;
542 }
543 break;
544 case 3:
545 /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
546 switch (rd) {
547 ASM_VOLATILE_CASE(0, 3);
548 ASM_VOLATILE_CASE(1, 3);
549 ASM_VOLATILE_CASE(2, 3);
550 ASM_VOLATILE_CASE(3, 3);
551 ASM_VOLATILE_CASE(4, 3);
552 ASM_VOLATILE_CASE(5, 3);
553 ASM_VOLATILE_CASE(6, 3);
554 ASM_VOLATILE_CASE(7, 3);
555 ASM_VOLATILE_CASE(8, 3);
556 ASM_VOLATILE_CASE(9, 3);
557 ASM_VOLATILE_CASE(10, 3);
558 ASM_VOLATILE_CASE(11, 3);
559 ASM_VOLATILE_CASE(12, 3);
560 ASM_VOLATILE_CASE(13, 3);
561 ASM_VOLATILE_CASE(14, 3);
562 ASM_VOLATILE_CASE(15, 3);
563 ASM_VOLATILE_CASE(16, 3);
564 ASM_VOLATILE_CASE(17, 3);
565 ASM_VOLATILE_CASE(18, 3);
566 ASM_VOLATILE_CASE(19, 3);
567 ASM_VOLATILE_CASE(20, 3);
568 ASM_VOLATILE_CASE(21, 3);
569 ASM_VOLATILE_CASE(22, 3);
570 ASM_VOLATILE_CASE(23, 3);
571 ASM_VOLATILE_CASE(24, 3);
572 ASM_VOLATILE_CASE(25, 3);
573 ASM_VOLATILE_CASE(26, 3);
574 ASM_VOLATILE_CASE(27, 3);
575 ASM_VOLATILE_CASE(28, 3);
576 ASM_VOLATILE_CASE(29, 3);
577 ASM_VOLATILE_CASE(30, 3);
578 ASM_VOLATILE_CASE(31, 3);
579 default:
580 break;
581 }
582 break;
583 case 4:
584 /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
585 switch (rd) {
586 ASM_VOLATILE_CASE(0, 4);
587 ASM_VOLATILE_CASE(1, 4);
588 ASM_VOLATILE_CASE(2, 4);
589 ASM_VOLATILE_CASE(3, 4);
590 ASM_VOLATILE_CASE(4, 4);
591 ASM_VOLATILE_CASE(5, 4);
592 ASM_VOLATILE_CASE(6, 4);
593 ASM_VOLATILE_CASE(7, 4);
594 ASM_VOLATILE_CASE(8, 4);
595 ASM_VOLATILE_CASE(9, 4);
596 ASM_VOLATILE_CASE(10, 4);
597 ASM_VOLATILE_CASE(11, 4);
598 ASM_VOLATILE_CASE(12, 4);
599 ASM_VOLATILE_CASE(13, 4);
600 ASM_VOLATILE_CASE(14, 4);
601 ASM_VOLATILE_CASE(15, 4);
602 ASM_VOLATILE_CASE(16, 4);
603 ASM_VOLATILE_CASE(17, 4);
604 ASM_VOLATILE_CASE(18, 4);
605 ASM_VOLATILE_CASE(19, 4);
606 ASM_VOLATILE_CASE(20, 4);
607 ASM_VOLATILE_CASE(21, 4);
608 ASM_VOLATILE_CASE(22, 4);
609 ASM_VOLATILE_CASE(23, 4);
610 ASM_VOLATILE_CASE(24, 4);
611 ASM_VOLATILE_CASE(25, 4);
612 ASM_VOLATILE_CASE(26, 4);
613 ASM_VOLATILE_CASE(27, 4);
614 ASM_VOLATILE_CASE(28, 4);
615 ASM_VOLATILE_CASE(29, 4);
616 ASM_VOLATILE_CASE(30, 4);
617 ASM_VOLATILE_CASE(31, 4);
618 default:
619 break;
620 }
621 break;
622 case 5:
623 /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
624 switch (rd) {
625 ASM_VOLATILE_CASE(0, 5);
626 ASM_VOLATILE_CASE(1, 5);
627 ASM_VOLATILE_CASE(2, 5);
628 ASM_VOLATILE_CASE(3, 5);
629 ASM_VOLATILE_CASE(4, 5);
630 ASM_VOLATILE_CASE(5, 5);
631 ASM_VOLATILE_CASE(6, 5);
632 ASM_VOLATILE_CASE(7, 5);
633 ASM_VOLATILE_CASE(8, 5);
634 ASM_VOLATILE_CASE(9, 5);
635 ASM_VOLATILE_CASE(10, 5);
636 ASM_VOLATILE_CASE(11, 5);
637 ASM_VOLATILE_CASE(12, 5);
638 ASM_VOLATILE_CASE(13, 5);
639 ASM_VOLATILE_CASE(14, 5);
640 ASM_VOLATILE_CASE(15, 5);
641 ASM_VOLATILE_CASE(16, 5);
642 ASM_VOLATILE_CASE(17, 5);
643 ASM_VOLATILE_CASE(18, 5);
644 ASM_VOLATILE_CASE(19, 5);
645 ASM_VOLATILE_CASE(20, 5);
646 ASM_VOLATILE_CASE(21, 5);
647 ASM_VOLATILE_CASE(22, 5);
648 ASM_VOLATILE_CASE(23, 5);
649 ASM_VOLATILE_CASE(24, 5);
650 ASM_VOLATILE_CASE(25, 5);
651 ASM_VOLATILE_CASE(26, 5);
652 ASM_VOLATILE_CASE(27, 5);
653 ASM_VOLATILE_CASE(28, 5);
654 ASM_VOLATILE_CASE(29, 5);
655 ASM_VOLATILE_CASE(30, 5);
656 ASM_VOLATILE_CASE(31, 5);
657 default:
658 break;
659 }
660 break;
661 case 6:
662 /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
663 switch (rd) {
664 ASM_VOLATILE_CASE(0, 6);
665 ASM_VOLATILE_CASE(1, 6);
666 ASM_VOLATILE_CASE(2, 6);
667 ASM_VOLATILE_CASE(3, 6);
668 ASM_VOLATILE_CASE(4, 6);
669 ASM_VOLATILE_CASE(5, 6);
670 ASM_VOLATILE_CASE(6, 6);
671 ASM_VOLATILE_CASE(7, 6);
672 ASM_VOLATILE_CASE(8, 6);
673 ASM_VOLATILE_CASE(9, 6);
674 ASM_VOLATILE_CASE(10, 6);
675 ASM_VOLATILE_CASE(11, 6);
676 ASM_VOLATILE_CASE(12, 6);
677 ASM_VOLATILE_CASE(13, 6);
678 ASM_VOLATILE_CASE(14, 6);
679 ASM_VOLATILE_CASE(15, 6);
680 ASM_VOLATILE_CASE(16, 6);
681 ASM_VOLATILE_CASE(17, 6);
682 ASM_VOLATILE_CASE(18, 6);
683 ASM_VOLATILE_CASE(19, 6);
684 ASM_VOLATILE_CASE(20, 6);
685 ASM_VOLATILE_CASE(21, 6);
686 ASM_VOLATILE_CASE(22, 6);
687 ASM_VOLATILE_CASE(23, 6);
688 ASM_VOLATILE_CASE(24, 6);
689 ASM_VOLATILE_CASE(25, 6);
690 ASM_VOLATILE_CASE(26, 6);
691 ASM_VOLATILE_CASE(27, 6);
692 ASM_VOLATILE_CASE(28, 6);
693 ASM_VOLATILE_CASE(29, 6);
694 ASM_VOLATILE_CASE(30, 6);
695 ASM_VOLATILE_CASE(31, 6);
696 default:
697 break;
698 }
699 break;
700 case 7:
701 /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
702 switch (rd) {
703 ASM_VOLATILE_CASE(0, 7);
704 ASM_VOLATILE_CASE(1, 7);
705 ASM_VOLATILE_CASE(2, 7);
706 ASM_VOLATILE_CASE(3, 7);
707 ASM_VOLATILE_CASE(4, 7);
708 ASM_VOLATILE_CASE(5, 7);
709 ASM_VOLATILE_CASE(6, 7);
710 ASM_VOLATILE_CASE(7, 7);
711 ASM_VOLATILE_CASE(8, 7);
712 ASM_VOLATILE_CASE(9, 7);
713 ASM_VOLATILE_CASE(10, 7);
714 ASM_VOLATILE_CASE(11, 7);
715 ASM_VOLATILE_CASE(12, 7);
716 ASM_VOLATILE_CASE(13, 7);
717 ASM_VOLATILE_CASE(14, 7);
718 ASM_VOLATILE_CASE(15, 7);
719 ASM_VOLATILE_CASE(16, 7);
720 ASM_VOLATILE_CASE(17, 7);
721 ASM_VOLATILE_CASE(18, 7);
722 ASM_VOLATILE_CASE(19, 7);
723 ASM_VOLATILE_CASE(20, 7);
724 ASM_VOLATILE_CASE(21, 7);
725 ASM_VOLATILE_CASE(22, 7);
726 ASM_VOLATILE_CASE(23, 7);
727 ASM_VOLATILE_CASE(24, 7);
728 ASM_VOLATILE_CASE(25, 7);
729 ASM_VOLATILE_CASE(26, 7);
730 ASM_VOLATILE_CASE(27, 7);
731 ASM_VOLATILE_CASE(28, 7);
732 ASM_VOLATILE_CASE(29, 7);
733 ASM_VOLATILE_CASE(30, 7);
734 ASM_VOLATILE_CASE(31, 7);
735 default:
736 break;
737 }
738 break;
739
740 default:
741 break;
742 }
743 #endif
744 return x;
745 }
746
#undef ASM_VOLATILE_CASE

#define ASM_VOLATILE_CASE(rd, sel) \
   case rd: \
      asm volatile ("dmfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); \
      break;
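
/* 64-bit variant of the above: read coprocessor-0 register (rd, sel) with
   dmfc0 and return the full 64-bit value.  Only compiled in for
   mips64-linux hosts; otherwise it returns 0. */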
ULong mips64_dirtyhelper_dmfc0 ( UInt rd, UInt sel )
{
   ULong x = 0;
757 #if defined(VGP_mips64_linux)
758 switch (sel) {
759 case 0:
760 /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
761 switch (rd) {
762 ASM_VOLATILE_CASE (0, 0);
763 ASM_VOLATILE_CASE (1, 0);
764 ASM_VOLATILE_CASE (2, 0);
765 ASM_VOLATILE_CASE (3, 0);
766 ASM_VOLATILE_CASE (4, 0);
767 ASM_VOLATILE_CASE (5, 0);
768 ASM_VOLATILE_CASE (6, 0);
769 ASM_VOLATILE_CASE (7, 0);
770 ASM_VOLATILE_CASE (8, 0);
771 ASM_VOLATILE_CASE (9, 0);
772 ASM_VOLATILE_CASE (10, 0);
773 ASM_VOLATILE_CASE (11, 0);
774 ASM_VOLATILE_CASE (12, 0);
775 ASM_VOLATILE_CASE (13, 0);
776 ASM_VOLATILE_CASE (14, 0);
777 ASM_VOLATILE_CASE (15, 0);
778 ASM_VOLATILE_CASE (16, 0);
779 ASM_VOLATILE_CASE (17, 0);
780 ASM_VOLATILE_CASE (18, 0);
781 ASM_VOLATILE_CASE (19, 0);
782 ASM_VOLATILE_CASE (20, 0);
783 ASM_VOLATILE_CASE (21, 0);
784 ASM_VOLATILE_CASE (22, 0);
785 ASM_VOLATILE_CASE (23, 0);
786 ASM_VOLATILE_CASE (24, 0);
787 ASM_VOLATILE_CASE (25, 0);
788 ASM_VOLATILE_CASE (26, 0);
789 ASM_VOLATILE_CASE (27, 0);
790 ASM_VOLATILE_CASE (28, 0);
791 ASM_VOLATILE_CASE (29, 0);
792 ASM_VOLATILE_CASE (30, 0);
793 ASM_VOLATILE_CASE (31, 0);
794 default:
795 break;
796 }
797 break;
798 case 1:
799 /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
800 switch (rd) {
801 ASM_VOLATILE_CASE (0, 1);
802 ASM_VOLATILE_CASE (1, 1);
803 ASM_VOLATILE_CASE (2, 1);
804 ASM_VOLATILE_CASE (3, 1);
805 ASM_VOLATILE_CASE (4, 1);
806 ASM_VOLATILE_CASE (5, 1);
807 ASM_VOLATILE_CASE (6, 1);
808 ASM_VOLATILE_CASE (7, 1);
809 ASM_VOLATILE_CASE (8, 1);
810 ASM_VOLATILE_CASE (9, 1);
811 ASM_VOLATILE_CASE (10, 1);
812 ASM_VOLATILE_CASE (11, 1);
813 ASM_VOLATILE_CASE (12, 1);
814 ASM_VOLATILE_CASE (13, 1);
815 ASM_VOLATILE_CASE (14, 1);
816 ASM_VOLATILE_CASE (15, 1);
817 ASM_VOLATILE_CASE (16, 1);
818 ASM_VOLATILE_CASE (17, 1);
819 ASM_VOLATILE_CASE (18, 1);
820 ASM_VOLATILE_CASE (19, 1);
821 ASM_VOLATILE_CASE (20, 1);
822 ASM_VOLATILE_CASE (21, 1);
823 ASM_VOLATILE_CASE (22, 1);
824 ASM_VOLATILE_CASE (23, 1);
825 ASM_VOLATILE_CASE (24, 1);
826 ASM_VOLATILE_CASE (25, 1);
827 ASM_VOLATILE_CASE (26, 1);
828 ASM_VOLATILE_CASE (27, 1);
829 ASM_VOLATILE_CASE (28, 1);
830 ASM_VOLATILE_CASE (29, 1);
831 ASM_VOLATILE_CASE (30, 1);
832 ASM_VOLATILE_CASE (31, 1);
833 default:
834 break;
835 }
836 break;
837 case 2:
838 /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
839 switch (rd) {
840 ASM_VOLATILE_CASE (0, 2);
841 ASM_VOLATILE_CASE (1, 2);
842 ASM_VOLATILE_CASE (2, 2);
       ASM_VOLATILE_CASE (3, 2);
844 ASM_VOLATILE_CASE (4, 2);
845 ASM_VOLATILE_CASE (5, 2);
846 ASM_VOLATILE_CASE (6, 2);
847 ASM_VOLATILE_CASE (7, 2);
848 ASM_VOLATILE_CASE (8, 2);
849 ASM_VOLATILE_CASE (9, 2);
850 ASM_VOLATILE_CASE (10, 2);
851 ASM_VOLATILE_CASE (11, 2);
852 ASM_VOLATILE_CASE (12, 2);
853 ASM_VOLATILE_CASE (13, 2);
854 ASM_VOLATILE_CASE (14, 2);
855 ASM_VOLATILE_CASE (15, 2);
856 ASM_VOLATILE_CASE (16, 2);
857 ASM_VOLATILE_CASE (17, 2);
858 ASM_VOLATILE_CASE (18, 2);
859 ASM_VOLATILE_CASE (19, 2);
860 ASM_VOLATILE_CASE (20, 2);
861 ASM_VOLATILE_CASE (21, 2);
862 ASM_VOLATILE_CASE (22, 2);
863 ASM_VOLATILE_CASE (23, 2);
864 ASM_VOLATILE_CASE (24, 2);
865 ASM_VOLATILE_CASE (25, 2);
866 ASM_VOLATILE_CASE (26, 2);
867 ASM_VOLATILE_CASE (27, 2);
868 ASM_VOLATILE_CASE (28, 2);
869 ASM_VOLATILE_CASE (29, 2);
870 ASM_VOLATILE_CASE (30, 2);
871 ASM_VOLATILE_CASE (31, 2);
872 default:
873 break;
874 }
875 break;
876 case 3:
877 /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
878 switch (rd) {
879 ASM_VOLATILE_CASE (0, 3);
880 ASM_VOLATILE_CASE (1, 3);
881 ASM_VOLATILE_CASE (2, 3);
882 ASM_VOLATILE_CASE (3, 3);
883 ASM_VOLATILE_CASE (4, 3);
884 ASM_VOLATILE_CASE (5, 3);
885 ASM_VOLATILE_CASE (6, 3);
886 ASM_VOLATILE_CASE (7, 3);
887 ASM_VOLATILE_CASE (8, 3);
888 ASM_VOLATILE_CASE (9, 3);
889 ASM_VOLATILE_CASE (10, 3);
890 ASM_VOLATILE_CASE (11, 3);
891 ASM_VOLATILE_CASE (12, 3);
892 ASM_VOLATILE_CASE (13, 3);
893 ASM_VOLATILE_CASE (14, 3);
894 ASM_VOLATILE_CASE (15, 3);
895 ASM_VOLATILE_CASE (16, 3);
896 ASM_VOLATILE_CASE (17, 3);
897 ASM_VOLATILE_CASE (18, 3);
898 ASM_VOLATILE_CASE (19, 3);
899 ASM_VOLATILE_CASE (20, 3);
900 ASM_VOLATILE_CASE (21, 3);
901 ASM_VOLATILE_CASE (22, 3);
902 ASM_VOLATILE_CASE (23, 3);
903 ASM_VOLATILE_CASE (24, 3);
904 ASM_VOLATILE_CASE (25, 3);
905 ASM_VOLATILE_CASE (26, 3);
906 ASM_VOLATILE_CASE (27, 3);
907 ASM_VOLATILE_CASE (28, 3);
908 ASM_VOLATILE_CASE (29, 3);
909 ASM_VOLATILE_CASE (30, 3);
910 ASM_VOLATILE_CASE (31, 3);
911 default:
912 break;
913 }
914 break;
915 case 4:
916 /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
917 switch (rd) {
918 ASM_VOLATILE_CASE (0, 4);
919 ASM_VOLATILE_CASE (1, 4);
920 ASM_VOLATILE_CASE (2, 4);
921 ASM_VOLATILE_CASE (3, 4);
922 ASM_VOLATILE_CASE (4, 4);
923 ASM_VOLATILE_CASE (5, 4);
924 ASM_VOLATILE_CASE (6, 4);
925 ASM_VOLATILE_CASE (7, 4);
926 ASM_VOLATILE_CASE (8, 4);
927 ASM_VOLATILE_CASE (9, 4);
928 ASM_VOLATILE_CASE (10, 4);
929 ASM_VOLATILE_CASE (11, 4);
930 ASM_VOLATILE_CASE (12, 4);
931 ASM_VOLATILE_CASE (13, 4);
932 ASM_VOLATILE_CASE (14, 4);
933 ASM_VOLATILE_CASE (15, 4);
934 ASM_VOLATILE_CASE (16, 4);
935 ASM_VOLATILE_CASE (17, 4);
936 ASM_VOLATILE_CASE (18, 4);
937 ASM_VOLATILE_CASE (19, 4);
938 ASM_VOLATILE_CASE (20, 4);
939 ASM_VOLATILE_CASE (21, 4);
940 ASM_VOLATILE_CASE (22, 4);
941 ASM_VOLATILE_CASE (23, 4);
942 ASM_VOLATILE_CASE (24, 4);
943 ASM_VOLATILE_CASE (25, 4);
944 ASM_VOLATILE_CASE (26, 4);
945 ASM_VOLATILE_CASE (27, 4);
946 ASM_VOLATILE_CASE (28, 4);
947 ASM_VOLATILE_CASE (29, 4);
948 ASM_VOLATILE_CASE (30, 4);
949 ASM_VOLATILE_CASE (31, 4);
950 default:
951 break;
952 }
953 break;
954 case 5:
955 /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
956 switch (rd) {
957 ASM_VOLATILE_CASE (0, 5);
958 ASM_VOLATILE_CASE (1, 5);
959 ASM_VOLATILE_CASE (2, 5);
960 ASM_VOLATILE_CASE (3, 5);
961 ASM_VOLATILE_CASE (4, 5);
962 ASM_VOLATILE_CASE (5, 5);
963 ASM_VOLATILE_CASE (6, 5);
964 ASM_VOLATILE_CASE (7, 5);
965 ASM_VOLATILE_CASE (8, 5);
966 ASM_VOLATILE_CASE (9, 5);
967 ASM_VOLATILE_CASE (10, 5);
968 ASM_VOLATILE_CASE (11, 5);
969 ASM_VOLATILE_CASE (12, 5);
970 ASM_VOLATILE_CASE (13, 5);
971 ASM_VOLATILE_CASE (14, 5);
972 ASM_VOLATILE_CASE (15, 5);
973 ASM_VOLATILE_CASE (16, 5);
974 ASM_VOLATILE_CASE (17, 5);
975 ASM_VOLATILE_CASE (18, 5);
976 ASM_VOLATILE_CASE (19, 5);
977 ASM_VOLATILE_CASE (20, 5);
978 ASM_VOLATILE_CASE (21, 5);
979 ASM_VOLATILE_CASE (22, 5);
980 ASM_VOLATILE_CASE (23, 5);
981 ASM_VOLATILE_CASE (24, 5);
982 ASM_VOLATILE_CASE (25, 5);
983 ASM_VOLATILE_CASE (26, 5);
984 ASM_VOLATILE_CASE (27, 5);
985 ASM_VOLATILE_CASE (28, 5);
986 ASM_VOLATILE_CASE (29, 5);
987 ASM_VOLATILE_CASE (30, 5);
988 ASM_VOLATILE_CASE (31, 5);
989 default:
990 break;
991 }
992 break;
993 case 6:
994 /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
995 switch (rd) {
996 ASM_VOLATILE_CASE (0, 6);
997 ASM_VOLATILE_CASE (1, 6);
998 ASM_VOLATILE_CASE (2, 6);
999 ASM_VOLATILE_CASE (3, 6);
1000 ASM_VOLATILE_CASE (4, 6);
1001 ASM_VOLATILE_CASE (5, 6);
1002 ASM_VOLATILE_CASE (6, 6);
1003 ASM_VOLATILE_CASE (7, 6);
1004 ASM_VOLATILE_CASE (8, 6);
1005 ASM_VOLATILE_CASE (9, 6);
1006 ASM_VOLATILE_CASE (10, 6);
1007 ASM_VOLATILE_CASE (11, 6);
1008 ASM_VOLATILE_CASE (12, 6);
1009 ASM_VOLATILE_CASE (13, 6);
1010 ASM_VOLATILE_CASE (14, 6);
1011 ASM_VOLATILE_CASE (15, 6);
1012 ASM_VOLATILE_CASE (16, 6);
1013 ASM_VOLATILE_CASE (17, 6);
1014 ASM_VOLATILE_CASE (18, 6);
1015 ASM_VOLATILE_CASE (19, 6);
1016 ASM_VOLATILE_CASE (20, 6);
1017 ASM_VOLATILE_CASE (21, 6);
1018 ASM_VOLATILE_CASE (22, 6);
1019 ASM_VOLATILE_CASE (23, 6);
1020 ASM_VOLATILE_CASE (24, 6);
1021 ASM_VOLATILE_CASE (25, 6);
1022 ASM_VOLATILE_CASE (26, 6);
1023 ASM_VOLATILE_CASE (27, 6);
1024 ASM_VOLATILE_CASE (28, 6);
1025 ASM_VOLATILE_CASE (29, 6);
1026 ASM_VOLATILE_CASE (30, 6);
1027 ASM_VOLATILE_CASE (31, 6);
1028 default:
1029 break;
1030 }
1031 break;
1032 case 7:
1033 /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
1034 switch (rd) {
1035 ASM_VOLATILE_CASE (0, 7);
1036 ASM_VOLATILE_CASE (1, 7);
1037 ASM_VOLATILE_CASE (2, 7);
1038 ASM_VOLATILE_CASE (3, 7);
1039 ASM_VOLATILE_CASE (4, 7);
1040 ASM_VOLATILE_CASE (5, 7);
1041 ASM_VOLATILE_CASE (6, 7);
1042 ASM_VOLATILE_CASE (7, 7);
1043 ASM_VOLATILE_CASE (8, 7);
1044 ASM_VOLATILE_CASE (9, 7);
1045 ASM_VOLATILE_CASE (10, 7);
1046 ASM_VOLATILE_CASE (11, 7);
1047 ASM_VOLATILE_CASE (12, 7);
1048 ASM_VOLATILE_CASE (13, 7);
1049 ASM_VOLATILE_CASE (14, 7);
1050 ASM_VOLATILE_CASE (15, 7);
1051 ASM_VOLATILE_CASE (16, 7);
1052 ASM_VOLATILE_CASE (17, 7);
1053 ASM_VOLATILE_CASE (18, 7);
1054 ASM_VOLATILE_CASE (19, 7);
1055 ASM_VOLATILE_CASE (20, 7);
1056 ASM_VOLATILE_CASE (21, 7);
1057 ASM_VOLATILE_CASE (22, 7);
1058 ASM_VOLATILE_CASE (23, 7);
1059 ASM_VOLATILE_CASE (24, 7);
1060 ASM_VOLATILE_CASE (25, 7);
1061 ASM_VOLATILE_CASE (26, 7);
1062 ASM_VOLATILE_CASE (27, 7);
1063 ASM_VOLATILE_CASE (28, 7);
1064 ASM_VOLATILE_CASE (29, 7);
1065 ASM_VOLATILE_CASE (30, 7);
1066 ASM_VOLATILE_CASE (31, 7);
1067 default:
1068 break;
1069 }
1070 break;
1071
1072 default:
1073 break;
1074 }
1075 #endif
1076 return x;
1077 }
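
/* Execute rdhwr on the host to read a hardware register.  Only hardware
   register $1 (the SYNCI step size) is currently handled; any other 'rd'
   asserts. */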
#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
UInt mips32_dirtyhelper_rdhwr ( UInt rt, UInt rd )
{
   UInt x = 0;
1083 switch (rd) {
1084 case 1: /* x = SYNCI_StepSize() */
1085 __asm__ __volatile__("rdhwr %0, $1\n\t" : "=r" (x) );
1086 break;
1087
1088 default:
1089 vassert(0);
1090 break;
1091 }
1092 return x;
1093 }
1094
ULong mips64_dirtyhelper_rdhwr ( ULong rt, ULong rd )
{
1097 ULong x = 0;
1098 switch (rd) {
1099 case 1: /* x = SYNCI_StepSize() */
1100 __asm__ __volatile__("rdhwr %0, $1\n\t" : "=r" (x) );
1101 break;
1102
1103 default:
1104 vassert(0);
1105 break;
1106 }
1107 return x;
1108 }
1109 #endif
1110
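/* The ASM_VOLATILE_* macros below run one FP instruction natively with the
   guest's FCSR in force: save the host FCSR (cfc1), install the guest FCSR
   (ctc1), load the operand(s) into FP registers $f20..$f26, execute the
   instruction, capture the resulting FCSR in 'ret', then restore the host
   FCSR.  They are expanded only inside the two
   mips_dirtyhelper_calculate_FCSR_* helpers below, which supply 'ret',
   'fcsr', the operand values and, for the 64-bit forms, 'addr', 'fs'
   and 'ft'. */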
1111 #define ASM_VOLATILE_UNARY32(inst) \
1112 __asm__ volatile("cfc1 $t0, $31" "\n\t" \
1113 "ctc1 %2, $31" "\n\t" \
1114 "mtc1 %1, $f20" "\n\t" \
1115 #inst" $f20, $f20" "\n\t" \
1116 "cfc1 %0, $31" "\n\t" \
1117 "ctc1 $t0, $31" "\n\t" \
1118 : "=r" (ret) \
1119 : "r" (loFsVal), "r" (fcsr) \
1120 : "t0", "$f20" \
1121 );
1122
1123 #define ASM_VOLATILE_UNARY32_DOUBLE(inst) \
1124 __asm__ volatile("cfc1 $t0, $31" "\n\t" \
1125 "ctc1 %3, $31" "\n\t" \
1126 "mtc1 %1, $f20" "\n\t" \
1127 "mtc1 %2, $f21" "\n\t" \
1128 #inst" $f20, $f20" "\n\t" \
1129 "cfc1 %0, $31" "\n\t" \
1130 "ctc1 $t0, $31" "\n\t" \
1131 : "=r" (ret) \
1132 : "r" (loFsVal), "r" (hiFsVal), "r" (fcsr) \
1133 : "t0", "$f20", "$f21" \
1134 );
1135
1136 #define ASM_VOLATILE_UNARY64(inst) \
1137 __asm__ volatile("cfc1 $t0, $31" "\n\t" \
1138 "ctc1 %2, $31" "\n\t" \
1139 "ldc1 $f24, 0(%1)" "\n\t" \
1140 #inst" $f24, $f24" "\n\t" \
1141 "cfc1 %0, $31" "\n\t" \
1142 "ctc1 $t0, $31" "\n\t" \
1143 : "=r" (ret) \
1144 : "r" (&(addr[fs])), "r" (fcsr) \
1145 : "t0", "$f24" \
1146 );
1147
1148 #define ASM_VOLATILE_BINARY32(inst) \
1149 __asm__ volatile("cfc1 $t0, $31" "\n\t" \
1150 "ctc1 %3, $31" "\n\t" \
1151 "mtc1 %1, $f20" "\n\t" \
1152 "mtc1 %2, $f22" "\n\t" \
1153 #inst" $f20, $f20, $f22" "\n\t" \
1154 "cfc1 %0, $31" "\n\t" \
1155 "ctc1 $t0, $31" "\n\t" \
1156 : "=r" (ret) \
1157 : "r" (loFsVal), "r" (loFtVal), "r" (fcsr) \
1158 : "t0", "$f20", "$f22" \
1159 );
1160
1161 #define ASM_VOLATILE_BINARY32_DOUBLE(inst) \
1162 __asm__ volatile("cfc1 $t0, $31" "\n\t" \
1163 "ctc1 %5, $31" "\n\t" \
1164 "mtc1 %1, $f20" "\n\t" \
1165 "mtc1 %2, $f21" "\n\t" \
1166 "mtc1 %3, $f22" "\n\t" \
1167 "mtc1 %4, $f23" "\n\t" \
1168 #inst" $f20, $f20, $f22" "\n\t" \
1169 "cfc1 %0, $31" "\n\t" \
1170 "ctc1 $t0, $31" "\n\t" \
1171 : "=r" (ret) \
1172 : "r" (loFsVal), "r" (hiFsVal), "r" (loFtVal), \
1173 "r" (hiFtVal), "r" (fcsr) \
1174 : "t0", "$f20", "$f21", "$f22", "$f23" \
1175 );
1176
1177 #define ASM_VOLATILE_BINARY64(inst) \
1178 __asm__ volatile("cfc1 $t0, $31" "\n\t" \
1179 "ctc1 %3, $31" "\n\t" \
1180 "ldc1 $f24, 0(%1)" "\n\t" \
1181 "ldc1 $f26, 0(%2)" "\n\t" \
1182 #inst" $f24, $f24, $f26" "\n\t" \
1183 "cfc1 %0, $31" "\n\t" \
1184 "ctc1 $t0, $31" "\n\t" \
1185 : "=r" (ret) \
1186 : "r" (&(addr[fs])), "r" (&(addr[ft])), "r" (fcsr) \
1187 : "t0", "$f24", "$f26" \
1188 );
1189
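/* Given the guest state and the FP register numbers fs/ft of the operands,
   re-run the requested FP operation on the host with the guest's FCSR
   installed, and return the FCSR value that results.  The _fp32 variant
   reads the operands as 32-bit register halves, the _fp64 variant as full
   64-bit registers. */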
/* TODO: Add cases for all FPU instructions because any FPU instruction can
   change the value of the FCSR register. */
extern UInt mips_dirtyhelper_calculate_FCSR_fp32 ( void* gs, UInt fs, UInt ft,
                                                   flt_op inst )
{
1195 UInt ret = 0;
1196 #if defined(__mips__)
1197 VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
1198 UInt loFsVal, hiFsVal, loFtVal, hiFtVal;
1199 #if defined (_MIPSEL)
1200 ULong *addr = (ULong *)&guest_state->guest_f0;
1201 loFsVal = (UInt)addr[fs];
1202 hiFsVal = (UInt)addr[fs+1];
1203 loFtVal = (UInt)addr[ft];
1204 hiFtVal = (UInt)addr[ft+1];
1205 #elif defined (_MIPSEB)
1206 UInt *addr = (UInt *)&guest_state->guest_f0;
1207 loFsVal = (UInt)addr[fs*2];
1208 hiFsVal = (UInt)addr[fs*2+2];
1209 loFtVal = (UInt)addr[ft*2];
1210 hiFtVal = (UInt)addr[ft*2+2];
1211 #endif
1212 UInt fcsr = guest_state->guest_FCSR;
1213 switch (inst) {
1214 case ROUNDWD:
1215 ASM_VOLATILE_UNARY32_DOUBLE(round.w.d)
1216 break;
1217 case FLOORWS:
1218 ASM_VOLATILE_UNARY32(floor.w.s)
1219 break;
1220 case FLOORWD:
1221 ASM_VOLATILE_UNARY32_DOUBLE(floor.w.d)
1222 break;
1223 case TRUNCWS:
1224 ASM_VOLATILE_UNARY32(trunc.w.s)
1225 break;
1226 case TRUNCWD:
1227 ASM_VOLATILE_UNARY32_DOUBLE(trunc.w.d)
1228 break;
1229 case CEILWS:
1230 ASM_VOLATILE_UNARY32(ceil.w.s)
1231 break;
1232 case CEILWD:
1233 ASM_VOLATILE_UNARY32_DOUBLE(ceil.w.d)
1234 break;
1235 case CVTDS:
1236 ASM_VOLATILE_UNARY32(cvt.d.s)
1237 break;
1238 case CVTDW:
1239 ASM_VOLATILE_UNARY32(cvt.d.w)
1240 break;
1241 case CVTSW:
1242 ASM_VOLATILE_UNARY32(cvt.s.w)
1243 break;
1244 case CVTSD:
1245 ASM_VOLATILE_UNARY32_DOUBLE(cvt.s.d)
1246 break;
1247 case CVTWS:
1248 ASM_VOLATILE_UNARY32(cvt.w.s)
1249 break;
1250 case CVTWD:
1251 ASM_VOLATILE_UNARY32_DOUBLE(cvt.w.d)
1252 break;
1253 case ROUNDWS:
1254 ASM_VOLATILE_UNARY32(round.w.s)
1255 break;
1256 #if ((__mips == 32) && defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) \
1257 || (__mips == 64)
1258 case CEILLS:
1259 ASM_VOLATILE_UNARY32(ceil.l.s)
1260 break;
1261 case CEILLD:
1262 ASM_VOLATILE_UNARY32_DOUBLE(ceil.l.d)
1263 break;
1264 case CVTDL:
1265 ASM_VOLATILE_UNARY32_DOUBLE(cvt.d.l)
1266 break;
1267 case CVTLS:
1268 ASM_VOLATILE_UNARY32(cvt.l.s)
1269 break;
1270 case CVTLD:
1271 ASM_VOLATILE_UNARY32_DOUBLE(cvt.l.d)
1272 break;
1273 case CVTSL:
1274 ASM_VOLATILE_UNARY32_DOUBLE(cvt.s.l)
1275 break;
1276 case FLOORLS:
1277 ASM_VOLATILE_UNARY32(floor.l.s)
1278 break;
1279 case FLOORLD:
1280 ASM_VOLATILE_UNARY32_DOUBLE(floor.l.d)
1281 break;
1282 case ROUNDLS:
1283 ASM_VOLATILE_UNARY32(round.l.s)
1284 break;
1285 case ROUNDLD:
1286 ASM_VOLATILE_UNARY32_DOUBLE(round.l.d)
1287 break;
1288 case TRUNCLS:
1289 ASM_VOLATILE_UNARY32(trunc.l.s)
1290 break;
1291 case TRUNCLD:
1292 ASM_VOLATILE_UNARY32_DOUBLE(trunc.l.d)
1293 break;
1294 #endif
1295 case ADDS:
1296 ASM_VOLATILE_BINARY32(add.s)
1297 break;
1298 case ADDD:
1299 ASM_VOLATILE_BINARY32_DOUBLE(add.d)
1300 break;
1301 case SUBS:
1302 ASM_VOLATILE_BINARY32(sub.s)
1303 break;
1304 case SUBD:
1305 ASM_VOLATILE_BINARY32_DOUBLE(sub.d)
1306 break;
1307 case DIVS:
1308 ASM_VOLATILE_BINARY32(div.s)
1309 break;
1310 default:
1311 vassert(0);
1312 break;
1313 }
1314 #endif
1315 return ret;
1316 }
1317
/* TODO: Add cases for all FPU instructions because any FPU instruction can
   change the value of the FCSR register. */
extern UInt mips_dirtyhelper_calculate_FCSR_fp64 ( void* gs, UInt fs, UInt ft,
                                                   flt_op inst )
{
1323 UInt ret = 0;
1324 #if defined(__mips__)
1325 #if defined(VGA_mips32)
1326 VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
1327 #else
1328 VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
1329 #endif
1330 ULong *addr = (ULong *)&guest_state->guest_f0;
1331 UInt fcsr = guest_state->guest_FCSR;
1332 switch (inst) {
1333 case ROUNDWD:
1334 ASM_VOLATILE_UNARY64(round.w.d)
1335 break;
1336 case FLOORWS:
1337 ASM_VOLATILE_UNARY64(floor.w.s)
1338 break;
1339 case FLOORWD:
1340 ASM_VOLATILE_UNARY64(floor.w.d)
1341 break;
1342 case TRUNCWS:
1343 ASM_VOLATILE_UNARY64(trunc.w.s)
1344 break;
1345 case TRUNCWD:
1346 ASM_VOLATILE_UNARY64(trunc.w.d)
1347 break;
1348 case CEILWS:
1349 ASM_VOLATILE_UNARY64(ceil.w.s)
1350 break;
1351 case CEILWD:
1352 ASM_VOLATILE_UNARY64(ceil.w.d)
1353 break;
1354 case CVTDS:
1355 ASM_VOLATILE_UNARY64(cvt.d.s)
1356 break;
1357 case CVTDW:
1358 ASM_VOLATILE_UNARY64(cvt.d.w)
1359 break;
1360 case CVTSW:
1361 ASM_VOLATILE_UNARY64(cvt.s.w)
1362 break;
1363 case CVTSD:
1364 ASM_VOLATILE_UNARY64(cvt.s.d)
1365 break;
1366 case CVTWS:
1367 ASM_VOLATILE_UNARY64(cvt.w.s)
1368 break;
1369 case CVTWD:
1370 ASM_VOLATILE_UNARY64(cvt.w.d)
1371 break;
1372 case ROUNDWS:
1373 ASM_VOLATILE_UNARY64(round.w.s)
1374 break;
1375 #if ((__mips == 32) && defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) \
1376 || (__mips == 64)
1377 case CEILLS:
1378 ASM_VOLATILE_UNARY64(ceil.l.s)
1379 break;
1380 case CEILLD:
1381 ASM_VOLATILE_UNARY64(ceil.l.d)
1382 break;
1383 case CVTDL:
1384 ASM_VOLATILE_UNARY64(cvt.d.l)
1385 break;
1386 case CVTLS:
1387 ASM_VOLATILE_UNARY64(cvt.l.s)
1388 break;
1389 case CVTLD:
1390 ASM_VOLATILE_UNARY64(cvt.l.d)
1391 break;
1392 case CVTSL:
1393 ASM_VOLATILE_UNARY64(cvt.s.l)
1394 break;
1395 case FLOORLS:
1396 ASM_VOLATILE_UNARY64(floor.l.s)
1397 break;
1398 case FLOORLD:
1399 ASM_VOLATILE_UNARY64(floor.l.d)
1400 break;
1401 case ROUNDLS:
1402 ASM_VOLATILE_UNARY64(round.l.s)
1403 break;
1404 case ROUNDLD:
1405 ASM_VOLATILE_UNARY64(round.l.d)
1406 break;
1407 case TRUNCLS:
1408 ASM_VOLATILE_UNARY64(trunc.l.s)
1409 break;
1410 case TRUNCLD:
1411 ASM_VOLATILE_UNARY64(trunc.l.d)
1412 break;
1413 #endif
1414 case ADDS:
1415 ASM_VOLATILE_BINARY64(add.s)
1416 break;
1417 case ADDD:
1418 ASM_VOLATILE_BINARY64(add.d)
1419 break;
1420 case SUBS:
1421 ASM_VOLATILE_BINARY64(sub.s)
1422 break;
1423 case SUBD:
1424 ASM_VOLATILE_BINARY64(sub.d)
1425 break;
1426 case DIVS:
1427 ASM_VOLATILE_BINARY64(div.s)
1428 break;
1429 default:
1430 vassert(0);
1431 break;
1432 }
1433 #endif
1434 return ret;
1435 }

/*---------------------------------------------------------------*/
/*--- end                                 guest_mips_helpers.c ---*/
/*---------------------------------------------------------------*/