
/*---------------------------------------------------------------*/
/*--- begin guest_mips_helpers.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2010-2015 RT-RK
      mips-valgrind@rt-rk.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "libvex_basictypes.h"
#include "libvex_emnote.h"
#include "libvex_guest_mips32.h"
#include "libvex_guest_mips64.h"
#include "libvex_ir.h"
#include "libvex.h"

#include "main_util.h"
#include "main_globals.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_mips_defs.h"

/* This file contains helper functions for mips guest code.  Calls to
   these functions are generated by the back end.
*/

#define ALWAYSDEFD32(field) \
   { offsetof(VexGuestMIPS32State, field), \
     (sizeof ((VexGuestMIPS32State*)0)->field) }

#define ALWAYSDEFD64(field) \
   { offsetof(VexGuestMIPS64State, field), \
     (sizeof ((VexGuestMIPS64State*)0)->field) }
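
/* For example, ALWAYSDEFD32(guest_r0) expands to
   { offsetof(VexGuestMIPS32State, guest_r0), 4 }, i.e. an
   (offset, size) pair naming a piece of guest state that Memcheck may
   regard as always defined (see the alwaysDefd tables below). */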

IRExpr *guest_mips32_spechelper(const HChar * function_name, IRExpr ** args,
                                IRStmt ** precedingStmts, Int n_precedingStmts)
{
   return NULL;
}

IRExpr *guest_mips64_spechelper ( const HChar * function_name, IRExpr ** args,
                                  IRStmt ** precedingStmts,
                                  Int n_precedingStmts )
{
   return NULL;
}
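
/* Returning NULL from a spec-helper means that no specialised (folded)
   form of the clean-helper call is offered; at present no such
   specialisations are defined for MIPS. */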

/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestMIPS32_initialise( /*OUT*/ VexGuestMIPS32State * vex_state)
{
   vex_state->guest_r0 = 0;   /* Hardwired to 0 */
   vex_state->guest_r1 = 0;   /* Assembler temporary */
   vex_state->guest_r2 = 0;   /* Values for function returns ... */
   vex_state->guest_r3 = 0;   /* ... and expression evaluation */
   vex_state->guest_r4 = 0;   /* Function arguments */
   vex_state->guest_r5 = 0;
   vex_state->guest_r6 = 0;
   vex_state->guest_r7 = 0;
   vex_state->guest_r8 = 0;   /* Temporaries */
   vex_state->guest_r9 = 0;
   vex_state->guest_r10 = 0;
   vex_state->guest_r11 = 0;
   vex_state->guest_r12 = 0;
   vex_state->guest_r13 = 0;
   vex_state->guest_r14 = 0;
   vex_state->guest_r15 = 0;
   vex_state->guest_r16 = 0;  /* Saved temporaries */
   vex_state->guest_r17 = 0;
   vex_state->guest_r18 = 0;
   vex_state->guest_r19 = 0;
   vex_state->guest_r20 = 0;
   vex_state->guest_r21 = 0;
   vex_state->guest_r22 = 0;
   vex_state->guest_r23 = 0;
   vex_state->guest_r24 = 0;  /* Temporaries */
   vex_state->guest_r25 = 0;
   vex_state->guest_r26 = 0;  /* Reserved for OS kernel */
   vex_state->guest_r27 = 0;
   vex_state->guest_r28 = 0;  /* Global pointer */
   vex_state->guest_r29 = 0;  /* Stack pointer */
   vex_state->guest_r30 = 0;  /* Frame pointer */
   vex_state->guest_r31 = 0;  /* Return address */
   vex_state->guest_PC = 0;   /* Program counter */
   vex_state->guest_HI = 0;   /* Multiply and divide register higher result */
   vex_state->guest_LO = 0;   /* Multiply and divide register lower result */

   /* FPU Registers.  (Initialised to a quiet-NaN bit pattern in each
      32-bit half.) */
   vex_state->guest_f0 = 0x7ff800007ff80000ULL;  /* Floating point GP registers */
   vex_state->guest_f1 = 0x7ff800007ff80000ULL;
   vex_state->guest_f2 = 0x7ff800007ff80000ULL;
   vex_state->guest_f3 = 0x7ff800007ff80000ULL;
   vex_state->guest_f4 = 0x7ff800007ff80000ULL;
   vex_state->guest_f5 = 0x7ff800007ff80000ULL;
   vex_state->guest_f6 = 0x7ff800007ff80000ULL;
   vex_state->guest_f7 = 0x7ff800007ff80000ULL;
   vex_state->guest_f8 = 0x7ff800007ff80000ULL;
   vex_state->guest_f9 = 0x7ff800007ff80000ULL;
   vex_state->guest_f10 = 0x7ff800007ff80000ULL;
   vex_state->guest_f11 = 0x7ff800007ff80000ULL;
   vex_state->guest_f12 = 0x7ff800007ff80000ULL;
   vex_state->guest_f13 = 0x7ff800007ff80000ULL;
   vex_state->guest_f14 = 0x7ff800007ff80000ULL;
   vex_state->guest_f15 = 0x7ff800007ff80000ULL;
   vex_state->guest_f16 = 0x7ff800007ff80000ULL;
   vex_state->guest_f17 = 0x7ff800007ff80000ULL;
   vex_state->guest_f18 = 0x7ff800007ff80000ULL;
   vex_state->guest_f19 = 0x7ff800007ff80000ULL;
   vex_state->guest_f20 = 0x7ff800007ff80000ULL;
   vex_state->guest_f21 = 0x7ff800007ff80000ULL;
   vex_state->guest_f22 = 0x7ff800007ff80000ULL;
   vex_state->guest_f23 = 0x7ff800007ff80000ULL;
   vex_state->guest_f24 = 0x7ff800007ff80000ULL;
   vex_state->guest_f25 = 0x7ff800007ff80000ULL;
   vex_state->guest_f26 = 0x7ff800007ff80000ULL;
   vex_state->guest_f27 = 0x7ff800007ff80000ULL;
   vex_state->guest_f28 = 0x7ff800007ff80000ULL;
   vex_state->guest_f29 = 0x7ff800007ff80000ULL;
   vex_state->guest_f30 = 0x7ff800007ff80000ULL;
   vex_state->guest_f31 = 0x7ff800007ff80000ULL;

   vex_state->guest_FIR = 0;   /* FP implementation and revision register */
   vex_state->guest_FCCR = 0;  /* FP condition codes register */
   vex_state->guest_FEXR = 0;  /* FP exceptions register */
   vex_state->guest_FENR = 0;  /* FP enables register */
   vex_state->guest_FCSR = 0;  /* FP control/status register */
   vex_state->guest_ULR = 0;   /* TLS */

   /* Various pseudo-regs mandated by Vex or Valgrind. */
   /* Emulation notes */
   vex_state->guest_EMNOTE = 0;

   /* For clflush: record start and length of area to invalidate */
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN = 0;
   vex_state->host_EvC_COUNTER = 0;
   vex_state->host_EvC_FAILADDR = 0;

   /* Used to record the unredirected guest address at the start of
      a translation whose start has been redirected.  By reading
      this pseudo-register shortly afterwards, the translation can
      find out what the corresponding no-redirection address was.
      Note, this is only set for wrap-style redirects, not for
      replace-style ones. */
   vex_state->guest_NRADDR = 0;

   vex_state->guest_COND = 0;

   /* MIPS32 DSP ASE(r2) specific registers */
   vex_state->guest_DSPControl = 0;  /* DSPControl register */
   vex_state->guest_ac0 = 0;         /* Accumulator 0 */
   vex_state->guest_ac1 = 0;         /* Accumulator 1 */
   vex_state->guest_ac2 = 0;         /* Accumulator 2 */
   vex_state->guest_ac3 = 0;         /* Accumulator 3 */
}
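
/* A minimal usage sketch (illustrative only; 'initial_sp' and
   'entry_point' are hypothetical values chosen by the LibVEX client):

      VexGuestMIPS32State st;
      LibVEX_GuestMIPS32_initialise(&st);
      st.guest_r29 = initial_sp;    <- stack pointer
      st.guest_PC  = entry_point;   <- first instruction to execute
*/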

void LibVEX_GuestMIPS64_initialise ( /*OUT*/ VexGuestMIPS64State * vex_state )
{
   vex_state->guest_r0 = 0;   /* Hardwired to 0 */
   vex_state->guest_r1 = 0;   /* Assembler temporary */
   vex_state->guest_r2 = 0;   /* Values for function returns ... */
   vex_state->guest_r3 = 0;
   vex_state->guest_r4 = 0;   /* Function arguments */
   vex_state->guest_r5 = 0;
   vex_state->guest_r6 = 0;
   vex_state->guest_r7 = 0;
   vex_state->guest_r8 = 0;
   vex_state->guest_r9 = 0;
   vex_state->guest_r10 = 0;
   vex_state->guest_r11 = 0;
   vex_state->guest_r12 = 0;  /* Temporaries */
   vex_state->guest_r13 = 0;
   vex_state->guest_r14 = 0;
   vex_state->guest_r15 = 0;
   vex_state->guest_r16 = 0;  /* Saved temporaries */
   vex_state->guest_r17 = 0;
   vex_state->guest_r18 = 0;
   vex_state->guest_r19 = 0;
   vex_state->guest_r20 = 0;
   vex_state->guest_r21 = 0;
   vex_state->guest_r22 = 0;
   vex_state->guest_r23 = 0;
   vex_state->guest_r24 = 0;  /* Temporaries */
   vex_state->guest_r25 = 0;
   vex_state->guest_r26 = 0;  /* Reserved for OS kernel */
   vex_state->guest_r27 = 0;
   vex_state->guest_r28 = 0;  /* Global pointer */
   vex_state->guest_r29 = 0;  /* Stack pointer */
   vex_state->guest_r30 = 0;  /* Frame pointer */
   vex_state->guest_r31 = 0;  /* Return address */
   vex_state->guest_PC = 0;   /* Program counter */
   vex_state->guest_HI = 0;   /* Multiply and divide register higher result */
   vex_state->guest_LO = 0;   /* Multiply and divide register lower result */

   /* FPU Registers */
   vex_state->guest_f0 = 0x7ff800007ff80000ULL;  /* Floating point registers */
   vex_state->guest_f1 = 0x7ff800007ff80000ULL;
   vex_state->guest_f2 = 0x7ff800007ff80000ULL;
   vex_state->guest_f3 = 0x7ff800007ff80000ULL;
   vex_state->guest_f4 = 0x7ff800007ff80000ULL;
   vex_state->guest_f5 = 0x7ff800007ff80000ULL;
   vex_state->guest_f6 = 0x7ff800007ff80000ULL;
   vex_state->guest_f7 = 0x7ff800007ff80000ULL;
   vex_state->guest_f8 = 0x7ff800007ff80000ULL;
   vex_state->guest_f9 = 0x7ff800007ff80000ULL;
   vex_state->guest_f10 = 0x7ff800007ff80000ULL;
   vex_state->guest_f11 = 0x7ff800007ff80000ULL;
   vex_state->guest_f12 = 0x7ff800007ff80000ULL;
   vex_state->guest_f13 = 0x7ff800007ff80000ULL;
   vex_state->guest_f14 = 0x7ff800007ff80000ULL;
   vex_state->guest_f15 = 0x7ff800007ff80000ULL;
   vex_state->guest_f16 = 0x7ff800007ff80000ULL;
   vex_state->guest_f17 = 0x7ff800007ff80000ULL;
   vex_state->guest_f18 = 0x7ff800007ff80000ULL;
   vex_state->guest_f19 = 0x7ff800007ff80000ULL;
   vex_state->guest_f20 = 0x7ff800007ff80000ULL;
   vex_state->guest_f21 = 0x7ff800007ff80000ULL;
   vex_state->guest_f22 = 0x7ff800007ff80000ULL;
   vex_state->guest_f23 = 0x7ff800007ff80000ULL;
   vex_state->guest_f24 = 0x7ff800007ff80000ULL;
   vex_state->guest_f25 = 0x7ff800007ff80000ULL;
   vex_state->guest_f26 = 0x7ff800007ff80000ULL;
   vex_state->guest_f27 = 0x7ff800007ff80000ULL;
   vex_state->guest_f28 = 0x7ff800007ff80000ULL;
   vex_state->guest_f29 = 0x7ff800007ff80000ULL;
   vex_state->guest_f30 = 0x7ff800007ff80000ULL;
   vex_state->guest_f31 = 0x7ff800007ff80000ULL;

   vex_state->guest_FIR = 0;   /* FP implementation and revision register */
   vex_state->guest_FCCR = 0;  /* FP condition codes register */
   vex_state->guest_FEXR = 0;  /* FP exceptions register */
   vex_state->guest_FENR = 0;  /* FP enables register */
   vex_state->guest_FCSR = 0;  /* FP control/status register */

   vex_state->guest_ULR = 0;

   /* Various pseudo-regs mandated by Vex or Valgrind. */
   /* Emulation notes */
   vex_state->guest_EMNOTE = 0;

   /* For clflush: record start and length of area to invalidate */
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN = 0;
   vex_state->host_EvC_COUNTER = 0;
   vex_state->host_EvC_FAILADDR = 0;

   /* Used to record the unredirected guest address at the start of
      a translation whose start has been redirected.  By reading
      this pseudo-register shortly afterwards, the translation can
      find out what the corresponding no-redirection address was.
      Note, this is only set for wrap-style redirects, not for
      replace-style ones. */
   vex_state->guest_NRADDR = 0;

   vex_state->guest_COND = 0;
}

/*-----------------------------------------------------------*/
/*--- Describing the mips guest state, for the benefit    ---*/
/*--- of iropt and instrumenters.                         ---*/
/*-----------------------------------------------------------*/

/* Figure out if any part of the guest state contained in minoff
   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).

   We enforce precise exns for guest SP, PC and FP.

   Only SP is needed in mode VexRegUpdSpAtMemAccess.
*/
Bool guest_mips32_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   Int sp_min = offsetof(VexGuestMIPS32State, guest_r29);
   Int sp_max = sp_min + 4 - 1;
   Int pc_min = offsetof(VexGuestMIPS32State, guest_PC);
   Int pc_max = pc_min + 4 - 1;

   if (maxoff < sp_min || minoff > sp_max) {
      /* no overlap with sp */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False;  /* We only need to check stack pointer. */
   } else {
      return True;
   }

   if (maxoff < pc_min || minoff > pc_max) {
      /* no overlap with pc */
   } else {
      return True;
   }

   /* We appear to need precise updates of the frame pointer (r30) in
      order to get proper stacktraces from non-optimised code. */
   Int fp_min = offsetof(VexGuestMIPS32State, guest_r30);
   Int fp_max = fp_min + 4 - 1;

   if (maxoff < fp_min || minoff > fp_max) {
      /* no overlap with fp */
   } else {
      return True;
   }

   return False;
}
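
/* The 64-bit variant below is identical except that SP, PC and FP are
   8 bytes wide. */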

Bool guest_mips64_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   Int sp_min = offsetof(VexGuestMIPS64State, guest_r29);
   Int sp_max = sp_min + 8 - 1;
   Int pc_min = offsetof(VexGuestMIPS64State, guest_PC);
   Int pc_max = pc_min + 8 - 1;

   if (maxoff < sp_min || minoff > sp_max) {
      /* no overlap with sp */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False;  /* We only need to check stack pointer. */
   } else {
      return True;
   }

   if (maxoff < pc_min || minoff > pc_max) {
      /* no overlap with pc */
   } else {
      return True;
   }

   Int fp_min = offsetof(VexGuestMIPS64State, guest_r30);
   Int fp_max = fp_min + 8 - 1;

   if (maxoff < fp_min || minoff > fp_max) {
      /* no overlap with fp */
   } else {
      return True;
   }

   return False;
}

VexGuestLayout mips32Guest_layout = {
   /* Total size of the guest state, in bytes. */
   .total_sizeB = sizeof(VexGuestMIPS32State),
   /* Describe the stack pointer. */
   .offset_SP = offsetof(VexGuestMIPS32State, guest_r29),
   .sizeof_SP = 4,
   /* Describe the frame pointer. */
   .offset_FP = offsetof(VexGuestMIPS32State, guest_r30),
   .sizeof_FP = 4,
   /* Describe the instruction pointer. */
   .offset_IP = offsetof(VexGuestMIPS32State, guest_PC),
   .sizeof_IP = 4,
   /* Describe any sections to be regarded by Memcheck as
      'always-defined'. */
   .n_alwaysDefd = 8,
   /* ? :( */
   .alwaysDefd = {
      /* 0 */ ALWAYSDEFD32(guest_r0),
      /* 1 */ ALWAYSDEFD32(guest_r1),
      /* 2 */ ALWAYSDEFD32(guest_EMNOTE),
      /* 3 */ ALWAYSDEFD32(guest_CMSTART),
      /* 4 */ ALWAYSDEFD32(guest_CMLEN),
      /* 5 */ ALWAYSDEFD32(guest_r29),
      /* 6 */ ALWAYSDEFD32(guest_r31),
      /* 7 */ ALWAYSDEFD32(guest_ULR)
   }
};

VexGuestLayout mips64Guest_layout = {
   /* Total size of the guest state, in bytes. */
   .total_sizeB = sizeof(VexGuestMIPS64State),
   /* Describe the stack pointer. */
   .offset_SP = offsetof(VexGuestMIPS64State, guest_r29),
   .sizeof_SP = 8,
   /* Describe the frame pointer. */
   .offset_FP = offsetof(VexGuestMIPS64State, guest_r30),
   .sizeof_FP = 8,
   /* Describe the instruction pointer. */
   .offset_IP = offsetof(VexGuestMIPS64State, guest_PC),
   .sizeof_IP = 8,
   /* Describe any sections to be regarded by Memcheck as
      'always-defined'. */
   .n_alwaysDefd = 7,
   /* ? :( */
   .alwaysDefd = {
      /* 0 */ ALWAYSDEFD64(guest_r0),
      /* 1 */ ALWAYSDEFD64(guest_EMNOTE),
      /* 2 */ ALWAYSDEFD64(guest_CMSTART),
      /* 3 */ ALWAYSDEFD64(guest_CMLEN),
      /* 4 */ ALWAYSDEFD64(guest_r29),
      /* 5 */ ALWAYSDEFD64(guest_r31),
      /* 6 */ ALWAYSDEFD64(guest_ULR)
   }
};

#define ASM_VOLATILE_CASE(rd, sel) \
   case rd: \
      asm volatile ("mfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); \
      break;
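
/* mfc0 encodes both the CP0 register number and the 'sel' field as
   immediates in the instruction word, so they cannot be supplied at run
   time; hence the exhaustive switch over (rd, sel) pairs below. */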

UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel)
{
   UInt x = 0;
#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
   switch (sel) {
      case 0:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 0);
            ASM_VOLATILE_CASE(1, 0);
            ASM_VOLATILE_CASE(2, 0);
            ASM_VOLATILE_CASE(3, 0);
            ASM_VOLATILE_CASE(4, 0);
            ASM_VOLATILE_CASE(5, 0);
            ASM_VOLATILE_CASE(6, 0);
            ASM_VOLATILE_CASE(7, 0);
            ASM_VOLATILE_CASE(8, 0);
            ASM_VOLATILE_CASE(9, 0);
            ASM_VOLATILE_CASE(10, 0);
            ASM_VOLATILE_CASE(11, 0);
            ASM_VOLATILE_CASE(12, 0);
            ASM_VOLATILE_CASE(13, 0);
            ASM_VOLATILE_CASE(14, 0);
            ASM_VOLATILE_CASE(15, 0);
            ASM_VOLATILE_CASE(16, 0);
            ASM_VOLATILE_CASE(17, 0);
            ASM_VOLATILE_CASE(18, 0);
            ASM_VOLATILE_CASE(19, 0);
            ASM_VOLATILE_CASE(20, 0);
            ASM_VOLATILE_CASE(21, 0);
            ASM_VOLATILE_CASE(22, 0);
            ASM_VOLATILE_CASE(23, 0);
            ASM_VOLATILE_CASE(24, 0);
            ASM_VOLATILE_CASE(25, 0);
            ASM_VOLATILE_CASE(26, 0);
            ASM_VOLATILE_CASE(27, 0);
            ASM_VOLATILE_CASE(28, 0);
            ASM_VOLATILE_CASE(29, 0);
            ASM_VOLATILE_CASE(30, 0);
            ASM_VOLATILE_CASE(31, 0);
            default:
               break;
         }
         break;
      case 1:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 1);
            ASM_VOLATILE_CASE(1, 1);
            ASM_VOLATILE_CASE(2, 1);
            ASM_VOLATILE_CASE(3, 1);
            ASM_VOLATILE_CASE(4, 1);
            ASM_VOLATILE_CASE(5, 1);
            ASM_VOLATILE_CASE(6, 1);
            ASM_VOLATILE_CASE(7, 1);
            ASM_VOLATILE_CASE(8, 1);
            ASM_VOLATILE_CASE(9, 1);
            ASM_VOLATILE_CASE(10, 1);
            ASM_VOLATILE_CASE(11, 1);
            ASM_VOLATILE_CASE(12, 1);
            ASM_VOLATILE_CASE(13, 1);
            ASM_VOLATILE_CASE(14, 1);
            ASM_VOLATILE_CASE(15, 1);
            ASM_VOLATILE_CASE(16, 1);
            ASM_VOLATILE_CASE(17, 1);
            ASM_VOLATILE_CASE(18, 1);
            ASM_VOLATILE_CASE(19, 1);
            ASM_VOLATILE_CASE(20, 1);
            ASM_VOLATILE_CASE(21, 1);
            ASM_VOLATILE_CASE(22, 1);
            ASM_VOLATILE_CASE(23, 1);
            ASM_VOLATILE_CASE(24, 1);
            ASM_VOLATILE_CASE(25, 1);
            ASM_VOLATILE_CASE(26, 1);
            ASM_VOLATILE_CASE(27, 1);
            ASM_VOLATILE_CASE(28, 1);
            ASM_VOLATILE_CASE(29, 1);
            ASM_VOLATILE_CASE(30, 1);
            ASM_VOLATILE_CASE(31, 1);
            default:
               break;
         }
         break;
      case 2:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 2);
            ASM_VOLATILE_CASE(1, 2);
            ASM_VOLATILE_CASE(2, 2);
            ASM_VOLATILE_CASE(3, 2);
            ASM_VOLATILE_CASE(4, 2);
            ASM_VOLATILE_CASE(5, 2);
            ASM_VOLATILE_CASE(6, 2);
            ASM_VOLATILE_CASE(7, 2);
            ASM_VOLATILE_CASE(8, 2);
            ASM_VOLATILE_CASE(9, 2);
            ASM_VOLATILE_CASE(10, 2);
            ASM_VOLATILE_CASE(11, 2);
            ASM_VOLATILE_CASE(12, 2);
            ASM_VOLATILE_CASE(13, 2);
            ASM_VOLATILE_CASE(14, 2);
            ASM_VOLATILE_CASE(15, 2);
            ASM_VOLATILE_CASE(16, 2);
            ASM_VOLATILE_CASE(17, 2);
            ASM_VOLATILE_CASE(18, 2);
            ASM_VOLATILE_CASE(19, 2);
            ASM_VOLATILE_CASE(20, 2);
            ASM_VOLATILE_CASE(21, 2);
            ASM_VOLATILE_CASE(22, 2);
            ASM_VOLATILE_CASE(23, 2);
            ASM_VOLATILE_CASE(24, 2);
            ASM_VOLATILE_CASE(25, 2);
            ASM_VOLATILE_CASE(26, 2);
            ASM_VOLATILE_CASE(27, 2);
            ASM_VOLATILE_CASE(28, 2);
            ASM_VOLATILE_CASE(29, 2);
            ASM_VOLATILE_CASE(30, 2);
            ASM_VOLATILE_CASE(31, 2);
            default:
               break;
         }
         break;
      case 3:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 3);
            ASM_VOLATILE_CASE(1, 3);
            ASM_VOLATILE_CASE(2, 3);
            ASM_VOLATILE_CASE(3, 3);
            ASM_VOLATILE_CASE(4, 3);
            ASM_VOLATILE_CASE(5, 3);
            ASM_VOLATILE_CASE(6, 3);
            ASM_VOLATILE_CASE(7, 3);
            ASM_VOLATILE_CASE(8, 3);
            ASM_VOLATILE_CASE(9, 3);
            ASM_VOLATILE_CASE(10, 3);
            ASM_VOLATILE_CASE(11, 3);
            ASM_VOLATILE_CASE(12, 3);
            ASM_VOLATILE_CASE(13, 3);
            ASM_VOLATILE_CASE(14, 3);
            ASM_VOLATILE_CASE(15, 3);
            ASM_VOLATILE_CASE(16, 3);
            ASM_VOLATILE_CASE(17, 3);
            ASM_VOLATILE_CASE(18, 3);
            ASM_VOLATILE_CASE(19, 3);
            ASM_VOLATILE_CASE(20, 3);
            ASM_VOLATILE_CASE(21, 3);
            ASM_VOLATILE_CASE(22, 3);
            ASM_VOLATILE_CASE(23, 3);
            ASM_VOLATILE_CASE(24, 3);
            ASM_VOLATILE_CASE(25, 3);
            ASM_VOLATILE_CASE(26, 3);
            ASM_VOLATILE_CASE(27, 3);
            ASM_VOLATILE_CASE(28, 3);
            ASM_VOLATILE_CASE(29, 3);
            ASM_VOLATILE_CASE(30, 3);
            ASM_VOLATILE_CASE(31, 3);
            default:
               break;
         }
         break;
      case 4:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 4);
            ASM_VOLATILE_CASE(1, 4);
            ASM_VOLATILE_CASE(2, 4);
            ASM_VOLATILE_CASE(3, 4);
            ASM_VOLATILE_CASE(4, 4);
            ASM_VOLATILE_CASE(5, 4);
            ASM_VOLATILE_CASE(6, 4);
            ASM_VOLATILE_CASE(7, 4);
            ASM_VOLATILE_CASE(8, 4);
            ASM_VOLATILE_CASE(9, 4);
            ASM_VOLATILE_CASE(10, 4);
            ASM_VOLATILE_CASE(11, 4);
            ASM_VOLATILE_CASE(12, 4);
            ASM_VOLATILE_CASE(13, 4);
            ASM_VOLATILE_CASE(14, 4);
            ASM_VOLATILE_CASE(15, 4);
            ASM_VOLATILE_CASE(16, 4);
            ASM_VOLATILE_CASE(17, 4);
            ASM_VOLATILE_CASE(18, 4);
            ASM_VOLATILE_CASE(19, 4);
            ASM_VOLATILE_CASE(20, 4);
            ASM_VOLATILE_CASE(21, 4);
            ASM_VOLATILE_CASE(22, 4);
            ASM_VOLATILE_CASE(23, 4);
            ASM_VOLATILE_CASE(24, 4);
            ASM_VOLATILE_CASE(25, 4);
            ASM_VOLATILE_CASE(26, 4);
            ASM_VOLATILE_CASE(27, 4);
            ASM_VOLATILE_CASE(28, 4);
            ASM_VOLATILE_CASE(29, 4);
            ASM_VOLATILE_CASE(30, 4);
            ASM_VOLATILE_CASE(31, 4);
            default:
               break;
         }
         break;
      case 5:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 5);
            ASM_VOLATILE_CASE(1, 5);
            ASM_VOLATILE_CASE(2, 5);
            ASM_VOLATILE_CASE(3, 5);
            ASM_VOLATILE_CASE(4, 5);
            ASM_VOLATILE_CASE(5, 5);
            ASM_VOLATILE_CASE(6, 5);
            ASM_VOLATILE_CASE(7, 5);
            ASM_VOLATILE_CASE(8, 5);
            ASM_VOLATILE_CASE(9, 5);
            ASM_VOLATILE_CASE(10, 5);
            ASM_VOLATILE_CASE(11, 5);
            ASM_VOLATILE_CASE(12, 5);
            ASM_VOLATILE_CASE(13, 5);
            ASM_VOLATILE_CASE(14, 5);
            ASM_VOLATILE_CASE(15, 5);
            ASM_VOLATILE_CASE(16, 5);
            ASM_VOLATILE_CASE(17, 5);
            ASM_VOLATILE_CASE(18, 5);
            ASM_VOLATILE_CASE(19, 5);
            ASM_VOLATILE_CASE(20, 5);
            ASM_VOLATILE_CASE(21, 5);
            ASM_VOLATILE_CASE(22, 5);
            ASM_VOLATILE_CASE(23, 5);
            ASM_VOLATILE_CASE(24, 5);
            ASM_VOLATILE_CASE(25, 5);
            ASM_VOLATILE_CASE(26, 5);
            ASM_VOLATILE_CASE(27, 5);
            ASM_VOLATILE_CASE(28, 5);
            ASM_VOLATILE_CASE(29, 5);
            ASM_VOLATILE_CASE(30, 5);
            ASM_VOLATILE_CASE(31, 5);
            default:
               break;
         }
         break;
      case 6:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 6);
            ASM_VOLATILE_CASE(1, 6);
            ASM_VOLATILE_CASE(2, 6);
            ASM_VOLATILE_CASE(3, 6);
            ASM_VOLATILE_CASE(4, 6);
            ASM_VOLATILE_CASE(5, 6);
            ASM_VOLATILE_CASE(6, 6);
            ASM_VOLATILE_CASE(7, 6);
            ASM_VOLATILE_CASE(8, 6);
            ASM_VOLATILE_CASE(9, 6);
            ASM_VOLATILE_CASE(10, 6);
            ASM_VOLATILE_CASE(11, 6);
            ASM_VOLATILE_CASE(12, 6);
            ASM_VOLATILE_CASE(13, 6);
            ASM_VOLATILE_CASE(14, 6);
            ASM_VOLATILE_CASE(15, 6);
            ASM_VOLATILE_CASE(16, 6);
            ASM_VOLATILE_CASE(17, 6);
            ASM_VOLATILE_CASE(18, 6);
            ASM_VOLATILE_CASE(19, 6);
            ASM_VOLATILE_CASE(20, 6);
            ASM_VOLATILE_CASE(21, 6);
            ASM_VOLATILE_CASE(22, 6);
            ASM_VOLATILE_CASE(23, 6);
            ASM_VOLATILE_CASE(24, 6);
            ASM_VOLATILE_CASE(25, 6);
            ASM_VOLATILE_CASE(26, 6);
            ASM_VOLATILE_CASE(27, 6);
            ASM_VOLATILE_CASE(28, 6);
            ASM_VOLATILE_CASE(29, 6);
            ASM_VOLATILE_CASE(30, 6);
            ASM_VOLATILE_CASE(31, 6);
            default:
               break;
         }
         break;
      case 7:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 7);
            ASM_VOLATILE_CASE(1, 7);
            ASM_VOLATILE_CASE(2, 7);
            ASM_VOLATILE_CASE(3, 7);
            ASM_VOLATILE_CASE(4, 7);
            ASM_VOLATILE_CASE(5, 7);
            ASM_VOLATILE_CASE(6, 7);
            ASM_VOLATILE_CASE(7, 7);
            ASM_VOLATILE_CASE(8, 7);
            ASM_VOLATILE_CASE(9, 7);
            ASM_VOLATILE_CASE(10, 7);
            ASM_VOLATILE_CASE(11, 7);
            ASM_VOLATILE_CASE(12, 7);
            ASM_VOLATILE_CASE(13, 7);
            ASM_VOLATILE_CASE(14, 7);
            ASM_VOLATILE_CASE(15, 7);
            ASM_VOLATILE_CASE(16, 7);
            ASM_VOLATILE_CASE(17, 7);
            ASM_VOLATILE_CASE(18, 7);
            ASM_VOLATILE_CASE(19, 7);
            ASM_VOLATILE_CASE(20, 7);
            ASM_VOLATILE_CASE(21, 7);
            ASM_VOLATILE_CASE(22, 7);
            ASM_VOLATILE_CASE(23, 7);
            ASM_VOLATILE_CASE(24, 7);
            ASM_VOLATILE_CASE(25, 7);
            ASM_VOLATILE_CASE(26, 7);
            ASM_VOLATILE_CASE(27, 7);
            ASM_VOLATILE_CASE(28, 7);
            ASM_VOLATILE_CASE(29, 7);
            ASM_VOLATILE_CASE(30, 7);
            ASM_VOLATILE_CASE(31, 7);
            default:
               break;
         }
         break;

      default:
         break;
   }
#endif
   return x;
}
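
/* When not built for a MIPS32r2-or-later host, the #if block above is
   compiled out and the helper simply returns 0. */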

#undef ASM_VOLATILE_CASE

#define ASM_VOLATILE_CASE(rd, sel) \
   case rd: \
      asm volatile ("dmfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); \
      break;
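
/* Same scheme as mips32_dirtyhelper_mfc0 above, except that dmfc0 moves
   the full 64-bit value of the selected CP0 register. */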

ULong mips64_dirtyhelper_dmfc0 ( UInt rd, UInt sel )
{
   ULong x = 0;
#if defined(VGP_mips64_linux)
   switch (sel) {
      case 0:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 0);
            ASM_VOLATILE_CASE (1, 0);
            ASM_VOLATILE_CASE (2, 0);
            ASM_VOLATILE_CASE (3, 0);
            ASM_VOLATILE_CASE (4, 0);
            ASM_VOLATILE_CASE (5, 0);
            ASM_VOLATILE_CASE (6, 0);
            ASM_VOLATILE_CASE (7, 0);
            ASM_VOLATILE_CASE (8, 0);
            ASM_VOLATILE_CASE (9, 0);
            ASM_VOLATILE_CASE (10, 0);
            ASM_VOLATILE_CASE (11, 0);
            ASM_VOLATILE_CASE (12, 0);
            ASM_VOLATILE_CASE (13, 0);
            ASM_VOLATILE_CASE (14, 0);
            ASM_VOLATILE_CASE (15, 0);
            ASM_VOLATILE_CASE (16, 0);
            ASM_VOLATILE_CASE (17, 0);
            ASM_VOLATILE_CASE (18, 0);
            ASM_VOLATILE_CASE (19, 0);
            ASM_VOLATILE_CASE (20, 0);
            ASM_VOLATILE_CASE (21, 0);
            ASM_VOLATILE_CASE (22, 0);
            ASM_VOLATILE_CASE (23, 0);
            ASM_VOLATILE_CASE (24, 0);
            ASM_VOLATILE_CASE (25, 0);
            ASM_VOLATILE_CASE (26, 0);
            ASM_VOLATILE_CASE (27, 0);
            ASM_VOLATILE_CASE (28, 0);
            ASM_VOLATILE_CASE (29, 0);
            ASM_VOLATILE_CASE (30, 0);
            ASM_VOLATILE_CASE (31, 0);
            default:
               break;
         }
         break;
      case 1:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 1);
            ASM_VOLATILE_CASE (1, 1);
            ASM_VOLATILE_CASE (2, 1);
            ASM_VOLATILE_CASE (3, 1);
            ASM_VOLATILE_CASE (4, 1);
            ASM_VOLATILE_CASE (5, 1);
            ASM_VOLATILE_CASE (6, 1);
            ASM_VOLATILE_CASE (7, 1);
            ASM_VOLATILE_CASE (8, 1);
            ASM_VOLATILE_CASE (9, 1);
            ASM_VOLATILE_CASE (10, 1);
            ASM_VOLATILE_CASE (11, 1);
            ASM_VOLATILE_CASE (12, 1);
            ASM_VOLATILE_CASE (13, 1);
            ASM_VOLATILE_CASE (14, 1);
            ASM_VOLATILE_CASE (15, 1);
            ASM_VOLATILE_CASE (16, 1);
            ASM_VOLATILE_CASE (17, 1);
            ASM_VOLATILE_CASE (18, 1);
            ASM_VOLATILE_CASE (19, 1);
            ASM_VOLATILE_CASE (20, 1);
            ASM_VOLATILE_CASE (21, 1);
            ASM_VOLATILE_CASE (22, 1);
            ASM_VOLATILE_CASE (23, 1);
            ASM_VOLATILE_CASE (24, 1);
            ASM_VOLATILE_CASE (25, 1);
            ASM_VOLATILE_CASE (26, 1);
            ASM_VOLATILE_CASE (27, 1);
            ASM_VOLATILE_CASE (28, 1);
            ASM_VOLATILE_CASE (29, 1);
            ASM_VOLATILE_CASE (30, 1);
            ASM_VOLATILE_CASE (31, 1);
            default:
               break;
         }
         break;
      case 2:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 2);
            ASM_VOLATILE_CASE (1, 2);
            ASM_VOLATILE_CASE (2, 2);
            ASM_VOLATILE_CASE (3, 2);
            ASM_VOLATILE_CASE (4, 2);
            ASM_VOLATILE_CASE (5, 2);
            ASM_VOLATILE_CASE (6, 2);
            ASM_VOLATILE_CASE (7, 2);
            ASM_VOLATILE_CASE (8, 2);
            ASM_VOLATILE_CASE (9, 2);
            ASM_VOLATILE_CASE (10, 2);
            ASM_VOLATILE_CASE (11, 2);
            ASM_VOLATILE_CASE (12, 2);
            ASM_VOLATILE_CASE (13, 2);
            ASM_VOLATILE_CASE (14, 2);
            ASM_VOLATILE_CASE (15, 2);
            ASM_VOLATILE_CASE (16, 2);
            ASM_VOLATILE_CASE (17, 2);
            ASM_VOLATILE_CASE (18, 2);
            ASM_VOLATILE_CASE (19, 2);
            ASM_VOLATILE_CASE (20, 2);
            ASM_VOLATILE_CASE (21, 2);
            ASM_VOLATILE_CASE (22, 2);
            ASM_VOLATILE_CASE (23, 2);
            ASM_VOLATILE_CASE (24, 2);
            ASM_VOLATILE_CASE (25, 2);
            ASM_VOLATILE_CASE (26, 2);
            ASM_VOLATILE_CASE (27, 2);
            ASM_VOLATILE_CASE (28, 2);
            ASM_VOLATILE_CASE (29, 2);
            ASM_VOLATILE_CASE (30, 2);
            ASM_VOLATILE_CASE (31, 2);
            default:
               break;
         }
         break;
      case 3:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 3);
            ASM_VOLATILE_CASE (1, 3);
            ASM_VOLATILE_CASE (2, 3);
            ASM_VOLATILE_CASE (3, 3);
            ASM_VOLATILE_CASE (4, 3);
            ASM_VOLATILE_CASE (5, 3);
            ASM_VOLATILE_CASE (6, 3);
            ASM_VOLATILE_CASE (7, 3);
            ASM_VOLATILE_CASE (8, 3);
            ASM_VOLATILE_CASE (9, 3);
            ASM_VOLATILE_CASE (10, 3);
            ASM_VOLATILE_CASE (11, 3);
            ASM_VOLATILE_CASE (12, 3);
            ASM_VOLATILE_CASE (13, 3);
            ASM_VOLATILE_CASE (14, 3);
            ASM_VOLATILE_CASE (15, 3);
            ASM_VOLATILE_CASE (16, 3);
            ASM_VOLATILE_CASE (17, 3);
            ASM_VOLATILE_CASE (18, 3);
            ASM_VOLATILE_CASE (19, 3);
            ASM_VOLATILE_CASE (20, 3);
            ASM_VOLATILE_CASE (21, 3);
            ASM_VOLATILE_CASE (22, 3);
            ASM_VOLATILE_CASE (23, 3);
            ASM_VOLATILE_CASE (24, 3);
            ASM_VOLATILE_CASE (25, 3);
            ASM_VOLATILE_CASE (26, 3);
            ASM_VOLATILE_CASE (27, 3);
            ASM_VOLATILE_CASE (28, 3);
            ASM_VOLATILE_CASE (29, 3);
            ASM_VOLATILE_CASE (30, 3);
            ASM_VOLATILE_CASE (31, 3);
            default:
               break;
         }
         break;
      case 4:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 4);
            ASM_VOLATILE_CASE (1, 4);
            ASM_VOLATILE_CASE (2, 4);
            ASM_VOLATILE_CASE (3, 4);
            ASM_VOLATILE_CASE (4, 4);
            ASM_VOLATILE_CASE (5, 4);
            ASM_VOLATILE_CASE (6, 4);
            ASM_VOLATILE_CASE (7, 4);
            ASM_VOLATILE_CASE (8, 4);
            ASM_VOLATILE_CASE (9, 4);
            ASM_VOLATILE_CASE (10, 4);
            ASM_VOLATILE_CASE (11, 4);
            ASM_VOLATILE_CASE (12, 4);
            ASM_VOLATILE_CASE (13, 4);
            ASM_VOLATILE_CASE (14, 4);
            ASM_VOLATILE_CASE (15, 4);
            ASM_VOLATILE_CASE (16, 4);
            ASM_VOLATILE_CASE (17, 4);
            ASM_VOLATILE_CASE (18, 4);
            ASM_VOLATILE_CASE (19, 4);
            ASM_VOLATILE_CASE (20, 4);
            ASM_VOLATILE_CASE (21, 4);
            ASM_VOLATILE_CASE (22, 4);
            ASM_VOLATILE_CASE (23, 4);
            ASM_VOLATILE_CASE (24, 4);
            ASM_VOLATILE_CASE (25, 4);
            ASM_VOLATILE_CASE (26, 4);
            ASM_VOLATILE_CASE (27, 4);
            ASM_VOLATILE_CASE (28, 4);
            ASM_VOLATILE_CASE (29, 4);
            ASM_VOLATILE_CASE (30, 4);
            ASM_VOLATILE_CASE (31, 4);
            default:
               break;
         }
         break;
      case 5:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 5);
            ASM_VOLATILE_CASE (1, 5);
            ASM_VOLATILE_CASE (2, 5);
            ASM_VOLATILE_CASE (3, 5);
            ASM_VOLATILE_CASE (4, 5);
            ASM_VOLATILE_CASE (5, 5);
            ASM_VOLATILE_CASE (6, 5);
            ASM_VOLATILE_CASE (7, 5);
            ASM_VOLATILE_CASE (8, 5);
            ASM_VOLATILE_CASE (9, 5);
            ASM_VOLATILE_CASE (10, 5);
            ASM_VOLATILE_CASE (11, 5);
            ASM_VOLATILE_CASE (12, 5);
            ASM_VOLATILE_CASE (13, 5);
            ASM_VOLATILE_CASE (14, 5);
            ASM_VOLATILE_CASE (15, 5);
            ASM_VOLATILE_CASE (16, 5);
            ASM_VOLATILE_CASE (17, 5);
            ASM_VOLATILE_CASE (18, 5);
            ASM_VOLATILE_CASE (19, 5);
            ASM_VOLATILE_CASE (20, 5);
            ASM_VOLATILE_CASE (21, 5);
            ASM_VOLATILE_CASE (22, 5);
            ASM_VOLATILE_CASE (23, 5);
            ASM_VOLATILE_CASE (24, 5);
            ASM_VOLATILE_CASE (25, 5);
            ASM_VOLATILE_CASE (26, 5);
            ASM_VOLATILE_CASE (27, 5);
            ASM_VOLATILE_CASE (28, 5);
            ASM_VOLATILE_CASE (29, 5);
            ASM_VOLATILE_CASE (30, 5);
            ASM_VOLATILE_CASE (31, 5);
            default:
               break;
         }
         break;
      case 6:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 6);
            ASM_VOLATILE_CASE (1, 6);
            ASM_VOLATILE_CASE (2, 6);
            ASM_VOLATILE_CASE (3, 6);
            ASM_VOLATILE_CASE (4, 6);
            ASM_VOLATILE_CASE (5, 6);
            ASM_VOLATILE_CASE (6, 6);
            ASM_VOLATILE_CASE (7, 6);
            ASM_VOLATILE_CASE (8, 6);
            ASM_VOLATILE_CASE (9, 6);
            ASM_VOLATILE_CASE (10, 6);
            ASM_VOLATILE_CASE (11, 6);
            ASM_VOLATILE_CASE (12, 6);
            ASM_VOLATILE_CASE (13, 6);
            ASM_VOLATILE_CASE (14, 6);
            ASM_VOLATILE_CASE (15, 6);
            ASM_VOLATILE_CASE (16, 6);
            ASM_VOLATILE_CASE (17, 6);
            ASM_VOLATILE_CASE (18, 6);
            ASM_VOLATILE_CASE (19, 6);
            ASM_VOLATILE_CASE (20, 6);
            ASM_VOLATILE_CASE (21, 6);
            ASM_VOLATILE_CASE (22, 6);
            ASM_VOLATILE_CASE (23, 6);
            ASM_VOLATILE_CASE (24, 6);
            ASM_VOLATILE_CASE (25, 6);
            ASM_VOLATILE_CASE (26, 6);
            ASM_VOLATILE_CASE (27, 6);
            ASM_VOLATILE_CASE (28, 6);
            ASM_VOLATILE_CASE (29, 6);
            ASM_VOLATILE_CASE (30, 6);
            ASM_VOLATILE_CASE (31, 6);
            default:
               break;
         }
         break;
      case 7:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 7);
            ASM_VOLATILE_CASE (1, 7);
            ASM_VOLATILE_CASE (2, 7);
            ASM_VOLATILE_CASE (3, 7);
            ASM_VOLATILE_CASE (4, 7);
            ASM_VOLATILE_CASE (5, 7);
            ASM_VOLATILE_CASE (6, 7);
            ASM_VOLATILE_CASE (7, 7);
            ASM_VOLATILE_CASE (8, 7);
            ASM_VOLATILE_CASE (9, 7);
            ASM_VOLATILE_CASE (10, 7);
            ASM_VOLATILE_CASE (11, 7);
            ASM_VOLATILE_CASE (12, 7);
            ASM_VOLATILE_CASE (13, 7);
            ASM_VOLATILE_CASE (14, 7);
            ASM_VOLATILE_CASE (15, 7);
            ASM_VOLATILE_CASE (16, 7);
            ASM_VOLATILE_CASE (17, 7);
            ASM_VOLATILE_CASE (18, 7);
            ASM_VOLATILE_CASE (19, 7);
            ASM_VOLATILE_CASE (20, 7);
            ASM_VOLATILE_CASE (21, 7);
            ASM_VOLATILE_CASE (22, 7);
            ASM_VOLATILE_CASE (23, 7);
            ASM_VOLATILE_CASE (24, 7);
            ASM_VOLATILE_CASE (25, 7);
            ASM_VOLATILE_CASE (26, 7);
            ASM_VOLATILE_CASE (27, 7);
            ASM_VOLATILE_CASE (28, 7);
            ASM_VOLATILE_CASE (29, 7);
            ASM_VOLATILE_CASE (30, 7);
            ASM_VOLATILE_CASE (31, 7);
            default:
               break;
         }
         break;

      default:
         break;
   }
#endif
   return x;
}

#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
UInt mips32_dirtyhelper_rdhwr ( UInt rt, UInt rd )
{
   UInt x = 0;
   switch (rd) {
      case 1:  /* x = SYNCI_StepSize() */
         __asm__ __volatile__("rdhwr %0, $1\n\t" : "=r" (x) );
         break;

      case 31:  /* x = CVMX_get_cycles() */
         __asm__ __volatile__("rdhwr %0, $31\n\t" : "=r" (x) );
         break;

      default:
         vassert(0);
         break;
   }
   return x;
}

ULong mips64_dirtyhelper_rdhwr ( ULong rt, ULong rd )
{
   ULong x = 0;
   switch (rd) {
      case 1:  /* x = SYNCI_StepSize() */
         __asm__ __volatile__("rdhwr %0, $1\n\t" : "=r" (x) );
         break;

      case 31:  /* x = CVMX_get_cycles() */
         __asm__ __volatile__("rdhwr %0, $31\n\t" : "=r" (x) );
         break;

      default:
         vassert(0);
         break;
   }
   return x;
}
#endif
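
/* Only hardware registers 1 (SYNCI step size) and 31 are handled by the
   rdhwr helpers above; any other register index hits the vassert. */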

#define ASM_VOLATILE_UNARY32(inst) \
   __asm__ volatile("cfc1 $t0, $31"   "\n\t" \
                    "ctc1 %2, $31"    "\n\t" \
                    "mtc1 %1, $f20"   "\n\t" \
                    #inst" $f20, $f20""\n\t" \
                    "cfc1 %0, $31"    "\n\t" \
                    "ctc1 $t0, $31"   "\n\t" \
                    : "=r" (ret) \
                    : "r" (loFsVal), "r" (fcsr) \
                    : "t0", "$f20" \
                   );

#define ASM_VOLATILE_UNARY32_DOUBLE(inst) \
   __asm__ volatile("cfc1 $t0, $31"   "\n\t" \
                    "ctc1 %3, $31"    "\n\t" \
                    "mtc1 %1, $f20"   "\n\t" \
                    "mtc1 %2, $f21"   "\n\t" \
                    #inst" $f20, $f20""\n\t" \
                    "cfc1 %0, $31"    "\n\t" \
                    "ctc1 $t0, $31"   "\n\t" \
                    : "=r" (ret) \
                    : "r" (loFsVal), "r" (hiFsVal), "r" (fcsr) \
                    : "t0", "$f20", "$f21" \
                   );

#define ASM_VOLATILE_UNARY64(inst) \
   __asm__ volatile("cfc1 $t0, $31"    "\n\t" \
                    "ctc1 %2, $31"     "\n\t" \
                    "ldc1 $f24, 0(%1)" "\n\t" \
                    #inst" $f24, $f24" "\n\t" \
                    "cfc1 %0, $31"     "\n\t" \
                    "ctc1 $t0, $31"    "\n\t" \
                    : "=r" (ret) \
                    : "r" (&(addr[fs])), "r" (fcsr) \
                    : "t0", "$f24" \
                   );

#define ASM_VOLATILE_BINARY32(inst) \
   __asm__ volatile("cfc1 $t0, $31"         "\n\t" \
                    "ctc1 %3, $31"          "\n\t" \
                    "mtc1 %1, $f20"         "\n\t" \
                    "mtc1 %2, $f22"         "\n\t" \
                    #inst" $f20, $f20, $f22""\n\t" \
                    "cfc1 %0, $31"          "\n\t" \
                    "ctc1 $t0, $31"         "\n\t" \
                    : "=r" (ret) \
                    : "r" (loFsVal), "r" (loFtVal), "r" (fcsr) \
                    : "t0", "$f20", "$f22" \
                   );

#define ASM_VOLATILE_BINARY32_DOUBLE(inst) \
   __asm__ volatile("cfc1 $t0, $31"         "\n\t" \
                    "ctc1 %5, $31"          "\n\t" \
                    "mtc1 %1, $f20"         "\n\t" \
                    "mtc1 %2, $f21"         "\n\t" \
                    "mtc1 %3, $f22"         "\n\t" \
                    "mtc1 %4, $f23"         "\n\t" \
                    #inst" $f20, $f20, $f22""\n\t" \
                    "cfc1 %0, $31"          "\n\t" \
                    "ctc1 $t0, $31"         "\n\t" \
                    : "=r" (ret) \
                    : "r" (loFsVal), "r" (hiFsVal), "r" (loFtVal), \
                      "r" (hiFtVal), "r" (fcsr) \
                    : "t0", "$f20", "$f21", "$f22", "$f23" \
                   );

#define ASM_VOLATILE_BINARY64(inst) \
   __asm__ volatile("cfc1 $t0, $31"         "\n\t" \
                    "ctc1 %3, $31"          "\n\t" \
                    "ldc1 $f24, 0(%1)"      "\n\t" \
                    "ldc1 $f26, 0(%2)"      "\n\t" \
                    #inst" $f24, $f24, $f26""\n\t" \
                    "cfc1 %0, $31"          "\n\t" \
                    "ctc1 $t0, $31"         "\n\t" \
                    : "=r" (ret) \
                    : "r" (&(addr[fs])), "r" (&(addr[ft])), "r" (fcsr) \
                    : "t0", "$f24", "$f26" \
                   );
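
/* Each ASM_VOLATILE_* macro above follows the same pattern: save the
   host FCSR into $t0 (cfc1), install the guest FCSR (ctc1), move the
   guest operand(s) into FP registers, execute the requested instruction,
   read the resulting FCSR back into 'ret', and finally restore the host
   FCSR. */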

/* TODO: Add cases for all FPU instructions, because every FPU
   instruction can change the value of the FCSR register. */
extern UInt mips_dirtyhelper_calculate_FCSR_fp32 ( void* gs, UInt fs, UInt ft,
                                                   flt_op inst )
{
   UInt ret = 0;
#if defined(__mips__)
   VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
   UInt loFsVal, hiFsVal, loFtVal, hiFtVal;
#if defined (_MIPSEL)
   ULong *addr = (ULong *)&guest_state->guest_f0;
   loFsVal = (UInt)addr[fs];
   hiFsVal = (UInt)addr[fs+1];
   loFtVal = (UInt)addr[ft];
   hiFtVal = (UInt)addr[ft+1];
#elif defined (_MIPSEB)
   UInt *addr = (UInt *)&guest_state->guest_f0;
   loFsVal = (UInt)addr[fs*2];
   hiFsVal = (UInt)addr[fs*2+2];
   loFtVal = (UInt)addr[ft*2];
   hiFtVal = (UInt)addr[ft*2+2];
#endif
   UInt fcsr = guest_state->guest_FCSR;
   switch (inst) {
      case ROUNDWD:
         ASM_VOLATILE_UNARY32_DOUBLE(round.w.d)
         break;
      case FLOORWS:
         ASM_VOLATILE_UNARY32(floor.w.s)
         break;
      case FLOORWD:
         ASM_VOLATILE_UNARY32_DOUBLE(floor.w.d)
         break;
      case TRUNCWS:
         ASM_VOLATILE_UNARY32(trunc.w.s)
         break;
      case TRUNCWD:
         ASM_VOLATILE_UNARY32_DOUBLE(trunc.w.d)
         break;
      case CEILWS:
         ASM_VOLATILE_UNARY32(ceil.w.s)
         break;
      case CEILWD:
         ASM_VOLATILE_UNARY32_DOUBLE(ceil.w.d)
         break;
      case CVTDS:
         ASM_VOLATILE_UNARY32(cvt.d.s)
         break;
      case CVTDW:
         ASM_VOLATILE_UNARY32(cvt.d.w)
         break;
      case CVTSW:
         ASM_VOLATILE_UNARY32(cvt.s.w)
         break;
      case CVTSD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.s.d)
         break;
      case CVTWS:
         ASM_VOLATILE_UNARY32(cvt.w.s)
         break;
      case CVTWD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.w.d)
         break;
      case ROUNDWS:
         ASM_VOLATILE_UNARY32(round.w.s)
         break;
#if ((__mips == 32) && defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) \
    || (__mips == 64)
      case CEILLS:
         ASM_VOLATILE_UNARY32(ceil.l.s)
         break;
      case CEILLD:
         ASM_VOLATILE_UNARY32_DOUBLE(ceil.l.d)
         break;
      case CVTDL:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.d.l)
         break;
      case CVTLS:
         ASM_VOLATILE_UNARY32(cvt.l.s)
         break;
      case CVTLD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.l.d)
         break;
      case CVTSL:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.s.l)
         break;
      case FLOORLS:
         ASM_VOLATILE_UNARY32(floor.l.s)
         break;
      case FLOORLD:
         ASM_VOLATILE_UNARY32_DOUBLE(floor.l.d)
         break;
      case ROUNDLS:
         ASM_VOLATILE_UNARY32(round.l.s)
         break;
      case ROUNDLD:
         ASM_VOLATILE_UNARY32_DOUBLE(round.l.d)
         break;
      case TRUNCLS:
         ASM_VOLATILE_UNARY32(trunc.l.s)
         break;
      case TRUNCLD:
         ASM_VOLATILE_UNARY32_DOUBLE(trunc.l.d)
         break;
#endif
      case ADDS:
         ASM_VOLATILE_BINARY32(add.s)
         break;
      case ADDD:
         ASM_VOLATILE_BINARY32_DOUBLE(add.d)
         break;
      case SUBS:
         ASM_VOLATILE_BINARY32(sub.s)
         break;
      case SUBD:
         ASM_VOLATILE_BINARY32_DOUBLE(sub.d)
         break;
      case DIVS:
         ASM_VOLATILE_BINARY32(div.s)
         break;
      default:
         vassert(0);
         break;
   }
#endif
   return ret;
}
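
/* The value returned is the FCSR produced by executing the operation
   natively with the guest's FCSR installed; the caller presumably
   stores it back into guest_FCSR. */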

/* TODO: Add cases for all FPU instructions, because every FPU
   instruction can change the value of the FCSR register. */
extern UInt mips_dirtyhelper_calculate_FCSR_fp64 ( void* gs, UInt fs, UInt ft,
                                                   flt_op inst )
{
   UInt ret = 0;
#if defined(__mips__)
#if defined(VGA_mips32)
   VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
#else
   VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
#endif
   ULong *addr = (ULong *)&guest_state->guest_f0;
   UInt fcsr = guest_state->guest_FCSR;
   switch (inst) {
      case ROUNDWD:
         ASM_VOLATILE_UNARY64(round.w.d)
         break;
      case FLOORWS:
         ASM_VOLATILE_UNARY64(floor.w.s)
         break;
      case FLOORWD:
         ASM_VOLATILE_UNARY64(floor.w.d)
         break;
      case TRUNCWS:
         ASM_VOLATILE_UNARY64(trunc.w.s)
         break;
      case TRUNCWD:
         ASM_VOLATILE_UNARY64(trunc.w.d)
         break;
      case CEILWS:
         ASM_VOLATILE_UNARY64(ceil.w.s)
         break;
      case CEILWD:
         ASM_VOLATILE_UNARY64(ceil.w.d)
         break;
      case CVTDS:
         ASM_VOLATILE_UNARY64(cvt.d.s)
         break;
      case CVTDW:
         ASM_VOLATILE_UNARY64(cvt.d.w)
         break;
      case CVTSW:
         ASM_VOLATILE_UNARY64(cvt.s.w)
         break;
      case CVTSD:
         ASM_VOLATILE_UNARY64(cvt.s.d)
         break;
      case CVTWS:
         ASM_VOLATILE_UNARY64(cvt.w.s)
         break;
      case CVTWD:
         ASM_VOLATILE_UNARY64(cvt.w.d)
         break;
      case ROUNDWS:
         ASM_VOLATILE_UNARY64(round.w.s)
         break;
#if ((__mips == 32) && defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) \
    || (__mips == 64)
      case CEILLS:
         ASM_VOLATILE_UNARY64(ceil.l.s)
         break;
      case CEILLD:
         ASM_VOLATILE_UNARY64(ceil.l.d)
         break;
      case CVTDL:
         ASM_VOLATILE_UNARY64(cvt.d.l)
         break;
      case CVTLS:
         ASM_VOLATILE_UNARY64(cvt.l.s)
         break;
      case CVTLD:
         ASM_VOLATILE_UNARY64(cvt.l.d)
         break;
      case CVTSL:
         ASM_VOLATILE_UNARY64(cvt.s.l)
         break;
      case FLOORLS:
         ASM_VOLATILE_UNARY64(floor.l.s)
         break;
      case FLOORLD:
         ASM_VOLATILE_UNARY64(floor.l.d)
         break;
      case ROUNDLS:
         ASM_VOLATILE_UNARY64(round.l.s)
         break;
      case ROUNDLD:
         ASM_VOLATILE_UNARY64(round.l.d)
         break;
      case TRUNCLS:
         ASM_VOLATILE_UNARY64(trunc.l.s)
         break;
      case TRUNCLD:
         ASM_VOLATILE_UNARY64(trunc.l.d)
         break;
#endif
      case ADDS:
         ASM_VOLATILE_BINARY64(add.s)
         break;
      case ADDD:
         ASM_VOLATILE_BINARY64(add.d)
         break;
      case SUBS:
         ASM_VOLATILE_BINARY64(sub.s)
         break;
      case SUBD:
         ASM_VOLATILE_BINARY64(sub.d)
         break;
      case DIVS:
         ASM_VOLATILE_BINARY64(div.s)
         break;
      default:
         vassert(0);
         break;
   }
#endif
   return ret;
}

/*---------------------------------------------------------------*/
/*--- end guest_mips_helpers.c ---*/
/*---------------------------------------------------------------*/