/*---------------------------------------------------------------*/
/*--- begin                              guest_mips_helpers.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2010-2013 RT-RK
      mips-valgrind (at) rt-rk.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "libvex_basictypes.h"
#include "libvex_emnote.h"
#include "libvex_guest_mips32.h"
#include "libvex_guest_mips64.h"
#include "libvex_ir.h"
#include "libvex.h"

#include "main_util.h"
#include "main_globals.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_mips_defs.h"

/* This file contains helper functions for mips guest code.  Calls to
   these functions are generated by the back end.
*/

#define ALWAYSDEFD32(field) \
   { offsetof(VexGuestMIPS32State, field), \
     (sizeof ((VexGuestMIPS32State*)0)->field) }

#define ALWAYSDEFD64(field) \
   { offsetof(VexGuestMIPS64State, field), \
     (sizeof ((VexGuestMIPS64State*)0)->field) }

IRExpr *guest_mips32_spechelper(const HChar * function_name, IRExpr ** args,
                                IRStmt ** precedingStmts, Int n_precedingStmts)
{
   return NULL;
}

IRExpr *guest_mips64_spechelper ( const HChar * function_name, IRExpr ** args,
                                  IRStmt ** precedingStmts,
                                  Int n_precedingStmts )
{
   return NULL;
}

/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestMIPS32_initialise( /*OUT*/ VexGuestMIPS32State * vex_state)
{
   vex_state->guest_r0 = 0;   /* Hardwired to 0 */
   vex_state->guest_r1 = 0;   /* Assembler temporary */
   vex_state->guest_r2 = 0;   /* Values for function returns ... */
   vex_state->guest_r3 = 0;   /* ...and expression evaluation */
   vex_state->guest_r4 = 0;   /* Function arguments */
   vex_state->guest_r5 = 0;
   vex_state->guest_r6 = 0;
   vex_state->guest_r7 = 0;
   vex_state->guest_r8 = 0;   /* Temporaries */
   vex_state->guest_r9 = 0;
   vex_state->guest_r10 = 0;
   vex_state->guest_r11 = 0;
   vex_state->guest_r12 = 0;
   vex_state->guest_r13 = 0;
   vex_state->guest_r14 = 0;
   vex_state->guest_r15 = 0;
   vex_state->guest_r16 = 0;  /* Saved temporaries */
   vex_state->guest_r17 = 0;
   vex_state->guest_r18 = 0;
   vex_state->guest_r19 = 0;
   vex_state->guest_r20 = 0;
   vex_state->guest_r21 = 0;
   vex_state->guest_r22 = 0;
   vex_state->guest_r23 = 0;
   vex_state->guest_r24 = 0;  /* Temporaries */
   vex_state->guest_r25 = 0;
   vex_state->guest_r26 = 0;  /* Reserved for OS kernel */
   vex_state->guest_r27 = 0;
   vex_state->guest_r28 = 0;  /* Global pointer */
   vex_state->guest_r29 = 0;  /* Stack pointer */
   vex_state->guest_r30 = 0;  /* Frame pointer */
   vex_state->guest_r31 = 0;  /* Return address */
   vex_state->guest_PC = 0;   /* Program counter */
   vex_state->guest_HI = 0;   /* Multiply and divide register higher result */
   vex_state->guest_LO = 0;   /* Multiply and divide register lower result */
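
   /* Note: 0x7ff800007ff80000 is a quiet-NaN bit pattern both when the
      64-bit value is viewed as one double and when each 32-bit half is
      viewed as a single-precision value, so uninitialised FP registers
      presumably read back as NaNs rather than as arbitrary numbers. */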
   /* FPU Registers */
   vex_state->guest_f0 = 0x7ff800007ff80000ULL;  /* Floating point GP registers */
   vex_state->guest_f1 = 0x7ff800007ff80000ULL;
   vex_state->guest_f2 = 0x7ff800007ff80000ULL;
   vex_state->guest_f3 = 0x7ff800007ff80000ULL;
   vex_state->guest_f4 = 0x7ff800007ff80000ULL;
   vex_state->guest_f5 = 0x7ff800007ff80000ULL;
   vex_state->guest_f6 = 0x7ff800007ff80000ULL;
   vex_state->guest_f7 = 0x7ff800007ff80000ULL;
   vex_state->guest_f8 = 0x7ff800007ff80000ULL;
   vex_state->guest_f9 = 0x7ff800007ff80000ULL;
   vex_state->guest_f10 = 0x7ff800007ff80000ULL;
   vex_state->guest_f11 = 0x7ff800007ff80000ULL;
   vex_state->guest_f12 = 0x7ff800007ff80000ULL;
   vex_state->guest_f13 = 0x7ff800007ff80000ULL;
   vex_state->guest_f14 = 0x7ff800007ff80000ULL;
   vex_state->guest_f15 = 0x7ff800007ff80000ULL;
   vex_state->guest_f16 = 0x7ff800007ff80000ULL;
   vex_state->guest_f17 = 0x7ff800007ff80000ULL;
   vex_state->guest_f18 = 0x7ff800007ff80000ULL;
   vex_state->guest_f19 = 0x7ff800007ff80000ULL;
   vex_state->guest_f20 = 0x7ff800007ff80000ULL;
   vex_state->guest_f21 = 0x7ff800007ff80000ULL;
   vex_state->guest_f22 = 0x7ff800007ff80000ULL;
   vex_state->guest_f23 = 0x7ff800007ff80000ULL;
   vex_state->guest_f24 = 0x7ff800007ff80000ULL;
   vex_state->guest_f25 = 0x7ff800007ff80000ULL;
   vex_state->guest_f26 = 0x7ff800007ff80000ULL;
   vex_state->guest_f27 = 0x7ff800007ff80000ULL;
   vex_state->guest_f28 = 0x7ff800007ff80000ULL;
   vex_state->guest_f29 = 0x7ff800007ff80000ULL;
   vex_state->guest_f30 = 0x7ff800007ff80000ULL;
   vex_state->guest_f31 = 0x7ff800007ff80000ULL;

   vex_state->guest_FIR = 0;   /* FP implementation and revision register */
   vex_state->guest_FCCR = 0;  /* FP condition codes register */
   vex_state->guest_FEXR = 0;  /* FP exceptions register */
   vex_state->guest_FENR = 0;  /* FP enables register */
   vex_state->guest_FCSR = 0;  /* FP control/status register */
   vex_state->guest_ULR = 0;   /* TLS */

   /* Various pseudo-regs mandated by Vex or Valgrind. */
   /* Emulation notes */
   vex_state->guest_EMNOTE = 0;

   /* For code-cache invalidation: record start and length of the area
      to invalidate. */
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN = 0;
   vex_state->host_EvC_COUNTER = 0;
   vex_state->host_EvC_FAILADDR = 0;

   /* Used to record the unredirected guest address at the start of
      a translation whose start has been redirected.  By reading
      this pseudo-register shortly afterwards, the translation can
      find out what the corresponding no-redirection address was.
      Note, this is only set for wrap-style redirects, not for
      replace-style ones. */
   vex_state->guest_NRADDR = 0;

   vex_state->guest_COND = 0;

   /* MIPS32 DSP ASE(r2) specific registers */
   vex_state->guest_DSPControl = 0;  /* DSPControl register */
   vex_state->guest_ac0 = 0;         /* Accumulator 0 */
   vex_state->guest_ac1 = 0;         /* Accumulator 1 */
   vex_state->guest_ac2 = 0;         /* Accumulator 2 */
   vex_state->guest_ac3 = 0;         /* Accumulator 3 */
}
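
/* As above, but for the MIPS64 guest state.  Note that the 64-bit state
   carries no DSP ASE accumulator or DSPControl fields. */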
void LibVEX_GuestMIPS64_initialise ( /*OUT*/ VexGuestMIPS64State * vex_state )
{
   vex_state->guest_r0 = 0;   /* Hardwired to 0 */
   vex_state->guest_r1 = 0;   /* Assembler temporary */
   vex_state->guest_r2 = 0;   /* Values for function returns ... */
   vex_state->guest_r3 = 0;
   vex_state->guest_r4 = 0;   /* Function arguments */
   vex_state->guest_r5 = 0;
   vex_state->guest_r6 = 0;
   vex_state->guest_r7 = 0;
   vex_state->guest_r8 = 0;
   vex_state->guest_r9 = 0;
   vex_state->guest_r10 = 0;
   vex_state->guest_r11 = 0;
   vex_state->guest_r12 = 0;  /* Temporaries */
   vex_state->guest_r13 = 0;
   vex_state->guest_r14 = 0;
   vex_state->guest_r15 = 0;
   vex_state->guest_r16 = 0;  /* Saved temporaries */
   vex_state->guest_r17 = 0;
   vex_state->guest_r18 = 0;
   vex_state->guest_r19 = 0;
   vex_state->guest_r20 = 0;
   vex_state->guest_r21 = 0;
   vex_state->guest_r22 = 0;
   vex_state->guest_r23 = 0;
   vex_state->guest_r24 = 0;  /* Temporaries */
   vex_state->guest_r25 = 0;
   vex_state->guest_r26 = 0;  /* Reserved for OS kernel */
   vex_state->guest_r27 = 0;
   vex_state->guest_r28 = 0;  /* Global pointer */
   vex_state->guest_r29 = 0;  /* Stack pointer */
   vex_state->guest_r30 = 0;  /* Frame pointer */
   vex_state->guest_r31 = 0;  /* Return address */
   vex_state->guest_PC = 0;   /* Program counter */
   vex_state->guest_HI = 0;   /* Multiply and divide register higher result */
   vex_state->guest_LO = 0;   /* Multiply and divide register lower result */

   /* FPU Registers */
   vex_state->guest_f0 = 0x7ff800007ff80000ULL;  /* Floating point registers */
   vex_state->guest_f1 = 0x7ff800007ff80000ULL;
   vex_state->guest_f2 = 0x7ff800007ff80000ULL;
   vex_state->guest_f3 = 0x7ff800007ff80000ULL;
   vex_state->guest_f4 = 0x7ff800007ff80000ULL;
   vex_state->guest_f5 = 0x7ff800007ff80000ULL;
   vex_state->guest_f6 = 0x7ff800007ff80000ULL;
   vex_state->guest_f7 = 0x7ff800007ff80000ULL;
   vex_state->guest_f8 = 0x7ff800007ff80000ULL;
   vex_state->guest_f9 = 0x7ff800007ff80000ULL;
   vex_state->guest_f10 = 0x7ff800007ff80000ULL;
   vex_state->guest_f11 = 0x7ff800007ff80000ULL;
   vex_state->guest_f12 = 0x7ff800007ff80000ULL;
   vex_state->guest_f13 = 0x7ff800007ff80000ULL;
   vex_state->guest_f14 = 0x7ff800007ff80000ULL;
   vex_state->guest_f15 = 0x7ff800007ff80000ULL;
   vex_state->guest_f16 = 0x7ff800007ff80000ULL;
   vex_state->guest_f17 = 0x7ff800007ff80000ULL;
   vex_state->guest_f18 = 0x7ff800007ff80000ULL;
   vex_state->guest_f19 = 0x7ff800007ff80000ULL;
   vex_state->guest_f20 = 0x7ff800007ff80000ULL;
   vex_state->guest_f21 = 0x7ff800007ff80000ULL;
   vex_state->guest_f22 = 0x7ff800007ff80000ULL;
   vex_state->guest_f23 = 0x7ff800007ff80000ULL;
   vex_state->guest_f24 = 0x7ff800007ff80000ULL;
   vex_state->guest_f25 = 0x7ff800007ff80000ULL;
   vex_state->guest_f26 = 0x7ff800007ff80000ULL;
   vex_state->guest_f27 = 0x7ff800007ff80000ULL;
   vex_state->guest_f28 = 0x7ff800007ff80000ULL;
   vex_state->guest_f29 = 0x7ff800007ff80000ULL;
   vex_state->guest_f30 = 0x7ff800007ff80000ULL;
   vex_state->guest_f31 = 0x7ff800007ff80000ULL;

   vex_state->guest_FIR = 0;   /* FP implementation and revision register */
   vex_state->guest_FCCR = 0;  /* FP condition codes register */
   vex_state->guest_FEXR = 0;  /* FP exceptions register */
   vex_state->guest_FENR = 0;  /* FP enables register */
   vex_state->guest_FCSR = 0;  /* FP control/status register */

   vex_state->guest_ULR = 0;

   /* Various pseudo-regs mandated by Vex or Valgrind. */
   /* Emulation notes */
   vex_state->guest_EMNOTE = 0;

   /* For code-cache invalidation: record start and length of the area
      to invalidate. */
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN = 0;
   vex_state->host_EvC_COUNTER = 0;
   vex_state->host_EvC_FAILADDR = 0;

   /* Used to record the unredirected guest address at the start of
      a translation whose start has been redirected.  By reading
      this pseudo-register shortly afterwards, the translation can
      find out what the corresponding no-redirection address was.
      Note, this is only set for wrap-style redirects, not for
      replace-style ones. */
   vex_state->guest_NRADDR = 0;

   vex_state->guest_COND = 0;
}

/*-----------------------------------------------------------*/
/*--- Describing the mips guest state, for the benefit    ---*/
/*--- of iropt and instrumenters.                         ---*/
/*-----------------------------------------------------------*/

/* Figure out if any part of the guest state contained in minoff
   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).

   We enforce precise exns for guest SP, PC.

   Only SP is needed in mode VexRegUpdSpAtMemAccess.
*/
Bool guest_mips32_state_requires_precise_mem_exns(Int minoff, Int maxoff)
{
   Int sp_min = offsetof(VexGuestMIPS32State, guest_r29);
   Int sp_max = sp_min + 4 - 1;
   Int pc_min = offsetof(VexGuestMIPS32State, guest_PC);
   Int pc_max = pc_min + 4 - 1;

   if (maxoff < sp_min || minoff > sp_max) {
      /* no overlap with sp */
      if (vex_control.iropt_register_updates == VexRegUpdSpAtMemAccess)
         return False;  /* We only need to check stack pointer. */
   } else {
      return True;
   }

   if (maxoff < pc_min || minoff > pc_max) {
      /* no overlap with pc */
   } else {
      return True;
   }

   /* We appear to need precise updates of the frame pointer (r30) in
      order to get proper stacktraces from non-optimised code. */
   Int fp_min = offsetof(VexGuestMIPS32State, guest_r30);
   Int fp_max = fp_min + 4 - 1;

   if (maxoff < fp_min || minoff > fp_max) {
      /* no overlap with fp */
   } else {
      return True;
   }

   return False;
}
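
/* Same policy as above, applied to the 64-bit guest state, where SP, PC
   and FP are each 8 bytes wide. */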
Bool guest_mips64_state_requires_precise_mem_exns ( Int minoff, Int maxoff )
{
   Int sp_min = offsetof(VexGuestMIPS64State, guest_r29);
   Int sp_max = sp_min + 8 - 1;
   Int pc_min = offsetof(VexGuestMIPS64State, guest_PC);
   Int pc_max = pc_min + 8 - 1;

   if ( maxoff < sp_min || minoff > sp_max ) {
      /* no overlap with sp */
      if (vex_control.iropt_register_updates == VexRegUpdSpAtMemAccess)
         return False;  /* We only need to check stack pointer. */
   } else {
      return True;
   }

   if ( maxoff < pc_min || minoff > pc_max ) {
      /* no overlap with pc */
   } else {
      return True;
   }

   Int fp_min = offsetof(VexGuestMIPS64State, guest_r30);
   Int fp_max = fp_min + 8 - 1;

   if ( maxoff < fp_min || minoff > fp_max ) {
      /* no overlap with fp */
   } else {
      return True;
   }

   return False;
}

VexGuestLayout mips32Guest_layout = {
   /* Total size of the guest state, in bytes. */
   .total_sizeB = sizeof(VexGuestMIPS32State),
   /* Describe the stack pointer. */
   .offset_SP = offsetof(VexGuestMIPS32State, guest_r29),
   .sizeof_SP = 4,
   /* Describe the frame pointer. */
   .offset_FP = offsetof(VexGuestMIPS32State, guest_r30),
   .sizeof_FP = 4,
   /* Describe the instruction pointer. */
   .offset_IP = offsetof(VexGuestMIPS32State, guest_PC),
   .sizeof_IP = 4,
   /* Describe any sections to be regarded by Memcheck as
      'always-defined'. */
   .n_alwaysDefd = 8,
   /* ? :( */
   .alwaysDefd = {
      /* 0 */ ALWAYSDEFD32(guest_r0),
      /* 1 */ ALWAYSDEFD32(guest_r1),
      /* 2 */ ALWAYSDEFD32(guest_EMNOTE),
      /* 3 */ ALWAYSDEFD32(guest_CMSTART),
      /* 4 */ ALWAYSDEFD32(guest_CMLEN),
      /* 5 */ ALWAYSDEFD32(guest_r29),
      /* 6 */ ALWAYSDEFD32(guest_r31),
      /* 7 */ ALWAYSDEFD32(guest_ULR)
   }
};

VexGuestLayout mips64Guest_layout = {
   /* Total size of the guest state, in bytes. */
   .total_sizeB = sizeof(VexGuestMIPS64State),
   /* Describe the stack pointer. */
   .offset_SP = offsetof(VexGuestMIPS64State, guest_r29),
   .sizeof_SP = 8,
   /* Describe the frame pointer. */
   .offset_FP = offsetof(VexGuestMIPS64State, guest_r30),
   .sizeof_FP = 8,
   /* Describe the instruction pointer. */
   .offset_IP = offsetof(VexGuestMIPS64State, guest_PC),
   .sizeof_IP = 8,
   /* Describe any sections to be regarded by Memcheck as
      'always-defined'. */
   .n_alwaysDefd = 7,
   /* ? :( */
   .alwaysDefd = {
      /* 0 */ ALWAYSDEFD64 (guest_r0),
      /* 1 */ ALWAYSDEFD64 (guest_EMNOTE),
      /* 2 */ ALWAYSDEFD64 (guest_CMSTART),
      /* 3 */ ALWAYSDEFD64 (guest_CMLEN),
      /* 4 */ ALWAYSDEFD64 (guest_r29),
      /* 5 */ ALWAYSDEFD64 (guest_r31),
      /* 6 */ ALWAYSDEFD64 (guest_ULR)
   }
};
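
/* mips32_dirtyhelper_mfc0 reads coprocessor 0 register 'rd', select 'sel',
   directly on the host.  Since mfc0 encodes both fields as immediates,
   each (rd, sel) pair needs its own asm statement; the large switch below
   is generated with ASM_VOLATILE_CASE.  On hosts where this cannot be
   executed (non-MIPS, or pre-r2 ISA) the helper just returns 0. */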
#define ASM_VOLATILE_CASE(rd, sel) \
   case rd: \
      asm volatile ("mfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); \
      break;

UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel)
{
   UInt x = 0;
#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
   switch (sel) {
      case 0:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 0);
            ASM_VOLATILE_CASE(1, 0);
            ASM_VOLATILE_CASE(2, 0);
            ASM_VOLATILE_CASE(3, 0);
            ASM_VOLATILE_CASE(4, 0);
            ASM_VOLATILE_CASE(5, 0);
            ASM_VOLATILE_CASE(6, 0);
            ASM_VOLATILE_CASE(7, 0);
            ASM_VOLATILE_CASE(8, 0);
            ASM_VOLATILE_CASE(9, 0);
            ASM_VOLATILE_CASE(10, 0);
            ASM_VOLATILE_CASE(11, 0);
            ASM_VOLATILE_CASE(12, 0);
            ASM_VOLATILE_CASE(13, 0);
            ASM_VOLATILE_CASE(14, 0);
            ASM_VOLATILE_CASE(15, 0);
            ASM_VOLATILE_CASE(16, 0);
            ASM_VOLATILE_CASE(17, 0);
            ASM_VOLATILE_CASE(18, 0);
            ASM_VOLATILE_CASE(19, 0);
            ASM_VOLATILE_CASE(20, 0);
            ASM_VOLATILE_CASE(21, 0);
            ASM_VOLATILE_CASE(22, 0);
            ASM_VOLATILE_CASE(23, 0);
            ASM_VOLATILE_CASE(24, 0);
            ASM_VOLATILE_CASE(25, 0);
            ASM_VOLATILE_CASE(26, 0);
            ASM_VOLATILE_CASE(27, 0);
            ASM_VOLATILE_CASE(28, 0);
            ASM_VOLATILE_CASE(29, 0);
            ASM_VOLATILE_CASE(30, 0);
            ASM_VOLATILE_CASE(31, 0);
            default:
               break;
         }
         break;
      case 1:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 1);
            ASM_VOLATILE_CASE(1, 1);
            ASM_VOLATILE_CASE(2, 1);
            ASM_VOLATILE_CASE(3, 1);
            ASM_VOLATILE_CASE(4, 1);
            ASM_VOLATILE_CASE(5, 1);
            ASM_VOLATILE_CASE(6, 1);
            ASM_VOLATILE_CASE(7, 1);
            ASM_VOLATILE_CASE(8, 1);
            ASM_VOLATILE_CASE(9, 1);
            ASM_VOLATILE_CASE(10, 1);
            ASM_VOLATILE_CASE(11, 1);
            ASM_VOLATILE_CASE(12, 1);
            ASM_VOLATILE_CASE(13, 1);
            ASM_VOLATILE_CASE(14, 1);
            ASM_VOLATILE_CASE(15, 1);
            ASM_VOLATILE_CASE(16, 1);
            ASM_VOLATILE_CASE(17, 1);
            ASM_VOLATILE_CASE(18, 1);
            ASM_VOLATILE_CASE(19, 1);
            ASM_VOLATILE_CASE(20, 1);
            ASM_VOLATILE_CASE(21, 1);
            ASM_VOLATILE_CASE(22, 1);
            ASM_VOLATILE_CASE(23, 1);
            ASM_VOLATILE_CASE(24, 1);
            ASM_VOLATILE_CASE(25, 1);
            ASM_VOLATILE_CASE(26, 1);
            ASM_VOLATILE_CASE(27, 1);
            ASM_VOLATILE_CASE(28, 1);
            ASM_VOLATILE_CASE(29, 1);
            ASM_VOLATILE_CASE(30, 1);
            ASM_VOLATILE_CASE(31, 1);
            default:
               break;
         }
         break;
      case 2:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 2);
            ASM_VOLATILE_CASE(1, 2);
            ASM_VOLATILE_CASE(2, 2);
            ASM_VOLATILE_CASE(3, 2);
            ASM_VOLATILE_CASE(4, 2);
            ASM_VOLATILE_CASE(5, 2);
            ASM_VOLATILE_CASE(6, 2);
            ASM_VOLATILE_CASE(7, 2);
            ASM_VOLATILE_CASE(8, 2);
            ASM_VOLATILE_CASE(9, 2);
            ASM_VOLATILE_CASE(10, 2);
            ASM_VOLATILE_CASE(11, 2);
            ASM_VOLATILE_CASE(12, 2);
            ASM_VOLATILE_CASE(13, 2);
            ASM_VOLATILE_CASE(14, 2);
            ASM_VOLATILE_CASE(15, 2);
            ASM_VOLATILE_CASE(16, 2);
            ASM_VOLATILE_CASE(17, 2);
            ASM_VOLATILE_CASE(18, 2);
            ASM_VOLATILE_CASE(19, 2);
            ASM_VOLATILE_CASE(20, 2);
            ASM_VOLATILE_CASE(21, 2);
            ASM_VOLATILE_CASE(22, 2);
            ASM_VOLATILE_CASE(23, 2);
            ASM_VOLATILE_CASE(24, 2);
            ASM_VOLATILE_CASE(25, 2);
            ASM_VOLATILE_CASE(26, 2);
            ASM_VOLATILE_CASE(27, 2);
            ASM_VOLATILE_CASE(28, 2);
            ASM_VOLATILE_CASE(29, 2);
            ASM_VOLATILE_CASE(30, 2);
            ASM_VOLATILE_CASE(31, 2);
            default:
               break;
         }
         break;
      case 3:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 3);
            ASM_VOLATILE_CASE(1, 3);
            ASM_VOLATILE_CASE(2, 3);
            ASM_VOLATILE_CASE(3, 3);
            ASM_VOLATILE_CASE(4, 3);
            ASM_VOLATILE_CASE(5, 3);
            ASM_VOLATILE_CASE(6, 3);
            ASM_VOLATILE_CASE(7, 3);
            ASM_VOLATILE_CASE(8, 3);
            ASM_VOLATILE_CASE(9, 3);
            ASM_VOLATILE_CASE(10, 3);
            ASM_VOLATILE_CASE(11, 3);
            ASM_VOLATILE_CASE(12, 3);
            ASM_VOLATILE_CASE(13, 3);
            ASM_VOLATILE_CASE(14, 3);
            ASM_VOLATILE_CASE(15, 3);
            ASM_VOLATILE_CASE(16, 3);
            ASM_VOLATILE_CASE(17, 3);
            ASM_VOLATILE_CASE(18, 3);
            ASM_VOLATILE_CASE(19, 3);
            ASM_VOLATILE_CASE(20, 3);
            ASM_VOLATILE_CASE(21, 3);
            ASM_VOLATILE_CASE(22, 3);
            ASM_VOLATILE_CASE(23, 3);
            ASM_VOLATILE_CASE(24, 3);
            ASM_VOLATILE_CASE(25, 3);
            ASM_VOLATILE_CASE(26, 3);
            ASM_VOLATILE_CASE(27, 3);
            ASM_VOLATILE_CASE(28, 3);
            ASM_VOLATILE_CASE(29, 3);
            ASM_VOLATILE_CASE(30, 3);
            ASM_VOLATILE_CASE(31, 3);
            default:
               break;
         }
         break;
      case 4:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 4);
            ASM_VOLATILE_CASE(1, 4);
            ASM_VOLATILE_CASE(2, 4);
            ASM_VOLATILE_CASE(3, 4);
            ASM_VOLATILE_CASE(4, 4);
            ASM_VOLATILE_CASE(5, 4);
            ASM_VOLATILE_CASE(6, 4);
            ASM_VOLATILE_CASE(7, 4);
            ASM_VOLATILE_CASE(8, 4);
            ASM_VOLATILE_CASE(9, 4);
            ASM_VOLATILE_CASE(10, 4);
            ASM_VOLATILE_CASE(11, 4);
            ASM_VOLATILE_CASE(12, 4);
            ASM_VOLATILE_CASE(13, 4);
            ASM_VOLATILE_CASE(14, 4);
            ASM_VOLATILE_CASE(15, 4);
            ASM_VOLATILE_CASE(16, 4);
            ASM_VOLATILE_CASE(17, 4);
            ASM_VOLATILE_CASE(18, 4);
            ASM_VOLATILE_CASE(19, 4);
            ASM_VOLATILE_CASE(20, 4);
            ASM_VOLATILE_CASE(21, 4);
            ASM_VOLATILE_CASE(22, 4);
            ASM_VOLATILE_CASE(23, 4);
            ASM_VOLATILE_CASE(24, 4);
            ASM_VOLATILE_CASE(25, 4);
            ASM_VOLATILE_CASE(26, 4);
            ASM_VOLATILE_CASE(27, 4);
            ASM_VOLATILE_CASE(28, 4);
            ASM_VOLATILE_CASE(29, 4);
            ASM_VOLATILE_CASE(30, 4);
            ASM_VOLATILE_CASE(31, 4);
            default:
               break;
         }
         break;
      case 5:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 5);
            ASM_VOLATILE_CASE(1, 5);
            ASM_VOLATILE_CASE(2, 5);
            ASM_VOLATILE_CASE(3, 5);
            ASM_VOLATILE_CASE(4, 5);
            ASM_VOLATILE_CASE(5, 5);
            ASM_VOLATILE_CASE(6, 5);
            ASM_VOLATILE_CASE(7, 5);
            ASM_VOLATILE_CASE(8, 5);
            ASM_VOLATILE_CASE(9, 5);
            ASM_VOLATILE_CASE(10, 5);
            ASM_VOLATILE_CASE(11, 5);
            ASM_VOLATILE_CASE(12, 5);
            ASM_VOLATILE_CASE(13, 5);
            ASM_VOLATILE_CASE(14, 5);
            ASM_VOLATILE_CASE(15, 5);
            ASM_VOLATILE_CASE(16, 5);
            ASM_VOLATILE_CASE(17, 5);
            ASM_VOLATILE_CASE(18, 5);
            ASM_VOLATILE_CASE(19, 5);
            ASM_VOLATILE_CASE(20, 5);
            ASM_VOLATILE_CASE(21, 5);
            ASM_VOLATILE_CASE(22, 5);
            ASM_VOLATILE_CASE(23, 5);
            ASM_VOLATILE_CASE(24, 5);
            ASM_VOLATILE_CASE(25, 5);
            ASM_VOLATILE_CASE(26, 5);
            ASM_VOLATILE_CASE(27, 5);
            ASM_VOLATILE_CASE(28, 5);
            ASM_VOLATILE_CASE(29, 5);
            ASM_VOLATILE_CASE(30, 5);
            ASM_VOLATILE_CASE(31, 5);
            default:
               break;
         }
         break;
      case 6:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 6);
            ASM_VOLATILE_CASE(1, 6);
            ASM_VOLATILE_CASE(2, 6);
            ASM_VOLATILE_CASE(3, 6);
            ASM_VOLATILE_CASE(4, 6);
            ASM_VOLATILE_CASE(5, 6);
            ASM_VOLATILE_CASE(6, 6);
            ASM_VOLATILE_CASE(7, 6);
            ASM_VOLATILE_CASE(8, 6);
            ASM_VOLATILE_CASE(9, 6);
            ASM_VOLATILE_CASE(10, 6);
            ASM_VOLATILE_CASE(11, 6);
            ASM_VOLATILE_CASE(12, 6);
            ASM_VOLATILE_CASE(13, 6);
            ASM_VOLATILE_CASE(14, 6);
            ASM_VOLATILE_CASE(15, 6);
            ASM_VOLATILE_CASE(16, 6);
            ASM_VOLATILE_CASE(17, 6);
            ASM_VOLATILE_CASE(18, 6);
            ASM_VOLATILE_CASE(19, 6);
            ASM_VOLATILE_CASE(20, 6);
            ASM_VOLATILE_CASE(21, 6);
            ASM_VOLATILE_CASE(22, 6);
            ASM_VOLATILE_CASE(23, 6);
            ASM_VOLATILE_CASE(24, 6);
            ASM_VOLATILE_CASE(25, 6);
            ASM_VOLATILE_CASE(26, 6);
            ASM_VOLATILE_CASE(27, 6);
            ASM_VOLATILE_CASE(28, 6);
            ASM_VOLATILE_CASE(29, 6);
            ASM_VOLATILE_CASE(30, 6);
            ASM_VOLATILE_CASE(31, 6);
            default:
               break;
         }
         break;
      case 7:
         /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE(0, 7);
            ASM_VOLATILE_CASE(1, 7);
            ASM_VOLATILE_CASE(2, 7);
            ASM_VOLATILE_CASE(3, 7);
            ASM_VOLATILE_CASE(4, 7);
            ASM_VOLATILE_CASE(5, 7);
            ASM_VOLATILE_CASE(6, 7);
            ASM_VOLATILE_CASE(7, 7);
            ASM_VOLATILE_CASE(8, 7);
            ASM_VOLATILE_CASE(9, 7);
            ASM_VOLATILE_CASE(10, 7);
            ASM_VOLATILE_CASE(11, 7);
            ASM_VOLATILE_CASE(12, 7);
            ASM_VOLATILE_CASE(13, 7);
            ASM_VOLATILE_CASE(14, 7);
            ASM_VOLATILE_CASE(15, 7);
            ASM_VOLATILE_CASE(16, 7);
            ASM_VOLATILE_CASE(17, 7);
            ASM_VOLATILE_CASE(18, 7);
            ASM_VOLATILE_CASE(19, 7);
            ASM_VOLATILE_CASE(20, 7);
            ASM_VOLATILE_CASE(21, 7);
            ASM_VOLATILE_CASE(22, 7);
            ASM_VOLATILE_CASE(23, 7);
            ASM_VOLATILE_CASE(24, 7);
            ASM_VOLATILE_CASE(25, 7);
            ASM_VOLATILE_CASE(26, 7);
            ASM_VOLATILE_CASE(27, 7);
            ASM_VOLATILE_CASE(28, 7);
            ASM_VOLATILE_CASE(29, 7);
            ASM_VOLATILE_CASE(30, 7);
            ASM_VOLATILE_CASE(31, 7);
            default:
               break;
         }
         break;

      default:
         break;
   }
#endif
   return x;
}

#undef ASM_VOLATILE_CASE

#define ASM_VOLATILE_CASE(rd, sel) \
   case rd: \
      asm volatile ("dmfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); \
      break;
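
/* 64-bit counterpart of mips32_dirtyhelper_mfc0: reads CP0 register 'rd',
   select 'sel', using dmfc0.  Only compiled in for a mips64-linux host;
   elsewhere it simply returns 0. */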
ULong mips64_dirtyhelper_dmfc0 ( UInt rd, UInt sel )
{
   ULong x = 0;
#if defined(VGP_mips64_linux)
   switch (sel) {
      case 0:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 0);
            ASM_VOLATILE_CASE (1, 0);
            ASM_VOLATILE_CASE (2, 0);
            ASM_VOLATILE_CASE (3, 0);
            ASM_VOLATILE_CASE (4, 0);
            ASM_VOLATILE_CASE (5, 0);
            ASM_VOLATILE_CASE (6, 0);
            ASM_VOLATILE_CASE (7, 0);
            ASM_VOLATILE_CASE (8, 0);
            ASM_VOLATILE_CASE (9, 0);
            ASM_VOLATILE_CASE (10, 0);
            ASM_VOLATILE_CASE (11, 0);
            ASM_VOLATILE_CASE (12, 0);
            ASM_VOLATILE_CASE (13, 0);
            ASM_VOLATILE_CASE (14, 0);
            ASM_VOLATILE_CASE (15, 0);
            ASM_VOLATILE_CASE (16, 0);
            ASM_VOLATILE_CASE (17, 0);
            ASM_VOLATILE_CASE (18, 0);
            ASM_VOLATILE_CASE (19, 0);
            ASM_VOLATILE_CASE (20, 0);
            ASM_VOLATILE_CASE (21, 0);
            ASM_VOLATILE_CASE (22, 0);
            ASM_VOLATILE_CASE (23, 0);
            ASM_VOLATILE_CASE (24, 0);
            ASM_VOLATILE_CASE (25, 0);
            ASM_VOLATILE_CASE (26, 0);
            ASM_VOLATILE_CASE (27, 0);
            ASM_VOLATILE_CASE (28, 0);
            ASM_VOLATILE_CASE (29, 0);
            ASM_VOLATILE_CASE (30, 0);
            ASM_VOLATILE_CASE (31, 0);
            default:
               break;
         }
         break;
      case 1:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 1);
            ASM_VOLATILE_CASE (1, 1);
            ASM_VOLATILE_CASE (2, 1);
            ASM_VOLATILE_CASE (3, 1);
            ASM_VOLATILE_CASE (4, 1);
            ASM_VOLATILE_CASE (5, 1);
            ASM_VOLATILE_CASE (6, 1);
            ASM_VOLATILE_CASE (7, 1);
            ASM_VOLATILE_CASE (8, 1);
            ASM_VOLATILE_CASE (9, 1);
            ASM_VOLATILE_CASE (10, 1);
            ASM_VOLATILE_CASE (11, 1);
            ASM_VOLATILE_CASE (12, 1);
            ASM_VOLATILE_CASE (13, 1);
            ASM_VOLATILE_CASE (14, 1);
            ASM_VOLATILE_CASE (15, 1);
            ASM_VOLATILE_CASE (16, 1);
            ASM_VOLATILE_CASE (17, 1);
            ASM_VOLATILE_CASE (18, 1);
            ASM_VOLATILE_CASE (19, 1);
            ASM_VOLATILE_CASE (20, 1);
            ASM_VOLATILE_CASE (21, 1);
            ASM_VOLATILE_CASE (22, 1);
            ASM_VOLATILE_CASE (23, 1);
            ASM_VOLATILE_CASE (24, 1);
            ASM_VOLATILE_CASE (25, 1);
            ASM_VOLATILE_CASE (26, 1);
            ASM_VOLATILE_CASE (27, 1);
            ASM_VOLATILE_CASE (28, 1);
            ASM_VOLATILE_CASE (29, 1);
            ASM_VOLATILE_CASE (30, 1);
            ASM_VOLATILE_CASE (31, 1);
            default:
               break;
         }
         break;
      case 2:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 2);
            ASM_VOLATILE_CASE (1, 2);
            ASM_VOLATILE_CASE (2, 2);
            ASM_VOLATILE_CASE (3, 2);
            ASM_VOLATILE_CASE (4, 2);
            ASM_VOLATILE_CASE (5, 2);
            ASM_VOLATILE_CASE (6, 2);
            ASM_VOLATILE_CASE (7, 2);
            ASM_VOLATILE_CASE (8, 2);
            ASM_VOLATILE_CASE (9, 2);
            ASM_VOLATILE_CASE (10, 2);
            ASM_VOLATILE_CASE (11, 2);
            ASM_VOLATILE_CASE (12, 2);
            ASM_VOLATILE_CASE (13, 2);
            ASM_VOLATILE_CASE (14, 2);
            ASM_VOLATILE_CASE (15, 2);
            ASM_VOLATILE_CASE (16, 2);
            ASM_VOLATILE_CASE (17, 2);
            ASM_VOLATILE_CASE (18, 2);
            ASM_VOLATILE_CASE (19, 2);
            ASM_VOLATILE_CASE (20, 2);
            ASM_VOLATILE_CASE (21, 2);
            ASM_VOLATILE_CASE (22, 2);
            ASM_VOLATILE_CASE (23, 2);
            ASM_VOLATILE_CASE (24, 2);
            ASM_VOLATILE_CASE (25, 2);
            ASM_VOLATILE_CASE (26, 2);
            ASM_VOLATILE_CASE (27, 2);
            ASM_VOLATILE_CASE (28, 2);
            ASM_VOLATILE_CASE (29, 2);
            ASM_VOLATILE_CASE (30, 2);
            ASM_VOLATILE_CASE (31, 2);
            default:
               break;
         }
         break;
      case 3:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 3);
            ASM_VOLATILE_CASE (1, 3);
            ASM_VOLATILE_CASE (2, 3);
            ASM_VOLATILE_CASE (3, 3);
            ASM_VOLATILE_CASE (4, 3);
            ASM_VOLATILE_CASE (5, 3);
            ASM_VOLATILE_CASE (6, 3);
            ASM_VOLATILE_CASE (7, 3);
            ASM_VOLATILE_CASE (8, 3);
            ASM_VOLATILE_CASE (9, 3);
            ASM_VOLATILE_CASE (10, 3);
            ASM_VOLATILE_CASE (11, 3);
            ASM_VOLATILE_CASE (12, 3);
            ASM_VOLATILE_CASE (13, 3);
            ASM_VOLATILE_CASE (14, 3);
            ASM_VOLATILE_CASE (15, 3);
            ASM_VOLATILE_CASE (16, 3);
            ASM_VOLATILE_CASE (17, 3);
            ASM_VOLATILE_CASE (18, 3);
            ASM_VOLATILE_CASE (19, 3);
            ASM_VOLATILE_CASE (20, 3);
            ASM_VOLATILE_CASE (21, 3);
            ASM_VOLATILE_CASE (22, 3);
            ASM_VOLATILE_CASE (23, 3);
            ASM_VOLATILE_CASE (24, 3);
            ASM_VOLATILE_CASE (25, 3);
            ASM_VOLATILE_CASE (26, 3);
            ASM_VOLATILE_CASE (27, 3);
            ASM_VOLATILE_CASE (28, 3);
            ASM_VOLATILE_CASE (29, 3);
            ASM_VOLATILE_CASE (30, 3);
            ASM_VOLATILE_CASE (31, 3);
            default:
               break;
         }
         break;
      case 4:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 4);
            ASM_VOLATILE_CASE (1, 4);
            ASM_VOLATILE_CASE (2, 4);
            ASM_VOLATILE_CASE (3, 4);
            ASM_VOLATILE_CASE (4, 4);
            ASM_VOLATILE_CASE (5, 4);
            ASM_VOLATILE_CASE (6, 4);
            ASM_VOLATILE_CASE (7, 4);
            ASM_VOLATILE_CASE (8, 4);
            ASM_VOLATILE_CASE (9, 4);
            ASM_VOLATILE_CASE (10, 4);
            ASM_VOLATILE_CASE (11, 4);
            ASM_VOLATILE_CASE (12, 4);
            ASM_VOLATILE_CASE (13, 4);
            ASM_VOLATILE_CASE (14, 4);
            ASM_VOLATILE_CASE (15, 4);
            ASM_VOLATILE_CASE (16, 4);
            ASM_VOLATILE_CASE (17, 4);
            ASM_VOLATILE_CASE (18, 4);
            ASM_VOLATILE_CASE (19, 4);
            ASM_VOLATILE_CASE (20, 4);
            ASM_VOLATILE_CASE (21, 4);
            ASM_VOLATILE_CASE (22, 4);
            ASM_VOLATILE_CASE (23, 4);
            ASM_VOLATILE_CASE (24, 4);
            ASM_VOLATILE_CASE (25, 4);
            ASM_VOLATILE_CASE (26, 4);
            ASM_VOLATILE_CASE (27, 4);
            ASM_VOLATILE_CASE (28, 4);
            ASM_VOLATILE_CASE (29, 4);
            ASM_VOLATILE_CASE (30, 4);
            ASM_VOLATILE_CASE (31, 4);
            default:
               break;
         }
         break;
      case 5:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 5);
            ASM_VOLATILE_CASE (1, 5);
            ASM_VOLATILE_CASE (2, 5);
            ASM_VOLATILE_CASE (3, 5);
            ASM_VOLATILE_CASE (4, 5);
            ASM_VOLATILE_CASE (5, 5);
            ASM_VOLATILE_CASE (6, 5);
            ASM_VOLATILE_CASE (7, 5);
            ASM_VOLATILE_CASE (8, 5);
            ASM_VOLATILE_CASE (9, 5);
            ASM_VOLATILE_CASE (10, 5);
            ASM_VOLATILE_CASE (11, 5);
            ASM_VOLATILE_CASE (12, 5);
            ASM_VOLATILE_CASE (13, 5);
            ASM_VOLATILE_CASE (14, 5);
            ASM_VOLATILE_CASE (15, 5);
            ASM_VOLATILE_CASE (16, 5);
            ASM_VOLATILE_CASE (17, 5);
            ASM_VOLATILE_CASE (18, 5);
            ASM_VOLATILE_CASE (19, 5);
            ASM_VOLATILE_CASE (20, 5);
            ASM_VOLATILE_CASE (21, 5);
            ASM_VOLATILE_CASE (22, 5);
            ASM_VOLATILE_CASE (23, 5);
            ASM_VOLATILE_CASE (24, 5);
            ASM_VOLATILE_CASE (25, 5);
            ASM_VOLATILE_CASE (26, 5);
            ASM_VOLATILE_CASE (27, 5);
            ASM_VOLATILE_CASE (28, 5);
            ASM_VOLATILE_CASE (29, 5);
            ASM_VOLATILE_CASE (30, 5);
            ASM_VOLATILE_CASE (31, 5);
            default:
               break;
         }
         break;
      case 6:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 6);
            ASM_VOLATILE_CASE (1, 6);
            ASM_VOLATILE_CASE (2, 6);
            ASM_VOLATILE_CASE (3, 6);
            ASM_VOLATILE_CASE (4, 6);
            ASM_VOLATILE_CASE (5, 6);
            ASM_VOLATILE_CASE (6, 6);
            ASM_VOLATILE_CASE (7, 6);
            ASM_VOLATILE_CASE (8, 6);
            ASM_VOLATILE_CASE (9, 6);
            ASM_VOLATILE_CASE (10, 6);
            ASM_VOLATILE_CASE (11, 6);
            ASM_VOLATILE_CASE (12, 6);
            ASM_VOLATILE_CASE (13, 6);
            ASM_VOLATILE_CASE (14, 6);
            ASM_VOLATILE_CASE (15, 6);
            ASM_VOLATILE_CASE (16, 6);
            ASM_VOLATILE_CASE (17, 6);
            ASM_VOLATILE_CASE (18, 6);
            ASM_VOLATILE_CASE (19, 6);
            ASM_VOLATILE_CASE (20, 6);
            ASM_VOLATILE_CASE (21, 6);
            ASM_VOLATILE_CASE (22, 6);
            ASM_VOLATILE_CASE (23, 6);
            ASM_VOLATILE_CASE (24, 6);
            ASM_VOLATILE_CASE (25, 6);
            ASM_VOLATILE_CASE (26, 6);
            ASM_VOLATILE_CASE (27, 6);
            ASM_VOLATILE_CASE (28, 6);
            ASM_VOLATILE_CASE (29, 6);
            ASM_VOLATILE_CASE (30, 6);
            ASM_VOLATILE_CASE (31, 6);
            default:
               break;
         }
         break;
      case 7:
         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
         switch (rd) {
            ASM_VOLATILE_CASE (0, 7);
            ASM_VOLATILE_CASE (1, 7);
            ASM_VOLATILE_CASE (2, 7);
            ASM_VOLATILE_CASE (3, 7);
            ASM_VOLATILE_CASE (4, 7);
            ASM_VOLATILE_CASE (5, 7);
            ASM_VOLATILE_CASE (6, 7);
            ASM_VOLATILE_CASE (7, 7);
            ASM_VOLATILE_CASE (8, 7);
            ASM_VOLATILE_CASE (9, 7);
            ASM_VOLATILE_CASE (10, 7);
            ASM_VOLATILE_CASE (11, 7);
            ASM_VOLATILE_CASE (12, 7);
            ASM_VOLATILE_CASE (13, 7);
            ASM_VOLATILE_CASE (14, 7);
            ASM_VOLATILE_CASE (15, 7);
            ASM_VOLATILE_CASE (16, 7);
            ASM_VOLATILE_CASE (17, 7);
            ASM_VOLATILE_CASE (18, 7);
            ASM_VOLATILE_CASE (19, 7);
            ASM_VOLATILE_CASE (20, 7);
            ASM_VOLATILE_CASE (21, 7);
            ASM_VOLATILE_CASE (22, 7);
            ASM_VOLATILE_CASE (23, 7);
            ASM_VOLATILE_CASE (24, 7);
            ASM_VOLATILE_CASE (25, 7);
            ASM_VOLATILE_CASE (26, 7);
            ASM_VOLATILE_CASE (27, 7);
            ASM_VOLATILE_CASE (28, 7);
            ASM_VOLATILE_CASE (29, 7);
            ASM_VOLATILE_CASE (30, 7);
            ASM_VOLATILE_CASE (31, 7);
            default:
               break;
         }
         break;

      default:
         break;
   }
#endif
   return x;
}

#undef ASM_VOLATILE_CASE

#define ASM_VOLATILE_CASE(rd, sel) \
   case rd: asm volatile ("dmfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); break;
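
/* RDHWR dirty helpers: read a hardware register on the host.  Only
   hardware register 1 (SYNCI_StepSize) is currently handled; any other
   register number asserts. */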
#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
UInt mips32_dirtyhelper_rdhwr ( UInt rt, UInt rd )
{
   UInt x = 0;
   switch (rd) {
      case 1:  /* x = SYNCI_StepSize() */
         __asm__ __volatile__("rdhwr %0, $1\n\t" : "=r" (x) );
         break;

      default:
         vassert(0);
         break;
   }
   return x;
}

ULong mips64_dirtyhelper_rdhwr ( ULong rt, ULong rd )
{
   ULong x = 0;
   switch (rd) {
      case 1:  /* x = SYNCI_StepSize() */
         __asm__ __volatile__("rdhwr %0, $1\n\t" : "=r" (x) );
         break;

      default:
         vassert(0);
         break;
   }
   return x;
}
#endif
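
/* The ASM_VOLATILE_* macros below execute a single FP instruction natively
   so as to find out its effect on the FCSR.  Each one saves the host's
   FCSR, installs the guest's FCSR value, loads the operand(s) into scratch
   FP registers, runs the instruction, reads the resulting FCSR into 'ret',
   and finally restores the host's FCSR. */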
#define ASM_VOLATILE_UNARY32(inst) \
   __asm__ volatile("cfc1 $t0, $31" "\n\t" \
                    "ctc1 %2, $31" "\n\t" \
                    "mtc1 %1, $f20" "\n\t" \
                    #inst" $f20, $f20" "\n\t" \
                    "cfc1 %0, $31" "\n\t" \
                    "ctc1 $t0, $31" "\n\t" \
                    : "=r" (ret) \
                    : "r" (loFsVal), "r" (fcsr) \
                    : "t0", "$f20" \
                   );

#define ASM_VOLATILE_UNARY32_DOUBLE(inst) \
   __asm__ volatile("cfc1 $t0, $31" "\n\t" \
                    "ctc1 %3, $31" "\n\t" \
                    "mtc1 %1, $f20" "\n\t" \
                    "mtc1 %2, $f21" "\n\t" \
                    #inst" $f20, $f20" "\n\t" \
                    "cfc1 %0, $31" "\n\t" \
                    "ctc1 $t0, $31" "\n\t" \
                    : "=r" (ret) \
                    : "r" (loFsVal), "r" (hiFsVal), "r" (fcsr) \
                    : "t0", "$f20", "$f21" \
                   );

#define ASM_VOLATILE_UNARY64(inst) \
   __asm__ volatile("cfc1 $t0, $31" "\n\t" \
                    "ctc1 %2, $31" "\n\t" \
                    "ldc1 $f24, 0(%1)" "\n\t" \
                    #inst" $f24, $f24" "\n\t" \
                    "cfc1 %0, $31" "\n\t" \
                    "ctc1 $t0, $31" "\n\t" \
                    : "=r" (ret) \
                    : "r" (&(addr[fs])), "r" (fcsr) \
                    : "t0", "$f24" \
                   );

#define ASM_VOLATILE_BINARY32(inst) \
   __asm__ volatile("cfc1 $t0, $31" "\n\t" \
                    "ctc1 %3, $31" "\n\t" \
                    "mtc1 %1, $f20" "\n\t" \
                    "mtc1 %2, $f22" "\n\t" \
                    #inst" $f20, $f20, $f22" "\n\t" \
                    "cfc1 %0, $31" "\n\t" \
                    "ctc1 $t0, $31" "\n\t" \
                    : "=r" (ret) \
                    : "r" (loFsVal), "r" (loFtVal), "r" (fcsr) \
                    : "t0", "$f20", "$f22" \
                   );

#define ASM_VOLATILE_BINARY32_DOUBLE(inst) \
   __asm__ volatile("cfc1 $t0, $31" "\n\t" \
                    "ctc1 %5, $31" "\n\t" \
                    "mtc1 %1, $f20" "\n\t" \
                    "mtc1 %2, $f21" "\n\t" \
                    "mtc1 %3, $f22" "\n\t" \
                    "mtc1 %4, $f23" "\n\t" \
                    #inst" $f20, $f20, $f22" "\n\t" \
                    "cfc1 %0, $31" "\n\t" \
                    "ctc1 $t0, $31" "\n\t" \
                    : "=r" (ret) \
                    : "r" (loFsVal), "r" (hiFsVal), "r" (loFtVal), \
                      "r" (hiFtVal), "r" (fcsr) \
                    : "t0", "$f20", "$f21", "$f22", "$f23" \
                   );

#define ASM_VOLATILE_BINARY64(inst) \
   __asm__ volatile("cfc1 $t0, $31" "\n\t" \
                    "ctc1 %3, $31" "\n\t" \
                    "ldc1 $f24, 0(%1)" "\n\t" \
                    "ldc1 $f26, 0(%2)" "\n\t" \
                    #inst" $f24, $f24, $f26" "\n\t" \
                    "cfc1 %0, $31" "\n\t" \
                    "ctc1 $t0, $31" "\n\t" \
                    : "=r" (ret) \
                    : "r" (&(addr[fs])), "r" (&(addr[ft])), "r" (fcsr) \
                    : "t0", "$f24", "$f26" \
                   );
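
/* mips_dirtyhelper_calculate_FCSR_fp32/fp64 recompute the FCSR value that
   guest FP instruction 'inst' would produce, given source registers 'fs'
   and 'ft' taken from guest state 'gs'.  The fp32 variant views the FP
   register file as 32 single-width registers (with doubles held in
   even/odd pairs); the fp64 variant views it as 32 full 64-bit registers. */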
/* TODO: Add cases for all FPU instructions, because all FPU instructions
   change the value of the FCSR register. */
extern UInt mips_dirtyhelper_calculate_FCSR_fp32 ( void* gs, UInt fs, UInt ft,
                                                   flt_op inst )
{
   UInt ret = 0;
#if defined(__mips__)
   VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
   UInt loFsVal, hiFsVal, loFtVal, hiFtVal;
#if defined (_MIPSEL)
   ULong *addr = (ULong *)&guest_state->guest_f0;
   loFsVal = (UInt)addr[fs];
   hiFsVal = (UInt)addr[fs+1];
   loFtVal = (UInt)addr[ft];
   hiFtVal = (UInt)addr[ft+1];
#elif defined (_MIPSEB)
   UInt *addr = (UInt *)&guest_state->guest_f0;
   loFsVal = (UInt)addr[fs*2];
   hiFsVal = (UInt)addr[fs*2+2];
   loFtVal = (UInt)addr[ft*2];
   hiFtVal = (UInt)addr[ft*2+2];
#endif
   UInt fcsr = guest_state->guest_FCSR;
   switch (inst) {
      case ROUNDWD:
         ASM_VOLATILE_UNARY32_DOUBLE(round.w.d)
         break;
      case FLOORWS:
         ASM_VOLATILE_UNARY32(floor.w.s)
         break;
      case FLOORWD:
         ASM_VOLATILE_UNARY32_DOUBLE(floor.w.d)
         break;
      case TRUNCWS:
         ASM_VOLATILE_UNARY32(trunc.w.s)
         break;
      case TRUNCWD:
         ASM_VOLATILE_UNARY32_DOUBLE(trunc.w.d)
         break;
      case CEILWS:
         ASM_VOLATILE_UNARY32(ceil.w.s)
         break;
      case CEILWD:
         ASM_VOLATILE_UNARY32_DOUBLE(ceil.w.d)
         break;
      case CVTDS:
         ASM_VOLATILE_UNARY32(cvt.d.s)
         break;
      case CVTDW:
         ASM_VOLATILE_UNARY32(cvt.d.w)
         break;
      case CVTSW:
         ASM_VOLATILE_UNARY32(cvt.s.w)
         break;
      case CVTSD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.s.d)
         break;
      case CVTWS:
         ASM_VOLATILE_UNARY32(cvt.w.s)
         break;
      case CVTWD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.w.d)
         break;
      case ROUNDWS:
         ASM_VOLATILE_UNARY32(round.w.s)
         break;
#if ((__mips == 32) && defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) \
    || (__mips == 64)
      case CEILLS:
         ASM_VOLATILE_UNARY32(ceil.l.s)
         break;
      case CEILLD:
         ASM_VOLATILE_UNARY32_DOUBLE(ceil.l.d)
         break;
      case CVTDL:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.d.l)
         break;
      case CVTLS:
         ASM_VOLATILE_UNARY32(cvt.l.s)
         break;
      case CVTLD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.l.d)
         break;
      case CVTSL:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.s.l)
         break;
      case FLOORLS:
         ASM_VOLATILE_UNARY32(floor.l.s)
         break;
      case FLOORLD:
         ASM_VOLATILE_UNARY32_DOUBLE(floor.l.d)
         break;
      case ROUNDLS:
         ASM_VOLATILE_UNARY32(round.l.s)
         break;
      case ROUNDLD:
         ASM_VOLATILE_UNARY32_DOUBLE(round.l.d)
         break;
      case TRUNCLS:
         ASM_VOLATILE_UNARY32(trunc.l.s)
         break;
      case TRUNCLD:
         ASM_VOLATILE_UNARY32_DOUBLE(trunc.l.d)
         break;
#endif
      case ADDS:
         ASM_VOLATILE_BINARY32(add.s)
         break;
      case ADDD:
         ASM_VOLATILE_BINARY32_DOUBLE(add.d)
         break;
      case SUBS:
         ASM_VOLATILE_BINARY32(sub.s)
         break;
      case SUBD:
         ASM_VOLATILE_BINARY32_DOUBLE(sub.d)
         break;
      case DIVS:
         ASM_VOLATILE_BINARY32(div.s)
         break;
      default:
         vassert(0);
         break;
   }
#endif
   return ret;
}

/* TODO: Add cases for all FPU instructions, because all FPU instructions
   change the value of the FCSR register. */
extern UInt mips_dirtyhelper_calculate_FCSR_fp64 ( void* gs, UInt fs, UInt ft,
                                                   flt_op inst )
{
   UInt ret = 0;
#if defined(__mips__)
#if defined(VGA_mips32)
   VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
#else
   VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
#endif
   ULong *addr = (ULong *)&guest_state->guest_f0;
   UInt fcsr = guest_state->guest_FCSR;
   switch (inst) {
      case ROUNDWD:
         ASM_VOLATILE_UNARY64(round.w.d)
         break;
      case FLOORWS:
         ASM_VOLATILE_UNARY64(floor.w.s)
         break;
      case FLOORWD:
         ASM_VOLATILE_UNARY64(floor.w.d)
         break;
      case TRUNCWS:
         ASM_VOLATILE_UNARY64(trunc.w.s)
         break;
      case TRUNCWD:
         ASM_VOLATILE_UNARY64(trunc.w.d)
         break;
      case CEILWS:
         ASM_VOLATILE_UNARY64(ceil.w.s)
         break;
      case CEILWD:
         ASM_VOLATILE_UNARY64(ceil.w.d)
         break;
      case CVTDS:
         ASM_VOLATILE_UNARY64(cvt.d.s)
         break;
      case CVTDW:
         ASM_VOLATILE_UNARY64(cvt.d.w)
         break;
      case CVTSW:
         ASM_VOLATILE_UNARY64(cvt.s.w)
         break;
      case CVTSD:
         ASM_VOLATILE_UNARY64(cvt.s.d)
         break;
      case CVTWS:
         ASM_VOLATILE_UNARY64(cvt.w.s)
         break;
      case CVTWD:
         ASM_VOLATILE_UNARY64(cvt.w.d)
         break;
      case ROUNDWS:
         ASM_VOLATILE_UNARY64(round.w.s)
         break;
#if ((__mips == 32) && defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) \
    || (__mips == 64)
      case CEILLS:
         ASM_VOLATILE_UNARY64(ceil.l.s)
         break;
      case CEILLD:
         ASM_VOLATILE_UNARY64(ceil.l.d)
         break;
      case CVTDL:
         ASM_VOLATILE_UNARY64(cvt.d.l)
         break;
      case CVTLS:
         ASM_VOLATILE_UNARY64(cvt.l.s)
         break;
      case CVTLD:
         ASM_VOLATILE_UNARY64(cvt.l.d)
         break;
      case CVTSL:
         ASM_VOLATILE_UNARY64(cvt.s.l)
         break;
      case FLOORLS:
         ASM_VOLATILE_UNARY64(floor.l.s)
         break;
      case FLOORLD:
         ASM_VOLATILE_UNARY64(floor.l.d)
         break;
      case ROUNDLS:
         ASM_VOLATILE_UNARY64(round.l.s)
         break;
      case ROUNDLD:
         ASM_VOLATILE_UNARY64(round.l.d)
         break;
      case TRUNCLS:
         ASM_VOLATILE_UNARY64(trunc.l.s)
         break;
      case TRUNCLD:
         ASM_VOLATILE_UNARY64(trunc.l.d)
         break;
#endif
      case ADDS:
         ASM_VOLATILE_BINARY64(add.s)
         break;
      case ADDD:
         ASM_VOLATILE_BINARY64(add.d)
         break;
      case SUBS:
         ASM_VOLATILE_BINARY64(sub.s)
         break;
      case SUBD:
         ASM_VOLATILE_BINARY64(sub.d)
         break;
      case DIVS:
         ASM_VOLATILE_BINARY64(div.s)
         break;
      default:
         vassert(0);
         break;
   }
#endif
   return ret;
}

/*---------------------------------------------------------------*/
/*--- end                                guest_mips_helpers.c ---*/
/*---------------------------------------------------------------*/