/* Target operations for the remote server for GDB.
   Copyright (C) 2002, 2004, 2005, 2011
   Free Software Foundation, Inc.

   Contributed by MontaVista Software.

   This file is part of GDB.
   It has been modified to integrate it in valgrind

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "server.h"
#include "target.h"
#include "regdef.h"
#include "regcache.h"
#include "valgrind_low.h"
#include "gdb/signals.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_machine.h"
#include "pub_core_threadstate.h"
#include "pub_core_transtab.h"
#include "pub_core_gdbserver.h"
#include "pub_core_debuginfo.h"


/* the_low_target defines the architecture-specific aspects,
   depending on the CPU. */
static struct valgrind_target_ops the_low_target;

static
char *image_ptid (unsigned long ptid)
{
   static char result[100];
   VG_(sprintf) (result, "id %ld", ptid);
   return result;
}

#define get_thread(inf) ((struct thread_info *)(inf))

static
void remove_thread_if_not_in_vg_threads (struct inferior_list_entry *inf)
{
   struct thread_info *thread = get_thread (inf);
   if (!VG_(lwpid_to_vgtid)(thread_to_gdb_id(thread))) {
      dlog(1, "removing gdb ptid %s\n",
           image_ptid(thread_to_gdb_id(thread)));
      remove_thread (thread);
   }
}

/* Synchronize the threads known by valgrind with the threads known by
   gdbserver. */
static
void valgrind_update_threads (int pid)
{
   ThreadId tid;
   ThreadState *ts;
   unsigned long ptid;
   struct thread_info *ti;

   /* call remove_thread for all gdb threads not in valgrind threads */
   for_each_inferior (&all_threads, remove_thread_if_not_in_vg_threads);

   /* call add_thread for all valgrind threads not known in gdb all_threads */
   for (tid = 1; tid < VG_N_THREADS; tid++) {

#define LOCAL_THREAD_TRACE " ti* %p vgtid %d status %s as gdb ptid %s lwpid %d\n", \
        ti, tid, VG_(name_of_ThreadStatus) (ts->status), \
        image_ptid (ptid), ts->os_state.lwpid

      if (VG_(is_valid_tid) (tid)) {
         ts = VG_(get_ThreadState) (tid);
         ptid = ts->os_state.lwpid;
         ti = gdb_id_to_thread (ptid);
         if (!ti) {
            /* We do not report the threads which are not yet fully
               initialized, otherwise this creates duplicated threads
               in gdb: once with pid xxx lwpid 0, then after that
               with pid xxx lwpid yyy. */
            if (ts->status != VgTs_Init) {
               dlog(1, "adding_thread" LOCAL_THREAD_TRACE);
               add_thread (ptid, ts, ptid);
            }
         } else {
            dlog(2, "(known thread)" LOCAL_THREAD_TRACE);
         }
      }
#undef LOCAL_THREAD_TRACE
   }
}

static
struct reg* build_shadow_arch (struct reg *reg_defs, int n) {
   int i, r;
   static const char *postfix[3] = { "", "s1", "s2" };
   struct reg *new_regs = malloc(3 * n * sizeof(reg_defs[0]));
   int reg_set_len = reg_defs[n-1].offset + reg_defs[n-1].size;

   for (i = 0; i < 3; i++) {
      for (r = 0; r < n; r++) {
         char *regname = malloc(strlen(reg_defs[r].name)
                                + strlen (postfix[i]) + 1);
         strcpy (regname, reg_defs[r].name);
         strcat (regname, postfix[i]);
         new_regs[i*n + r].name = regname;
         new_regs[i*n + r].offset = i*reg_set_len + reg_defs[r].offset;
         new_regs[i*n + r].size = reg_defs[r].size;
         dlog(1,
              "%10s Nr %d offset(bit) %d offset(byte) %d size(bit) %d\n",
              new_regs[i*n + r].name, i*n + r, new_regs[i*n + r].offset,
              (new_regs[i*n + r].offset) / 8, new_regs[i*n + r].size);
      }
   }

   return new_regs;
}


static CORE_ADDR stopped_data_address = 0;
void VG_(set_watchpoint_stop_address) (Addr addr)
{
   stopped_data_address = addr;
}

int valgrind_stopped_by_watchpoint (void)
{
   return stopped_data_address != 0;
}

CORE_ADDR valgrind_stopped_data_address (void)
{
   return stopped_data_address;
}

/* pc at which we last stopped */
static CORE_ADDR stop_pc;

/* pc at which we resume.
   If stop_pc != resume_pc, it means gdb/gdbserver has changed the pc
   so as to have either
      a "continue by jumping at that address", or
      a "continue at that address to call some code from gdb". */
static CORE_ADDR resume_pc;

static int vki_signal_to_report;

void gdbserver_signal_encountered (Int vki_sigNo)
{
   vki_signal_to_report = vki_sigNo;
}

static int vki_signal_to_deliver;
Bool gdbserver_deliver_signal (Int vki_sigNo)
{
   return vki_sigNo == vki_signal_to_deliver;
}

static unsigned char exit_status_to_report;
static int exit_code_to_report;
void gdbserver_process_exit_encountered (unsigned char status, Int code)
{
   vg_assert (status == 'W' || status == 'X');
   exit_status_to_report = status;
   exit_code_to_report = code;
}

static
char* sym (Addr addr)
{
   static char buf[200];
   VG_(describe_IP) (addr, buf, 200);
   return buf;
}

ThreadId vgdb_interrupted_tid = 0;

/* 0 => not single stepping.
   1 => single stepping asked by gdb
   2 => single stepping asked by valgrind (watchpoint) */
static int stepping = 0;

Addr valgrind_get_ignore_break_once(void)
{
   if (valgrind_single_stepping())
      return resume_pc;
   else
      return 0;
}

void valgrind_set_single_stepping(Bool set)
{
   if (set)
      stepping = 2;
   else
      stepping = 0;
}

Bool valgrind_single_stepping(void)
{
   if (stepping)
      return True;
   else
      return False;
}

int valgrind_thread_alive (unsigned long tid)
{
   struct thread_info *ti = gdb_id_to_thread(tid);
   ThreadState *tst;

   if (ti != NULL) {
      tst = (ThreadState *) inferior_target_data (ti);
      return tst->status != VgTs_Zombie;
   }
   else {
      return 0;
   }
}

void valgrind_resume (struct thread_resume *resume_info)
{
   dlog(1,
        "resume_info step %d sig %d stepping %d\n",
        resume_info->step,
        resume_info->sig,
        stepping);
   if (valgrind_stopped_by_watchpoint()) {
      dlog(1, "clearing watchpoint stopped_data_address %p\n",
           C2v(stopped_data_address));
      VG_(set_watchpoint_stop_address) ((Addr) 0);
   }
   vki_signal_to_deliver = resume_info->sig;

   stepping = resume_info->step;
   resume_pc = (*the_low_target.get_pc) ();
   if (resume_pc != stop_pc) {
      dlog(1,
           "stop_pc %p changed to be resume_pc %s\n",
           C2v(stop_pc), sym(resume_pc));
   }
   regcache_invalidate();
}

unsigned char valgrind_wait (char *ourstatus)
{
   int pid;
   unsigned long wptid;
   ThreadState *tst;
   enum target_signal sig;
   int code;

   pid = VG_(getpid) ();
   dlog(1, "enter valgrind_wait pid %d\n", pid);

   regcache_invalidate();
   valgrind_update_threads(pid);

   /* First see if we are done with this process. */
   if (exit_status_to_report != 0) {
      *ourstatus = exit_status_to_report;
      exit_status_to_report = 0;

      if (*ourstatus == 'W') {
         code = exit_code_to_report;
         exit_code_to_report = 0;
         dlog(1, "exit valgrind_wait status W exit code %d\n", code);
         return code;
      }

      if (*ourstatus == 'X') {
         sig = target_signal_from_host(exit_code_to_report);
         exit_code_to_report = 0;
         dlog(1, "exit valgrind_wait status X signal %d\n", sig);
         return sig;
      }
   }

   /* In valgrind, we consider that a wait always succeeds with STOPPED 'T'
      and with signal TRAP (i.e. a breakpoint), unless there is a signal
      to report. */
   *ourstatus = 'T';
   if (vki_signal_to_report == 0)
      sig = TARGET_SIGNAL_TRAP;
   else {
      sig = target_signal_from_host(vki_signal_to_report);
      vki_signal_to_report = 0;
   }

   if (vgdb_interrupted_tid != 0)
      tst = VG_(get_ThreadState) (vgdb_interrupted_tid);
   else
      tst = VG_(get_ThreadState) (VG_(running_tid));
   wptid = tst->os_state.lwpid;
   /* We can only change the current_inferior when the wptid references
      an existing thread.  Otherwise, we are still in the init phase.
      (hack similar to main thread hack in valgrind_update_threads) */
   if (tst->os_state.lwpid)
      current_inferior = gdb_id_to_thread (wptid);
   stop_pc = (*the_low_target.get_pc) ();

   dlog(1,
        "exit valgrind_wait status T ptid %s stop_pc %s signal %d\n",
        image_ptid (wptid), sym (stop_pc), sig);
   return sig;
}
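
/* Illustrative sketch, not part of the original file: one way a caller
   could consume the status/value convention implemented by
   valgrind_wait above ('W' exit code, 'X' fatal signal, 'T' stop
   signal).  The example_* name is hypothetical; the code is kept under
   #if 0 so that it is never compiled. */
#if 0
static
void example_consume_wait_status (void)
{
   char status;
   unsigned char val = valgrind_wait (&status);

   switch (status) {
      case 'W': dlog(1, "exited, exit code %d\n", val);     break;
      case 'X': dlog(1, "killed by signal %d\n", val);      break;
      case 'T': dlog(1, "stopped with signal %d\n", val);   break;
      default:  vg_assert (0);
   }
}
#endif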

/* Fetch one register from valgrind VEX guest state.  */
static
void fetch_register (int regno)
{
   int size;
   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;

   if (regno >= the_low_target.num_regs) {
      dlog(0, "error fetch_register regno %d max %d\n",
           regno, the_low_target.num_regs);
      return;
   }
   size = register_size (regno);
   if (size > 0) {
      Bool mod;
      char buf [size];
      VG_(memset) (buf, 0, size); // registers not fetched will be seen as 0.
      (*the_low_target.transfer_register) (tid, regno, buf,
                                           valgrind_to_gdbserver, size, &mod);
      // Note: the *mod received from transfer_register is not interesting.
      // We are interested in whether the data in the register cache is
      // modified by supply_register below.
      supply_register (regno, buf, &mod);
      if (mod && VG_(debugLog_getLevel)() > 1) {
         char bufimage [2*size + 1];
         heximage (bufimage, buf, size);
         dlog(2, "fetched register %d size %d name %s value %s tid %d status %s\n",
              regno, size, the_low_target.reg_defs[regno].name, bufimage,
              tid, VG_(name_of_ThreadStatus) (tst->status));
      }
   }
}

/* Fetch all registers, or just one, from the child process. */
static
void usr_fetch_inferior_registers (int regno)
{
   if (regno == -1 || regno == 0)
      for (regno = 0; regno < the_low_target.num_regs; regno++)
         fetch_register (regno);
   else
      fetch_register (regno);
}

/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers.
   Otherwise, REGNO specifies which register (so we can save time). */
static
void usr_store_inferior_registers (int regno)
{
   int size;
   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;

   if (regno >= 0) {

      if (regno >= the_low_target.num_regs) {
         dlog(0, "error store_register regno %d max %d\n",
              regno, the_low_target.num_regs);
         return;
      }

      size = register_size (regno);
      if (size > 0) {
         Bool mod;
         Addr old_SP, new_SP;
         char buf[size];

         if (regno == the_low_target.stack_pointer_regno) {
            /* When the stack pointer register is changed such that the
               stack is extended, we had better inform the tool of the
               stack increase.  This is needed in particular to avoid
               spurious Memcheck errors during inferior calls.  So, we
               save in old_SP the SP before the change.  A change of
               stack pointer is also assumed to have initialised this
               new stack space.  For the typical example of an inferior
               call, gdb writes arguments on the stack, and then changes
               the stack pointer.  As the stack increase tool function
               might mark the area as undefined, we have to call it at
               the right moment. */
            VG_(memset) ((void *) &old_SP, 0, size);
            (*the_low_target.transfer_register) (tid, regno, (void *) &old_SP,
                                                 valgrind_to_gdbserver, size, &mod);
         }

         VG_(memset) (buf, 0, size);
         collect_register (regno, buf);
         (*the_low_target.transfer_register) (tid, regno, buf,
                                              gdbserver_to_valgrind, size, &mod);
         if (mod && VG_(debugLog_getLevel)() > 1) {
            char bufimage [2*size + 1];
            heximage (bufimage, buf, size);
            dlog(2,
                 "stored register %d size %d name %s value %s "
                 "tid %d status %s\n",
                 regno, size, the_low_target.reg_defs[regno].name, bufimage,
                 tid, VG_(name_of_ThreadStatus) (tst->status));
         }
         if (regno == the_low_target.stack_pointer_regno) {
            VG_(memcpy) (&new_SP, buf, size);
            if (old_SP > new_SP) {
               Word delta = (Word)new_SP - (Word)old_SP;
               dlog(1,
                    "   stack increase by stack pointer changed from %p to %p "
                    "delta %ld\n",
                    (void*) old_SP, (void *) new_SP,
                    delta);
               VG_TRACK( new_mem_stack_w_ECU, new_SP, -delta, 0 );
               VG_TRACK( new_mem_stack,       new_SP, -delta );
               VG_TRACK( post_mem_write, Vg_CoreClientReq, tid,
                         new_SP, -delta);
            }
         }
      }
   }
   else {
      for (regno = 0; regno < the_low_target.num_regs; regno++)
         usr_store_inferior_registers (regno);
   }
}

void valgrind_fetch_registers (int regno)
{
   usr_fetch_inferior_registers (regno);
}

void valgrind_store_registers (int regno)
{
   usr_store_inferior_registers (regno);
}

Bool hostvisibility = False;

int valgrind_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
   const void *sourceaddr = C2v (memaddr);
   dlog(2, "reading memory %p size %d\n", sourceaddr, len);
   if (VG_(am_is_valid_for_client) ((Addr) sourceaddr,
                                    len, VKI_PROT_READ)
       || (hostvisibility
           && VG_(am_is_valid_for_valgrind) ((Addr) sourceaddr,
                                             len, VKI_PROT_READ))) {
      VG_(memcpy) (myaddr, sourceaddr, len);
      return 0;
   } else {
      dlog(1, "error reading memory %p size %d\n", sourceaddr, len);
      return -1;
   }
}

int valgrind_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
   Bool is_valid_client_memory;
   void *targetaddr = C2v (memaddr);
   dlog(2, "writing memory %p size %d\n", targetaddr, len);
   is_valid_client_memory
      = VG_(am_is_valid_for_client) ((Addr)targetaddr, len, VKI_PROT_WRITE);
   if (is_valid_client_memory
       || (hostvisibility
           && VG_(am_is_valid_for_valgrind) ((Addr) targetaddr,
                                             len, VKI_PROT_READ))) {
      if (len > 0) {
         VG_(memcpy) (targetaddr, myaddr, len);
         if (is_valid_client_memory && VG_(tdict).track_post_mem_write) {
            /* Inform the tool of the post mem write.  Note that we do the
               minimum necessary to avoid complaints from e.g. memcheck.
               The idea is that the debugger is as unintrusive as
               possible.  So, we do not inform of the pre mem write (and
               in any case, this would cause problems with memcheck,
               which does not like our CorePart in pre_mem_write). */
            ThreadState *tst =
               (ThreadState *) inferior_target_data (current_inferior);
            ThreadId tid = tst->tid;
            VG_(tdict).track_post_mem_write( Vg_CoreClientReq, tid,
                                             (Addr) targetaddr, len );
         }
      }
      return 0;
   } else {
      dlog(1, "error writing memory %p size %d\n", targetaddr, len);
      return -1;
   }
}

/* Insert or remove a breakpoint or watchpoint. */
static
int valgrind_point (Bool insert, char type, CORE_ADDR addr, int len)
{
   PointKind kind;
   switch (type) {
   case '0': /* implemented by inserting checks at each instruction in sb */
      kind = software_breakpoint;
      break;
   case '1': /* hw breakpoint, same implementation as sw breakpoint */
      kind = hardware_breakpoint;
      break;
   case '2':
      kind = write_watchpoint;
      break;
   case '3':
      kind = read_watchpoint;
      break;
   case '4':
      kind = access_watchpoint;
      break;
   default:
      vg_assert (0);
   }

   /* Attention: the gdbserver convention is the opposite:
      0 means ok; 1 means not ok (error or unsupported). */
   if (VG_(gdbserver_point) (kind, insert, addr, len))
      return 0;
   else
      return 1; /* error or unsupported */
}

const char* valgrind_target_xml (Bool shadow_mode)
{
   return (*the_low_target.target_xml) (shadow_mode);
}

int valgrind_insert_watchpoint (char type, CORE_ADDR addr, int len)
{
   return valgrind_point (/* insert */ True, type, addr, len);
}

int valgrind_remove_watchpoint (char type, CORE_ADDR addr, int len)
{
   return valgrind_point (/* insert */ False, type, addr, len);
}

/* Returns a pointer to the architecture state corresponding to
   the provided register set: 0 => normal guest registers,
                              1 => shadow1,
                              2 => shadow2. */
VexGuestArchState* get_arch (int set, ThreadState* tst)
{
   switch (set) {
   case 0: return &tst->arch.vex;
   case 1: return &tst->arch.vex_shadow1;
   case 2: return &tst->arch.vex_shadow2;
   default: vg_assert(0);
   }
}

static int non_shadow_num_regs = 0;
static struct reg *non_shadow_reg_defs = NULL;
void initialize_shadow_low(Bool shadow_mode)
{
   if (non_shadow_reg_defs == NULL) {
      non_shadow_reg_defs = the_low_target.reg_defs;
      non_shadow_num_regs = the_low_target.num_regs;
   }

   regcache_invalidate();
   if (the_low_target.reg_defs != non_shadow_reg_defs) {
      free (the_low_target.reg_defs);
   }
   if (shadow_mode) {
      the_low_target.num_regs = 3 * non_shadow_num_regs;
      the_low_target.reg_defs = build_shadow_arch (non_shadow_reg_defs,
                                                   non_shadow_num_regs);
   } else {
      the_low_target.num_regs = non_shadow_num_regs;
      the_low_target.reg_defs = non_shadow_reg_defs;
   }
   set_register_cache (the_low_target.reg_defs, the_low_target.num_regs);
}
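
/* Illustrative sketch, not part of the original file: with shadow mode
   enabled, initialize_shadow_low above triples the register set via
   build_shadow_arch, so a guest register "pc" is followed by shadow
   copies named "pcs1" and "pcs2" one and two register-set lengths
   further on; the shadow values themselves live in the VEX states
   returned by get_arch.  The example_* name is hypothetical; the code
   is kept under #if 0 so that it is never compiled. */
#if 0
static
VexGuestArchState* example_shadow1_state (ThreadId tid)
{
   ThreadState *tst = VG_(get_ThreadState) (tid);
   return get_arch (1, tst);   /* 1 => shadow1, see get_arch above */
}
#endif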

void set_desired_inferior (int use_general)
{
   struct thread_info *found;

   if (use_general == 1) {
      found = (struct thread_info *) find_inferior_id (&all_threads,
                                                       general_thread);
   } else {
      found = NULL;

      /* If we are continuing any (all) thread(s), use step_thread
         to decide which thread to step and/or send the specified
         signal to. */
      if ((step_thread != 0 && step_thread != -1)
          && (cont_thread == 0 || cont_thread == -1))
         found = (struct thread_info *) find_inferior_id (&all_threads,
                                                          step_thread);

      if (found == NULL)
         found = (struct thread_info *) find_inferior_id (&all_threads,
                                                          cont_thread);
   }

   if (found == NULL)
      current_inferior = (struct thread_info *) all_threads.head;
   else
      current_inferior = found;
   {
      ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
      ThreadId tid = tst->tid;
      dlog(1, "set_desired_inferior use_general %d found %p tid %d lwpid %d\n",
           use_general, found, tid, tst->os_state.lwpid);
   }
}

void* VG_(dmemcpy) ( void *d, const void *s, SizeT sz, Bool *mod )
{
   if (VG_(memcmp) (d, s, sz)) {
      *mod = True;
      return VG_(memcpy) (d, s, sz);
   } else {
      *mod = False;
      return d;
   }
}

void VG_(transfer) (void *valgrind,
                    void *gdbserver,
                    transfer_direction dir,
                    SizeT sz,
                    Bool *mod)
{
   if (dir == valgrind_to_gdbserver)
      VG_(dmemcpy) (gdbserver, valgrind, sz, mod);
   else if (dir == gdbserver_to_valgrind)
      VG_(dmemcpy) (valgrind, gdbserver, sz, mod);
   else
      vg_assert (0);
}

void valgrind_initialize_target(void)
{
#if defined(VGA_x86)
   x86_init_architecture(&the_low_target);
#elif defined(VGA_amd64)
   amd64_init_architecture(&the_low_target);
#elif defined(VGA_arm)
   arm_init_architecture(&the_low_target);
#elif defined(VGA_arm64)
   arm64_init_architecture(&the_low_target);
#elif defined(VGA_ppc32)
   ppc32_init_architecture(&the_low_target);
#elif defined(VGA_ppc64)
   ppc64_init_architecture(&the_low_target);
#elif defined(VGA_s390x)
   s390x_init_architecture(&the_low_target);
#elif defined(VGA_mips32)
   mips32_init_architecture(&the_low_target);
#elif defined(VGA_mips64)
   mips64_init_architecture(&the_low_target);
#else
#error "architecture missing in target.c valgrind_initialize_target"
#endif
}
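
/* Illustrative sketch, not part of the original file: the general shape
   of an architecture init function such as those dispatched to by
   valgrind_initialize_target above.  Only the struct valgrind_target_ops
   fields referenced in this file are shown; the myarch_* names and the
   register table are hypothetical, and the code is kept under #if 0 so
   that it is never compiled. */
#if 0
static struct reg myarch_regs[] = {
   { "pc", 0,  32 },    /* name, offset in bits, size in bits */
   { "sp", 32, 32 }
};

void myarch_init_architecture (struct valgrind_target_ops *target)
{
   target->num_regs            = sizeof(myarch_regs) / sizeof(myarch_regs[0]);
   target->reg_defs            = myarch_regs;
   target->stack_pointer_regno = 1;   /* index of "sp" in myarch_regs */
   target->transfer_register   = myarch_transfer_register;   /* hypothetical */
   target->get_pc              = myarch_get_pc;              /* hypothetical */
   target->target_xml          = myarch_target_xml;          /* hypothetical */
}
#endif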