1 /* 2 This file is part of drd, a thread error detector. 3 4 Copyright (C) 2006-2013 Bart Van Assche <bvanassche (at) acm.org>. 5 6 This program is free software; you can redistribute it and/or 7 modify it under the terms of the GNU General Public License as 8 published by the Free Software Foundation; either version 2 of the 9 License, or (at your option) any later version. 10 11 This program is distributed in the hope that it will be useful, but 12 WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 General Public License for more details. 15 16 You should have received a copy of the GNU General Public License 17 along with this program; if not, write to the Free Software 18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 19 02111-1307, USA. 20 21 The GNU General Public License is contained in the file COPYING. 22 */ 23 24 25 #include "drd_barrier.h" 26 #include "drd_clientreq.h" 27 #include "drd_cond.h" 28 #include "drd_error.h" 29 #include "drd_hb.h" 30 #include "drd_load_store.h" 31 #include "drd_malloc_wrappers.h" 32 #include "drd_mutex.h" 33 #include "drd_rwlock.h" 34 #include "drd_semaphore.h" 35 #include "drd_suppression.h" // drd_start_suppression() 36 #include "drd_thread.h" 37 #include "pub_tool_basics.h" // Bool 38 #include "pub_tool_debuginfo.h" // VG_(describe_IP)() 39 #include "pub_tool_libcassert.h" 40 #include "pub_tool_libcassert.h" // tl_assert() 41 #include "pub_tool_libcprint.h" // VG_(message)() 42 #include "pub_tool_machine.h" // VG_(get_SP)() 43 #include "pub_tool_threadstate.h" 44 #include "pub_tool_tooliface.h" // VG_(needs_...)() 45 46 47 /* Global variables. */ 48 49 Bool DRD_(g_free_is_write); 50 51 52 /* Local function declarations. */ 53 54 static Bool handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret); 55 56 57 /* Function definitions. */ 58 59 /** 60 * Tell the Valgrind core the address of the DRD function that processes 61 * client requests. Must be called before any client code is run. 62 */ 63 void DRD_(clientreq_init)(void) 64 { 65 VG_(needs_client_requests)(handle_client_request); 66 } 67 68 /** 69 * DRD's handler for Valgrind client requests. The code below handles both 70 * DRD's public and tool-internal client requests. 71 */ 72 #if defined(VGP_mips32_linux) || defined(VGP_mips64_linux) 73 /* There is a cse related issue in gcc for MIPS. Optimization level 74 has to be lowered, so cse related optimizations are not 75 included. */ 76 __attribute__((optimize("O1"))) 77 #endif 78 static Bool handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret) 79 { 80 UWord result = 0; 81 const DrdThreadId drd_tid = DRD_(thread_get_running_tid)(); 82 83 tl_assert(vg_tid == VG_(get_running_tid())); 84 tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_tid) == drd_tid 85 || (VG_USERREQ__GDB_MONITOR_COMMAND == arg[0] 86 && vg_tid == VG_INVALID_THREADID)); 87 /* Check the consistency of vg_tid and drd_tid, unless 88 vgdb has forced the invokation of a gdb monitor cmd 89 when no threads was running (i.e. all threads blocked 90 in a syscall. In such a case, vg_tid is invalid, 91 its conversion to a drd thread id gives also an invalid 92 drd thread id, but drd_tid is not invalid (probably 93 equal to the last running drd thread. */ 94 95 switch (arg[0]) 96 { 97 case VG_USERREQ__MALLOCLIKE_BLOCK: 98 if (DRD_(g_free_is_write)) { 99 GenericErrInfo GEI = { 100 .tid = DRD_(thread_get_running_tid)(), 101 .addr = 0, 102 }; 103 VG_(maybe_record_error)(vg_tid, 104 GenericErr, 105 VG_(get_IP)(vg_tid), 106 "--free-is-write=yes is incompatible with" 107 " custom memory allocator client requests", 108 &GEI); 109 } 110 if (arg[1]) 111 DRD_(malloclike_block)(vg_tid, arg[1]/*addr*/, arg[2]/*size*/); 112 break; 113 114 case VG_USERREQ__RESIZEINPLACE_BLOCK: 115 if (!DRD_(freelike_block)(vg_tid, arg[1]/*addr*/, False)) 116 { 117 GenericErrInfo GEI = { 118 .tid = DRD_(thread_get_running_tid)(), 119 .addr = 0, 120 }; 121 VG_(maybe_record_error)(vg_tid, 122 GenericErr, 123 VG_(get_IP)(vg_tid), 124 "Invalid VG_USERREQ__RESIZEINPLACE_BLOCK request", 125 &GEI); 126 } 127 DRD_(malloclike_block)(vg_tid, arg[1]/*addr*/, arg[3]/*newSize*/); 128 break; 129 130 case VG_USERREQ__FREELIKE_BLOCK: 131 if (arg[1] && ! DRD_(freelike_block)(vg_tid, arg[1]/*addr*/, False)) 132 { 133 GenericErrInfo GEI = { 134 .tid = DRD_(thread_get_running_tid)(), 135 .addr = 0, 136 }; 137 VG_(maybe_record_error)(vg_tid, 138 GenericErr, 139 VG_(get_IP)(vg_tid), 140 "Invalid VG_USERREQ__FREELIKE_BLOCK request", 141 &GEI); 142 } 143 break; 144 145 case VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID: 146 result = vg_tid; 147 break; 148 149 case VG_USERREQ__DRD_GET_DRD_THREAD_ID: 150 result = drd_tid; 151 break; 152 153 case VG_USERREQ__DRD_SET_THREAD_NAME: 154 DRD_(thread_set_name)(drd_tid, (const HChar*)arg[1]); 155 break; 156 157 case VG_USERREQ__DRD_START_SUPPRESSION: 158 /*_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED*/ 159 case VG_USERREQ_TOOL_BASE('H','G') + 256 + 39: 160 DRD_(start_suppression)(arg[1], arg[1] + arg[2], "client"); 161 break; 162 163 case VG_USERREQ__DRD_FINISH_SUPPRESSION: 164 /*_VG_USERREQ__HG_ARANGE_MAKE_TRACKED*/ 165 case VG_USERREQ_TOOL_BASE('H','G') + 256 + 40: 166 DRD_(finish_suppression)(arg[1], arg[1] + arg[2]); 167 break; 168 169 case VG_USERREQ__DRD_ANNOTATE_HAPPENS_BEFORE: 170 DRD_(hb_happens_before)(drd_tid, arg[1]); 171 break; 172 173 case VG_USERREQ__DRD_ANNOTATE_HAPPENS_AFTER: 174 DRD_(hb_happens_after)(drd_tid, arg[1]); 175 break; 176 177 case VG_USERREQ__DRD_ANNOTATE_RWLOCK_CREATE: 178 if (arg[1]) 179 { 180 struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]); 181 if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock) 182 break; 183 } 184 DRD_(rwlock_pre_init)(arg[1], user_rwlock); 185 break; 186 187 case VG_USERREQ__DRD_ANNOTATE_RWLOCK_DESTROY: 188 if (arg[1]) 189 { 190 struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]); 191 if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock) 192 break; 193 } 194 DRD_(rwlock_post_destroy)(arg[1], user_rwlock); 195 break; 196 197 case VG_USERREQ__DRD_ANNOTATE_RWLOCK_ACQUIRED: 198 if (arg[1]) 199 { 200 struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]); 201 if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock) 202 break; 203 } 204 tl_assert(arg[2] == !! arg[2]); 205 if (arg[2]) 206 { 207 DRD_(rwlock_pre_wrlock)(arg[1], user_rwlock); 208 DRD_(rwlock_post_wrlock)(arg[1], user_rwlock, True); 209 } 210 else 211 { 212 DRD_(rwlock_pre_rdlock)(arg[1], user_rwlock); 213 DRD_(rwlock_post_rdlock)(arg[1], user_rwlock, True); 214 } 215 break; 216 217 case VG_USERREQ__DRD_ANNOTATE_RWLOCK_RELEASED: 218 if (arg[1]) 219 { 220 struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]); 221 if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock) 222 break; 223 } 224 tl_assert(arg[2] == !! arg[2]); 225 DRD_(rwlock_pre_unlock)(arg[1], user_rwlock); 226 break; 227 228 case VG_USERREQ__DRD_ANNOTATE_SEM_INIT_PRE: 229 DRD_(semaphore_init)(arg[1], 0, arg[2]); 230 break; 231 232 case VG_USERREQ__DRD_ANNOTATE_SEM_DESTROY_POST: 233 DRD_(semaphore_destroy)(arg[1]); 234 break; 235 236 case VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_PRE: 237 DRD_(semaphore_pre_wait)(arg[1]); 238 break; 239 240 case VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_POST: 241 DRD_(semaphore_post_wait)(drd_tid, arg[1], True /* waited */); 242 break; 243 244 case VG_USERREQ__DRD_ANNOTATE_SEM_POST_PRE: 245 DRD_(semaphore_pre_post)(drd_tid, arg[1]); 246 break; 247 248 case VG_USERREQ__SET_PTHREAD_COND_INITIALIZER: 249 DRD_(pthread_cond_initializer) = (Addr)arg[1]; 250 DRD_(pthread_cond_initializer_size) = arg[2]; 251 break; 252 253 case VG_USERREQ__DRD_START_NEW_SEGMENT: 254 DRD_(thread_new_segment)(DRD_(PtThreadIdToDrdThreadId)(arg[1])); 255 break; 256 257 case VG_USERREQ__DRD_START_TRACE_ADDR: 258 DRD_(start_tracing_address_range)(arg[1], arg[1] + arg[2], False); 259 break; 260 261 case VG_USERREQ__DRD_STOP_TRACE_ADDR: 262 DRD_(stop_tracing_address_range)(arg[1], arg[1] + arg[2]); 263 break; 264 265 case VG_USERREQ__DRD_RECORD_LOADS: 266 DRD_(thread_set_record_loads)(drd_tid, arg[1]); 267 break; 268 269 case VG_USERREQ__DRD_RECORD_STORES: 270 DRD_(thread_set_record_stores)(drd_tid, arg[1]); 271 break; 272 273 case VG_USERREQ__SET_PTHREADID: 274 // pthread_self() returns 0 for programs not linked with libpthread.so. 275 if (arg[1] != INVALID_POSIX_THREADID) 276 DRD_(thread_set_pthreadid)(drd_tid, arg[1]); 277 break; 278 279 case VG_USERREQ__SET_JOINABLE: 280 { 281 const DrdThreadId drd_joinable = DRD_(PtThreadIdToDrdThreadId)(arg[1]); 282 if (drd_joinable != DRD_INVALID_THREADID) 283 DRD_(thread_set_joinable)(drd_joinable, (Bool)arg[2]); 284 else { 285 InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] }; 286 VG_(maybe_record_error)(vg_tid, 287 InvalidThreadId, 288 VG_(get_IP)(vg_tid), 289 "pthread_detach(): invalid thread ID", 290 &ITI); 291 } 292 break; 293 } 294 295 case VG_USERREQ__ENTERING_PTHREAD_CREATE: 296 DRD_(thread_entering_pthread_create)(drd_tid); 297 break; 298 299 case VG_USERREQ__LEFT_PTHREAD_CREATE: 300 DRD_(thread_left_pthread_create)(drd_tid); 301 break; 302 303 case VG_USERREQ__POST_THREAD_JOIN: 304 { 305 const DrdThreadId thread_to_join = DRD_(PtThreadIdToDrdThreadId)(arg[1]); 306 if (thread_to_join == DRD_INVALID_THREADID) 307 { 308 InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] }; 309 VG_(maybe_record_error)(vg_tid, 310 InvalidThreadId, 311 VG_(get_IP)(vg_tid), 312 "pthread_join(): invalid thread ID", 313 &ITI); 314 } 315 else 316 { 317 DRD_(thread_post_join)(drd_tid, thread_to_join); 318 } 319 break; 320 } 321 322 case VG_USERREQ__PRE_THREAD_CANCEL: 323 { 324 const DrdThreadId thread_to_cancel =DRD_(PtThreadIdToDrdThreadId)(arg[1]); 325 if (thread_to_cancel == DRD_INVALID_THREADID) 326 { 327 InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] }; 328 VG_(maybe_record_error)(vg_tid, 329 InvalidThreadId, 330 VG_(get_IP)(vg_tid), 331 "pthread_cancel(): invalid thread ID", 332 &ITI); 333 } 334 else 335 { 336 DRD_(thread_pre_cancel)(thread_to_cancel); 337 } 338 break; 339 } 340 341 case VG_USERREQ__POST_THREAD_CANCEL: 342 break; 343 344 case VG_USERREQ__PRE_MUTEX_INIT: 345 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 346 DRD_(mutex_init)(arg[1], arg[2]); 347 break; 348 349 case VG_USERREQ__POST_MUTEX_INIT: 350 DRD_(thread_leave_synchr)(drd_tid); 351 break; 352 353 case VG_USERREQ__PRE_MUTEX_DESTROY: 354 DRD_(thread_enter_synchr)(drd_tid); 355 break; 356 357 case VG_USERREQ__POST_MUTEX_DESTROY: 358 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 359 DRD_(mutex_post_destroy)(arg[1]); 360 break; 361 362 case VG_USERREQ__PRE_MUTEX_LOCK: 363 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 364 DRD_(mutex_pre_lock)(arg[1], arg[2], arg[3]); 365 break; 366 367 case VG_USERREQ__POST_MUTEX_LOCK: 368 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 369 DRD_(mutex_post_lock)(arg[1], arg[2], False/*post_cond_wait*/); 370 break; 371 372 case VG_USERREQ__PRE_MUTEX_UNLOCK: 373 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 374 DRD_(mutex_unlock)(arg[1], arg[2]); 375 break; 376 377 case VG_USERREQ__POST_MUTEX_UNLOCK: 378 DRD_(thread_leave_synchr)(drd_tid); 379 break; 380 381 case VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK: 382 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 383 DRD_(spinlock_init_or_unlock)(arg[1]); 384 break; 385 386 case VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK: 387 DRD_(thread_leave_synchr)(drd_tid); 388 break; 389 390 case VG_USERREQ__PRE_COND_INIT: 391 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 392 DRD_(cond_pre_init)(arg[1]); 393 break; 394 395 case VG_USERREQ__POST_COND_INIT: 396 DRD_(thread_leave_synchr)(drd_tid); 397 break; 398 399 case VG_USERREQ__PRE_COND_DESTROY: 400 DRD_(thread_enter_synchr)(drd_tid); 401 break; 402 403 case VG_USERREQ__POST_COND_DESTROY: 404 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 405 DRD_(cond_post_destroy)(arg[1], arg[2]); 406 break; 407 408 case VG_USERREQ__PRE_COND_WAIT: 409 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 410 { 411 const Addr cond = arg[1]; 412 const Addr mutex = arg[2]; 413 const MutexT mutex_type = arg[3]; 414 DRD_(mutex_unlock)(mutex, mutex_type); 415 DRD_(cond_pre_wait)(cond, mutex); 416 } 417 break; 418 419 case VG_USERREQ__POST_COND_WAIT: 420 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 421 { 422 const Addr cond = arg[1]; 423 const Addr mutex = arg[2]; 424 const Bool took_lock = arg[3]; 425 DRD_(cond_post_wait)(cond); 426 DRD_(mutex_post_lock)(mutex, took_lock, True); 427 } 428 break; 429 430 case VG_USERREQ__PRE_COND_SIGNAL: 431 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 432 DRD_(cond_pre_signal)(arg[1]); 433 break; 434 435 case VG_USERREQ__POST_COND_SIGNAL: 436 DRD_(thread_leave_synchr)(drd_tid); 437 break; 438 439 case VG_USERREQ__PRE_COND_BROADCAST: 440 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 441 DRD_(cond_pre_broadcast)(arg[1]); 442 break; 443 444 case VG_USERREQ__POST_COND_BROADCAST: 445 DRD_(thread_leave_synchr)(drd_tid); 446 break; 447 448 case VG_USERREQ__PRE_SEM_INIT: 449 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 450 DRD_(semaphore_init)(arg[1], arg[2], arg[3]); 451 break; 452 453 case VG_USERREQ__POST_SEM_INIT: 454 DRD_(thread_leave_synchr)(drd_tid); 455 break; 456 457 case VG_USERREQ__PRE_SEM_DESTROY: 458 DRD_(thread_enter_synchr)(drd_tid); 459 break; 460 461 case VG_USERREQ__POST_SEM_DESTROY: 462 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 463 DRD_(semaphore_destroy)(arg[1]); 464 break; 465 466 case VG_USERREQ__PRE_SEM_OPEN: 467 DRD_(thread_enter_synchr)(drd_tid); 468 break; 469 470 case VG_USERREQ__POST_SEM_OPEN: 471 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 472 DRD_(semaphore_open)(arg[1], (HChar*)arg[2], arg[3], arg[4], arg[5]); 473 break; 474 475 case VG_USERREQ__PRE_SEM_CLOSE: 476 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 477 DRD_(semaphore_close)(arg[1]); 478 break; 479 480 case VG_USERREQ__POST_SEM_CLOSE: 481 DRD_(thread_leave_synchr)(drd_tid); 482 break; 483 484 case VG_USERREQ__PRE_SEM_WAIT: 485 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 486 DRD_(semaphore_pre_wait)(arg[1]); 487 break; 488 489 case VG_USERREQ__POST_SEM_WAIT: 490 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 491 DRD_(semaphore_post_wait)(drd_tid, arg[1], arg[2]); 492 break; 493 494 case VG_USERREQ__PRE_SEM_POST: 495 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 496 DRD_(semaphore_pre_post)(drd_tid, arg[1]); 497 break; 498 499 case VG_USERREQ__POST_SEM_POST: 500 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 501 DRD_(semaphore_post_post)(drd_tid, arg[1], arg[2]); 502 break; 503 504 case VG_USERREQ__PRE_BARRIER_INIT: 505 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 506 DRD_(barrier_init)(arg[1], arg[2], arg[3], arg[4]); 507 break; 508 509 case VG_USERREQ__POST_BARRIER_INIT: 510 DRD_(thread_leave_synchr)(drd_tid); 511 break; 512 513 case VG_USERREQ__PRE_BARRIER_DESTROY: 514 DRD_(thread_enter_synchr)(drd_tid); 515 break; 516 517 case VG_USERREQ__POST_BARRIER_DESTROY: 518 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 519 DRD_(barrier_destroy)(arg[1], arg[2]); 520 break; 521 522 case VG_USERREQ__PRE_BARRIER_WAIT: 523 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 524 DRD_(barrier_pre_wait)(drd_tid, arg[1], arg[2]); 525 break; 526 527 case VG_USERREQ__POST_BARRIER_WAIT: 528 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 529 DRD_(barrier_post_wait)(drd_tid, arg[1], arg[2], arg[3], arg[4]); 530 break; 531 532 case VG_USERREQ__PRE_RWLOCK_INIT: 533 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 534 DRD_(rwlock_pre_init)(arg[1], pthread_rwlock); 535 break; 536 537 case VG_USERREQ__POST_RWLOCK_INIT: 538 DRD_(thread_leave_synchr)(drd_tid); 539 break; 540 541 case VG_USERREQ__PRE_RWLOCK_DESTROY: 542 DRD_(thread_enter_synchr)(drd_tid); 543 break; 544 545 case VG_USERREQ__POST_RWLOCK_DESTROY: 546 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 547 DRD_(rwlock_post_destroy)(arg[1], pthread_rwlock); 548 break; 549 550 case VG_USERREQ__PRE_RWLOCK_RDLOCK: 551 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 552 DRD_(rwlock_pre_rdlock)(arg[1], pthread_rwlock); 553 break; 554 555 case VG_USERREQ__POST_RWLOCK_RDLOCK: 556 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 557 DRD_(rwlock_post_rdlock)(arg[1], pthread_rwlock, arg[2]); 558 break; 559 560 case VG_USERREQ__PRE_RWLOCK_WRLOCK: 561 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 562 DRD_(rwlock_pre_wrlock)(arg[1], pthread_rwlock); 563 break; 564 565 case VG_USERREQ__POST_RWLOCK_WRLOCK: 566 if (DRD_(thread_leave_synchr)(drd_tid) == 0) 567 DRD_(rwlock_post_wrlock)(arg[1], pthread_rwlock, arg[2]); 568 break; 569 570 case VG_USERREQ__PRE_RWLOCK_UNLOCK: 571 if (DRD_(thread_enter_synchr)(drd_tid) == 0) 572 DRD_(rwlock_pre_unlock)(arg[1], pthread_rwlock); 573 break; 574 575 case VG_USERREQ__POST_RWLOCK_UNLOCK: 576 DRD_(thread_leave_synchr)(drd_tid); 577 break; 578 579 case VG_USERREQ__DRD_CLEAN_MEMORY: 580 if (arg[2] > 0) 581 DRD_(clean_memory)(arg[1], arg[2]); 582 break; 583 584 case VG_USERREQ__HELGRIND_ANNOTATION_UNIMP: 585 { 586 /* Note: it is assumed below that the text arg[1] points to is never 587 * freed, e.g. because it points to static data. 588 */ 589 UnimpClReqInfo UICR = 590 { DRD_(thread_get_running_tid)(), (HChar*)arg[1] }; 591 VG_(maybe_record_error)(vg_tid, 592 UnimpHgClReq, 593 VG_(get_IP)(vg_tid), 594 "", 595 &UICR); 596 } 597 break; 598 599 case VG_USERREQ__DRD_ANNOTATION_UNIMP: 600 { 601 /* Note: it is assumed below that the text arg[1] points to is never 602 * freed, e.g. because it points to static data. 603 */ 604 UnimpClReqInfo UICR = 605 { DRD_(thread_get_running_tid)(), (HChar*)arg[1] }; 606 VG_(maybe_record_error)(vg_tid, 607 UnimpDrdClReq, 608 VG_(get_IP)(vg_tid), 609 "", 610 &UICR); 611 } 612 break; 613 614 default: 615 #if 0 616 VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx", 617 arg[0], arg[1]); 618 tl_assert(0); 619 #endif 620 return False; 621 } 622 623 *ret = result; 624 return True; 625 } 626