/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2012 Bart Van Assche <bvanassche (at) acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_bitmap.h"
#include "drd_thread_bitmap.h"
#include "drd_vc.h"            /* DRD_(vc_snprint)() */

/* Include several source files here in order to allow the compiler to */
/* do more inlining. */
#include "drd_bitmap.c"
#include "drd_load_store.h"
#include "drd_segment.c"
#include "drd_thread.c"
#include "drd_vc.c"
#include "libvex_guest_offsets.h"


/* STACK_POINTER_OFFSET: VEX register offset for the stack pointer register. */
#if defined(VGA_x86)
#define STACK_POINTER_OFFSET OFFSET_x86_ESP
#elif defined(VGA_amd64)
#define STACK_POINTER_OFFSET OFFSET_amd64_RSP
#elif defined(VGA_ppc32)
#define STACK_POINTER_OFFSET OFFSET_ppc32_GPR1
#elif defined(VGA_ppc64)
#define STACK_POINTER_OFFSET OFFSET_ppc64_GPR1
#elif defined(VGA_arm)
#define STACK_POINTER_OFFSET OFFSET_arm_R13
#elif defined(VGA_s390x)
#define STACK_POINTER_OFFSET OFFSET_s390x_r15
#elif defined(VGA_mips32)
#define STACK_POINTER_OFFSET OFFSET_mips32_r29
#else
#error Unknown architecture.
#endif


/* Local variables. */

static Bool s_check_stack_accesses = False;
static Bool s_first_race_only      = False;
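
/*
 * Note: s_check_stack_accesses and s_first_race_only are only modified
 * through the setter functions below; they correspond to DRD's
 * --check-stack-var and --first-race-only command-line options.
 */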

/* Function definitions. */

Bool DRD_(get_check_stack_accesses)()
{
   return s_check_stack_accesses;
}

void DRD_(set_check_stack_accesses)(const Bool c)
{
   tl_assert(c == False || c == True);
   s_check_stack_accesses = c;
}

Bool DRD_(get_first_race_only)()
{
   return s_first_race_only;
}

void DRD_(set_first_race_only)(const Bool fro)
{
   tl_assert(fro == False || fro == True);
   s_first_race_only = fro;
}

void DRD_(trace_mem_access)(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type,
                            const HWord stored_value_hi,
                            const HWord stored_value_lo)
{
   if (DRD_(is_any_traced)(addr, addr + size))
   {
      char* vc;

      vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(DRD_(thread_get_running_tid)()));
      if (access_type == eStore && size <= sizeof(HWord)) {
         DRD_(trace_msg_w_bt)("store 0x%lx size %ld val %ld/0x%lx (thread %d /"
                              " vc %s)", addr, size, stored_value_lo,
                              stored_value_lo, DRD_(thread_get_running_tid)(),
                              vc);
      } else if (access_type == eStore && size > sizeof(HWord)) {
         ULong sv;

         tl_assert(sizeof(HWord) == 4);
         sv = ((ULong)stored_value_hi << 32) | stored_value_lo;
         DRD_(trace_msg_w_bt)("store 0x%lx size %ld val %lld/0x%llx (thread %d"
                              " / vc %s)", addr, size, sv, sv,
                              DRD_(thread_get_running_tid)(), vc);
      } else {
         DRD_(trace_msg_w_bt)("%s 0x%lx size %ld (thread %d / vc %s)",
                              access_type == eLoad ? "load "
                              : access_type == eStore ? "store"
                              : access_type == eStart ? "start"
                              : access_type == eEnd ? "end " : "????",
                              addr, size, DRD_(thread_get_running_tid)(), vc);
      }
      VG_(free)(vc);
      tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)())
                == VG_(get_running_tid)());
   }
}

static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr, const SizeT size)
{
   return DRD_(trace_mem_access)(addr, size, eLoad, 0, 0);
}

static VG_REGPARM(3) void drd_trace_mem_store(const Addr addr, const SizeT size,
                                              const HWord stored_value_hi,
                                              const HWord stored_value_lo)
{
   return DRD_(trace_mem_access)(addr, size, eStore, stored_value_hi,
                                 stored_value_lo);
}

static void drd_report_race(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
   ThreadId vg_tid;

   vg_tid = VG_(get_running_tid)();
   if (!DRD_(get_check_stack_accesses)()
       && DRD_(thread_address_on_any_stack)(addr)) {
#if 0
      GenericErrInfo GEI = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = addr,
      };
      VG_(maybe_record_error)(vg_tid, GenericErr, VG_(get_IP)(vg_tid),
                              "--check-stack-var=no skips checking stack"
                              " variables shared over threads",
                              &GEI);
#endif
   } else {
      DataRaceErrInfo drei = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = addr,
         .size = size,
         .access_type = access_type,
      };
      VG_(maybe_record_error)(vg_tid, DataRaceErr, VG_(get_IP)(vg_tid),
                              "Conflicting access", &drei);

      if (s_first_race_only)
         DRD_(start_suppression)(addr, addr + size, "first race only");
   }
}

VG_REGPARM(2) void DRD_(trace_load)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* This consistency check is too expensive for regular builds and is
      therefore only compiled in when ENABLE_DRD_CONSISTENCY_CHECKS is
      defined. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_load_triggers_conflict(addr, addr + size)
       && !DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eLoad);
   }
}
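
/*
 * The drd_trace_load_<n>() and drd_trace_store_<n>() callbacks below are
 * size-specialized variants of DRD_(trace_load)() and DRD_(trace_store)():
 * for the common 1, 2, 4 and 8-byte accesses the access size is implied by
 * the callback itself, so the generated dirty helper call only has to pass
 * the address.
 */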

static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_load_1_triggers_conflict(addr)
       && !DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_load_2_triggers_conflict(addr)
       && !DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_load_4_triggers_conflict(addr)
       && !DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_load_8_triggers_conflict(addr)
       && !DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eLoad);
   }
}

VG_REGPARM(2) void DRD_(trace_store)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* This consistency check is too expensive for regular builds and is
      therefore only compiled in when ENABLE_DRD_CONSISTENCY_CHECKS is
      defined. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_store_triggers_conflict(addr, addr + size)
       && !DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_store_1_triggers_conflict(addr)
       && !DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_store_2_triggers_conflict(addr)
       && !DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_store_4_triggers_conflict(addr)
       && !DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_store_8_triggers_conflict(addr)
       && !DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eStore);
   }
}
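
/*
 * None of the callbacks above are called directly: instrument_load(),
 * instrument_store() and DRD_(instrument)() below insert dirty helper calls
 * to them into the instrumented IRSB.
 */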

/**
 * Return true if and only if addr_expr matches the pattern (SP) or
 * <offset>(SP).
 */
static Bool is_stack_access(IRSB* const bb, IRExpr* const addr_expr)
{
   Bool result = False;

   if (addr_expr->tag == Iex_RdTmp)
   {
      int i;
      /* Only scan the statements that have already been added to bb. */
      for (i = 0; i < bb->stmts_used; i++)
      {
         if (bb->stmts[i]
             && bb->stmts[i]->tag == Ist_WrTmp
             && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp)
         {
            IRExpr* e = bb->stmts[i]->Ist.WrTmp.data;
            if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET)
            {
               result = True;
            }

            //ppIRExpr(e);
            //VG_(printf)(" (%s)\n", result ? "True" : "False");
            break;
         }
      }
   }
   return result;
}

static const IROp u_widen_irop[5][9] = {
   [Ity_I1  - Ity_I1] = { [4] = Iop_1Uto32,  [8] = Iop_1Uto64 },
   [Ity_I8  - Ity_I1] = { [4] = Iop_8Uto32,  [8] = Iop_8Uto64 },
   [Ity_I16 - Ity_I1] = { [4] = Iop_16Uto32, [8] = Iop_16Uto64 },
   [Ity_I32 - Ity_I1] = { [8] = Iop_32Uto64 },
};

/**
 * Instrument the client code to trace a memory load (--trace-addr).
 */
static IRExpr* instr_trace_mem_load(IRSB* const bb, IRExpr* addr_expr,
                                    const HWord size)
{
   IRTemp tmp;

   tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
   addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
   addr_expr = IRExpr_RdTmp(tmp);

   addStmtToIRSB(bb,
      IRStmt_Dirty(
         unsafeIRDirty_0_N(/*regparms*/2,
                           "drd_trace_mem_load",
                           VG_(fnptr_to_fnentry)(drd_trace_mem_load),
                           mkIRExprVec_2(addr_expr, mkIRExpr_HWord(size)))));

   return addr_expr;
}

/**
 * Instrument the client code to trace a memory store (--trace-addr).
 */
static void instr_trace_mem_store(IRSB* const bb, IRExpr* const addr_expr,
                                  IRExpr* data_expr_hi, IRExpr* data_expr_lo)
{
   IRType ty_data_expr;
   HWord size;

   tl_assert(sizeof(HWord) == 4 || sizeof(HWord) == 8);
   tl_assert(!data_expr_hi || typeOfIRExpr(bb->tyenv, data_expr_hi) == Ity_I32);

   ty_data_expr = typeOfIRExpr(bb->tyenv, data_expr_lo);
   size = sizeofIRType(ty_data_expr);

#if 0
   // Test code
   if (ty_data_expr == Ity_I32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F32);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI32asF32, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F32;
   } else if (ty_data_expr == Ity_I64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F64);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI64asF64, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F64;
   }
#endif

   if (ty_data_expr == Ity_F32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I32);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF32asI32,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I32;
   } else if (ty_data_expr == Ity_F64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I64);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF64asI64,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I64;
   }

   if (size == sizeof(HWord)
       && (ty_data_expr == Ity_I32 || ty_data_expr == Ity_I64))
   {
      /* No conversion necessary */
   } else {
      IROp widen_op;

      if (Ity_I1 <= ty_data_expr
          && ty_data_expr
             < Ity_I1 + sizeof(u_widen_irop)/sizeof(u_widen_irop[0]))
      {
         widen_op = u_widen_irop[ty_data_expr - Ity_I1][sizeof(HWord)];
         if (!widen_op)
            widen_op = Iop_INVALID;
      } else {
         widen_op = Iop_INVALID;
      }
      if (widen_op != Iop_INVALID) {
         IRTemp tmp;

         /* Widen the integer expression to a HWord */
         tmp = newIRTemp(bb->tyenv, sizeof(HWord) == 4 ? Ity_I32 : Ity_I64);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp, IRExpr_Unop(widen_op, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else if (size > sizeof(HWord) && !data_expr_hi
                 && ty_data_expr == Ity_I64) {
         IRTemp tmp;

         tl_assert(sizeof(HWord) == 4);
         tl_assert(size == 8);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp,
                                    IRExpr_Unop(Iop_64HIto32, data_expr_lo)));
         data_expr_hi = IRExpr_RdTmp(tmp);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb, IRStmt_WrTmp(tmp,
                                        IRExpr_Unop(Iop_64to32, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else {
         data_expr_lo = mkIRExpr_HWord(0);
      }
   }
   addStmtToIRSB(bb,
      IRStmt_Dirty(
         unsafeIRDirty_0_N(/*regparms*/3,
                           "drd_trace_mem_store",
                           VG_(fnptr_to_fnentry)(drd_trace_mem_store),
                           mkIRExprVec_4(addr_expr, mkIRExpr_HWord(size),
                                         data_expr_hi ? data_expr_hi
                                         : mkIRExpr_HWord(0), data_expr_lo))));
}

static void instrument_load(IRSB* const bb, IRExpr* const addr_expr,
                            const HWord size)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_1",
                             VG_(fnptr_to_fnentry)(drd_trace_load_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_2",
                             VG_(fnptr_to_fnentry)(drd_trace_load_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_4",
                             VG_(fnptr_to_fnentry)(drd_trace_load_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_8",
                             VG_(fnptr_to_fnentry)(drd_trace_load_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_load",
                             VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                             argv);
      break;
   }
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

static void instrument_store(IRSB* const bb, IRExpr* addr_expr,
                             IRExpr* const data_expr)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;
   HWord size;

   size = sizeofIRType(typeOfIRExpr(bb->tyenv, data_expr));

   if (UNLIKELY(DRD_(any_address_is_traced)())) {
      IRTemp tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
      addr_expr = IRExpr_RdTmp(tmp);
      instr_trace_mem_store(bb, addr_expr, NULL, data_expr);
   }

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_1",
                             VG_(fnptr_to_fnentry)(drd_trace_store_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_2",
                             VG_(fnptr_to_fnentry)(drd_trace_store_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_4",
                             VG_(fnptr_to_fnentry)(drd_trace_store_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_8",
                             VG_(fnptr_to_fnentry)(drd_trace_store_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_store",
                             VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                             argv);
      break;
   }
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}
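
/*
 * DRD_(instrument)() below is DRD's instrumentation callback: the Valgrind
 * core invokes it for every superblock it translates, and the callback
 * returns a copy of bb_in in which memory accesses (loads, stores, dirty
 * helpers with memory effects, CAS and LL/SC operations) are instrumented
 * with calls to the trace functions defined above, subject to the .plt and
 * stack-access exceptions handled in the code.
 */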

IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
                       IRSB* const bb_in,
                       VexGuestLayout* const layout,
                       VexGuestExtents* const vge,
                       IRType const gWordTy,
                       IRType const hWordTy)
{
   IRDirty* di;
   Int i;
   IRSB* bb;
   IRExpr** argv;
   Bool instrument = True;

   /* Set up BB */
   bb = emptyIRSB();
   bb->tyenv = deepCopyIRTypeEnv(bb_in->tyenv);
   bb->next = deepCopyIRExpr(bb_in->next);
   bb->jumpkind = bb_in->jumpkind;
   bb->offsIP = bb_in->offsIP;

   for (i = 0; i < bb_in->stmts_used; i++)
   {
      IRStmt* const st = bb_in->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));

      switch (st->tag)
      {
      /*
       * Note: the code for not instrumenting the code in .plt
       * sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21
       * + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4).
       * This is because on this platform dynamic library symbols are
       * relocated in another way than by later binutils versions. The
       * linker e.g. does not generate .got.plt sections on CentOS 3.0.
       */
      case Ist_IMark:
         instrument = VG_(DebugInfo_sect_kind)(NULL, 0, st->Ist.IMark.addr)
            != Vg_SectPLT;
         addStmtToIRSB(bb, st);
         break;

      case Ist_MBE:
         switch (st->Ist.MBE.event)
         {
         case Imbe_Fence:
            break; /* not interesting */
         default:
            tl_assert(0);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Store:
         if (instrument)
            instrument_store(bb, st->Ist.Store.addr, st->Ist.Store.data);
         addStmtToIRSB(bb, st);
         break;

      case Ist_WrTmp:
         if (instrument) {
            const IRExpr* const data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               IRExpr* addr_expr = data->Iex.Load.addr;
               if (UNLIKELY(DRD_(any_address_is_traced)())) {
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                   sizeofIRType(data->Iex.Load.ty));
               }
               instrument_load(bb, addr_expr,
                               sizeofIRType(data->Iex.Load.ty));
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Dirty:
         if (instrument) {
            IRDirty* d = st->Ist.Dirty.details;
            IREffect const mFx = d->mFx;
            switch (mFx) {
            case Ifx_None:
               break;
            case Ifx_Read:
            case Ifx_Write:
            case Ifx_Modify:
               tl_assert(d->mAddr);
               tl_assert(d->mSize > 0);
               argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
               if (mFx == Ifx_Read || mFx == Ifx_Modify) {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_load",
                          VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               if (mFx == Ifx_Write || mFx == Ifx_Modify)
               {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_store",
                          VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               break;
            default:
               tl_assert(0);
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_CAS:
         if (instrument) {
            /*
             * Treat compare-and-swap as a read. By handling atomic
             * instructions as read instructions no data races are reported
             * between conflicting atomic operations nor between atomic
             * operations and non-atomic reads. Conflicts between atomic
             * operations and non-atomic write operations are still reported
             * however.
             */
            Int dataSize;
            IRCAS* cas = st->Ist.CAS.details;

            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataSize = sizeofIRType(typeOfIRExpr(bb->tyenv, cas->dataLo));
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */

            if (UNLIKELY(DRD_(any_address_is_traced)()))
               instr_trace_mem_store(bb, cas->addr, cas->dataHi, cas->dataLo);

            instrument_load(bb, cas->addr, dataSize);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_LLSC: {
         /*
          * Ignore store-conditionals (except for tracing), and handle
          * load-linked operations exactly like normal loads.
          */
         IRType dataTy;

         if (st->Ist.LLSC.storedata == NULL) {
            /* LL */
            dataTy = typeOfIRTemp(bb_in->tyenv, st->Ist.LLSC.result);
            if (instrument) {
               IRExpr* addr_expr = st->Ist.LLSC.addr;
               if (UNLIKELY(DRD_(any_address_is_traced)()))
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                   sizeofIRType(dataTy));

               instrument_load(bb, addr_expr, sizeofIRType(dataTy));
            }
         } else {
            /* SC */
            instr_trace_mem_store(bb, st->Ist.LLSC.addr, NULL,
                                  st->Ist.LLSC.storedata);
         }
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_NoOp:
      case Ist_AbiHint:
      case Ist_Put:
      case Ist_PutI:
      case Ist_Exit:
         /* None of these can contain any memory references. */
         addStmtToIRSB(bb, st);
         break;

      default:
         ppIRStmt(st);
         tl_assert(0);
      }
   }

   return bb;
}