/*--------------------------------------------------------------------*/
/*--- Store and compare stack backtraces            m_execontext.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2017 Julian Seward
      jseward (at) acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_core_basics.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"     // For VG_(message)()
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_stacktrace.h"
#include "pub_core_machine.h"       // VG_(get_IP)
#include "pub_core_threadstate.h"   // VG_(is_valid_tid)
#include "pub_core_execontext.h"    // self

/*------------------------------------------------------------*/
/*--- Low-level ExeContext storage.                        ---*/
/*------------------------------------------------------------*/

/* Depending on VgRes, the first 2, 4 or all IP values are used in
   comparisons to remove duplicate errors, and for comparing against
   suppression specifications.  If not used in comparison, the rest
   are purely informational (but often important).

   The contexts are stored in a traditional chained hash table, so as
   to allow quick determination of whether a new context already
   exists.  The hash table starts small and expands dynamically, so as
   to keep the load factor below 1.0.

   The idea is only to ever store any one context once, so as to save
   space and make exact comparisons faster. */


/* Primes for the hash table */

#define N_EC_PRIMES 18

static SizeT ec_primes[N_EC_PRIMES] = {
         769UL,       1543UL,       3079UL,       6151UL,
       12289UL,      24593UL,      49157UL,      98317UL,
      196613UL,     393241UL,     786433UL,    1572869UL,
     3145739UL,    6291469UL,   12582917UL,   25165843UL,
    50331653UL,  100663319UL
};


/* Each element is present in a hash chain, and also contains a
   variable length array of guest code addresses (the useful part). */

struct _ExeContext {
   struct _ExeContext* chain;
   /* A 32-bit unsigned integer that uniquely identifies this
      ExeContext.  Memcheck uses these for origin tracking.  Values
      must be nonzero (else Memcheck's origin tracking is hosed), must
      be a multiple of four, and must be unique.  Hence they start at
      4. */
   UInt ecu;
   /* Variable-length array.  The size is 'n_ips'; at
      least 1, at most VG_DEEPEST_BACKTRACE.  [0] is the current IP,
      [1] is its caller, [2] is the caller of [1], etc. */
   UInt n_ips;
   Addr ips[0];
};
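
/* Illustrative sketch (not part of the build): how one record with its
   trailing variable-length 'ips' array is sized and filled.  This simply
   mirrors the allocation done in record_ExeContext_wrk2 further below;
   'example_alloc_ec' is a hypothetical name used only for this sketch,
   and the 'chain'/'ecu' fields are left for the caller to set. */
#if 0
static ExeContext* example_alloc_ec ( const Addr* ips, UInt n_ips )
{
   /* One block holds the fixed header plus n_ips trailing addresses. */
   ExeContext* ec = VG_(perm_malloc)( sizeof(struct _ExeContext)
                                         + n_ips * sizeof(Addr),
                                      vg_alignof(struct _ExeContext) );
   UInt i;
   ec->n_ips = n_ips;
   for (i = 0; i < n_ips; i++)
      ec->ips[i] = ips[i];   /* [0] is the current IP, [1] its caller, ... */
   return ec;
}
#endif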

/* This is the dynamically expanding hash table. */
static ExeContext** ec_htab;           /* array [ec_htab_size] of ExeContext* */
static SizeT        ec_htab_size;      /* one of the values in ec_primes */
static SizeT        ec_htab_size_idx;  /* 0 .. N_EC_PRIMES-1 */

/* ECU serial number */
static UInt ec_next_ecu = 4;           /* We must never issue zero */

static ExeContext* null_ExeContext;

/* Stats only: the number of times the system was searched to locate a
   context. */
static ULong ec_searchreqs;

/* Stats only: the number of full context comparisons done. */
static ULong ec_searchcmps;

/* Stats only: total number of stored contexts. */
static ULong ec_totstored;

/* Number of 2, 4 and (fast) full cmps done. */
static ULong ec_cmp2s;
static ULong ec_cmp4s;
static ULong ec_cmpAlls;


/*------------------------------------------------------------*/
/*--- Exported functions.                                  ---*/
/*------------------------------------------------------------*/

static ExeContext* record_ExeContext_wrk2 ( const Addr* ips, UInt n_ips );

/* Initialise this subsystem. */
static void init_ExeContext_storage ( void )
{
   Int i;
   static Bool init_done = False;
   if (LIKELY(init_done))
      return;
   ec_searchreqs = 0;
   ec_searchcmps = 0;
   ec_totstored  = 0;
   ec_cmp2s      = 0;
   ec_cmp4s      = 0;
   ec_cmpAlls    = 0;

   ec_htab_size_idx = 0;
   ec_htab_size     = ec_primes[ec_htab_size_idx];
   ec_htab          = VG_(malloc)("execontext.iEs1",
                                  sizeof(ExeContext*) * ec_htab_size);
   for (i = 0; i < ec_htab_size; i++)
      ec_htab[i] = NULL;

   {
      Addr ips[1];
      ips[0] = 0;
      null_ExeContext = record_ExeContext_wrk2(ips, 1);
      // null execontext must be the first one created and get ecu 4.
      vg_assert(null_ExeContext->ecu == 4);
   }

   init_done = True;
}


/* Print stats. */
void VG_(print_ExeContext_stats) ( Bool with_stacktraces )
{
   Int         i;
   ULong       total_n_ips;
   ExeContext* ec;

   init_ExeContext_storage();

   if (with_stacktraces) {
      VG_(message)(Vg_DebugMsg, " exectx: Printing contexts stacktraces\n");
      for (i = 0; i < ec_htab_size; i++) {
         for (ec = ec_htab[i]; ec; ec = ec->chain) {
            VG_(message)(Vg_DebugMsg,
                         " exectx: stacktrace ecu %u n_ips %u\n",
                         ec->ecu, ec->n_ips);
            VG_(pp_StackTrace)( ec->ips, ec->n_ips );
         }
      }
      VG_(message)(Vg_DebugMsg,
                   " exectx: Printed %'llu contexts stacktraces\n",
                   ec_totstored);
   }

   total_n_ips = 0;
   for (i = 0; i < ec_htab_size; i++) {
      for (ec = ec_htab[i]; ec; ec = ec->chain)
         total_n_ips += ec->n_ips;
   }
   VG_(message)(Vg_DebugMsg,
                " exectx: %'lu lists, %'llu contexts (avg %3.2f per list)"
                " (avg %3.2f IP per context)\n",
                ec_htab_size, ec_totstored,
                (Double)ec_totstored / (Double)ec_htab_size,
                (Double)total_n_ips / (Double)ec_totstored);
   VG_(message)(Vg_DebugMsg,
                " exectx: %'llu searches, %'llu full compares (%'llu per 1000)\n",
                ec_searchreqs, ec_searchcmps,
                ec_searchreqs == 0
                   ? 0ULL
                   : ( (ec_searchcmps * 1000ULL) / ec_searchreqs ));
   VG_(message)(Vg_DebugMsg,
                " exectx: %'llu cmp2, %'llu cmp4, %'llu cmpAll\n",
                ec_cmp2s, ec_cmp4s, ec_cmpAlls);
}


/* Print an ExeContext. */
void VG_(pp_ExeContext) ( ExeContext* ec )
{
   VG_(pp_StackTrace)( ec->ips, ec->n_ips );
}
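
/* Illustrative sketch (not part of the build): the effect of the
   resolution argument on VG_(eq_ExeContext), defined just below.  Two
   contexts that share (at least) their top four frames compare equal
   under Vg_LowRes and Vg_MedRes, but under Vg_HighRes only identical
   contexts compare equal -- which, given the deduplication done at
   record time, means the same pointer.  'e1' and 'e2' are assumed to
   be contexts previously obtained from VG_(record_ExeContext). */
#if 0
void example_compare ( ExeContext* e1, ExeContext* e2 )
{
   Bool low  = VG_(eq_ExeContext)( Vg_LowRes,  e1, e2 );  /* top 2 IPs */
   Bool med  = VG_(eq_ExeContext)( Vg_MedRes,  e1, e2 );  /* top 4 IPs */
   Bool high = VG_(eq_ExeContext)( Vg_HighRes, e1, e2 );  /* e1 == e2  */
   (void)low; (void)med; (void)high;
}
#endif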


/* Compare two ExeContexts.  Number of callers considered depends on res. */
Bool VG_(eq_ExeContext) ( VgRes res, const ExeContext* e1,
                          const ExeContext* e2 )
{
   Int i;

   if (e1 == NULL || e2 == NULL)
      return False;

   // Must be at least one address in each trace.
   vg_assert(e1->n_ips >= 1 && e2->n_ips >= 1);

   switch (res) {
   case Vg_LowRes:
      /* Just compare the top two callers. */
      ec_cmp2s++;
      for (i = 0; i < 2; i++) {
         if ( (e1->n_ips <= i) &&  (e2->n_ips <= i)) return True;
         if ( (e1->n_ips <= i) && !(e2->n_ips <= i)) return False;
         if (!(e1->n_ips <= i) &&  (e2->n_ips <= i)) return False;
         if (e1->ips[i] != e2->ips[i])               return False;
      }
      return True;

   case Vg_MedRes:
      /* Just compare the top four callers. */
      ec_cmp4s++;
      for (i = 0; i < 4; i++) {
         if ( (e1->n_ips <= i) &&  (e2->n_ips <= i)) return True;
         if ( (e1->n_ips <= i) && !(e2->n_ips <= i)) return False;
         if (!(e1->n_ips <= i) &&  (e2->n_ips <= i)) return False;
         if (e1->ips[i] != e2->ips[i])               return False;
      }
      return True;

   case Vg_HighRes:
      ec_cmpAlls++;
      /* Compare them all -- just do pointer comparison. */
      if (e1 != e2) return False;
      return True;

   default:
      VG_(core_panic)("VG_(eq_ExeContext): unrecognised VgRes");
   }
}

/* VG_(record_ExeContext) is the head honcho here.  Take a snapshot of
   the client's stack.  Search our collection of ExeContexts to see if
   we already have it, and if not, allocate a new one.  Either way,
   return a pointer to the context.  If there is a matching context we
   guarantee to not allocate a new one.  Thus we never store
   duplicates, and so exact equality can be quickly done as equality
   on the returned ExeContext* values themselves.  Inspired by Hugs's
   Text type.

   Also checks whether the hash table needs expanding, and expands it
   if so. */

static inline UWord ROLW ( UWord w, Int n )
{
   Int bpw = 8 * sizeof(UWord);
   w = (w << n) | (w >> (bpw-n));
   return w;
}

static UWord calc_hash ( const Addr* ips, UInt n_ips, UWord htab_sz )
{
   UInt  i;
   UWord hash = 0;
   vg_assert(htab_sz > 0);
   for (i = 0; i < n_ips; i++) {
      hash ^= ips[i];
      hash = ROLW(hash, 19);
   }
   return hash % htab_sz;
}

static void resize_ec_htab ( void )
{
   SizeT        i;
   SizeT        new_size;
   ExeContext** new_ec_htab;

   vg_assert(ec_htab_size_idx >= 0 && ec_htab_size_idx < N_EC_PRIMES);
   if (ec_htab_size_idx == N_EC_PRIMES-1)
      return; /* out of primes - can't resize further */

   new_size    = ec_primes[ec_htab_size_idx + 1];
   new_ec_htab = VG_(malloc)("execontext.reh1",
                             sizeof(ExeContext*) * new_size);

   VG_(debugLog)(
      1, "execontext",
      "resizing htab from size %lu to %lu (idx %lu) Total#ECs=%llu\n",
      ec_htab_size, new_size, ec_htab_size_idx + 1, ec_totstored);

   for (i = 0; i < new_size; i++)
      new_ec_htab[i] = NULL;

   for (i = 0; i < ec_htab_size; i++) {
      ExeContext* cur = ec_htab[i];
      while (cur) {
         ExeContext* next = cur->chain;
         UWord hash = calc_hash(cur->ips, cur->n_ips, new_size);
         vg_assert(hash < new_size);
         cur->chain = new_ec_htab[hash];
         new_ec_htab[hash] = cur;
         cur = next;
      }
   }

   VG_(free)(ec_htab);
   ec_htab = new_ec_htab;
   ec_htab_size = new_size;
   ec_htab_size_idx++;
}
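
/* Illustrative sketch (not part of the build): because identical traces
   are never stored twice, a tool can detect a repeated error site simply
   by comparing the ExeContext pointers returned by VG_(record_ExeContext).
   'tid' is assumed to be a valid ThreadId; the guest thread's state does
   not change between the two calls, so both snapshots yield the same
   trace and hence the same stored record. */
#if 0
void example_dedup ( ThreadId tid )
{
   ExeContext* here1 = VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ );
   ExeContext* here2 = VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ );
   /* The second lookup finds the record stored by the first. */
   vg_assert(here1 == here2);
}
#endif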

/* Used by the outer as a marker to separate the frames of the inner
   valgrind from the frames of the inner guest. */
static void _______VVVVVVVV_appended_inner_guest_stack_VVVVVVVV_______ (void)
{
}

/* Do the first part of getting a stack trace: actually unwind the
   stack, and hand the results off to the duplicate-trace-finder
   (_wrk2). */
static ExeContext* record_ExeContext_wrk ( ThreadId tid, Word first_ip_delta,
                                           Bool first_ip_only )
{
   Addr ips[VG_(clo_backtrace_size)];
   UInt n_ips;

   init_ExeContext_storage();

   vg_assert(sizeof(void*) == sizeof(UWord));
   vg_assert(sizeof(void*) == sizeof(Addr));

   vg_assert(VG_(is_valid_tid)(tid));

   if (first_ip_only) {
      n_ips = 1;
      ips[0] = VG_(get_IP)(tid) + first_ip_delta;
   } else {
      n_ips = VG_(get_StackTrace)( tid, ips, VG_(clo_backtrace_size),
                                   NULL/*array to dump SP values in*/,
                                   NULL/*array to dump FP values in*/,
                                   first_ip_delta );
      if (VG_(inner_threads) != NULL
          && n_ips + 1 < VG_(clo_backtrace_size)) {
         /* An inner V has informed us (the outer) of its thread array.
            Append the inner guest stack trace, if we still have some
            room in the ips array for the separator and (some) inner
            guest IPs. */
         UInt inner_tid;

         for (inner_tid = 1; inner_tid < VG_N_THREADS; inner_tid++) {
            if (VG_(threads)[tid].os_state.lwpid
                == VG_(inner_threads)[inner_tid].os_state.lwpid) {
               ThreadState* save_outer_vg_threads = VG_(threads);
               UInt n_ips_inner_guest;

               /* Append the separator + the inner guest stack trace. */
               ips[n_ips] = (Addr)
                  _______VVVVVVVV_appended_inner_guest_stack_VVVVVVVV_______;
               n_ips++;
               VG_(threads) = VG_(inner_threads);
               n_ips_inner_guest
                  = VG_(get_StackTrace)( inner_tid,
                                         ips + n_ips,
                                         VG_(clo_backtrace_size) - n_ips,
                                         NULL/*array to dump SP values in*/,
                                         NULL/*array to dump FP values in*/,
                                         first_ip_delta );
               n_ips += n_ips_inner_guest;
               VG_(threads) = save_outer_vg_threads;
               break;
            }
         }
      }
   }

   return record_ExeContext_wrk2 ( ips, n_ips );
}

/* Do the second part of getting a stack trace: ips[0 .. n_ips-1]
   holds a proposed trace.  Find or allocate a suitable ExeContext.
   Note that callers must have done init_ExeContext_storage() before
   getting to this point. */
static ExeContext* record_ExeContext_wrk2 ( const Addr* ips, UInt n_ips )
{
   Int         i;
   Bool        same;
   UWord       hash;
   ExeContext* new_ec;
   ExeContext* list;
   ExeContext  *prev2, *prev;

   static UInt ctr = 0;

   vg_assert(n_ips >= 1 && n_ips <= VG_(clo_backtrace_size));

   /* Now figure out if we've seen this one before.  First hash it so
      as to determine the list number. */
   hash = calc_hash( ips, n_ips, ec_htab_size );

   /* And (the expensive bit) look for a matching entry in the list. */

   ec_searchreqs++;

   prev2 = NULL;
   prev  = NULL;
   list  = ec_htab[hash];

   while (True) {
      if (list == NULL) break;
      ec_searchcmps++;
      same = list->n_ips == n_ips;
      for (i = 0; i < n_ips && same ; i++) {
         same = list->ips[i] == ips[i];
      }
      if (same) break;
      prev2 = prev;
      prev  = list;
      list  = list->chain;
   }

   if (list != NULL) {
      /* Yay!  We found it.  Once every 8 searches, move it one step
         closer to the start of the list to make future searches
         cheaper. */
      if (0 == ((ctr++) & 7)) {
         if (prev2 != NULL && prev != NULL) {
            /* Found at 3rd or later position in the chain. */
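            /* Rotate 'list' one step towards the chain head:
                  before:  ... -> prev2 -> prev -> list -> rest
                  after:   ... -> prev2 -> list -> prev -> rest */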
            vg_assert(prev2->chain == prev);
            vg_assert(prev->chain  == list);
            prev2->chain = list;
            prev->chain  = list->chain;
            list->chain  = prev;
         }
         else if (prev2 == NULL && prev != NULL) {
            /* Found at 2nd position in the chain. */
            vg_assert(ec_htab[hash] == prev);
            vg_assert(prev->chain == list);
            prev->chain = list->chain;
            list->chain = prev;
            ec_htab[hash] = list;
         }
      }
      return list;
   }

   /* Bummer.  We have to allocate a new context record. */
   ec_totstored++;

   new_ec = VG_(perm_malloc)( sizeof(struct _ExeContext)
                                 + n_ips * sizeof(Addr),
                              vg_alignof(struct _ExeContext));

   for (i = 0; i < n_ips; i++)
      new_ec->ips[i] = ips[i];

   vg_assert(VG_(is_plausible_ECU)(ec_next_ecu));
   new_ec->ecu = ec_next_ecu;
   ec_next_ecu += 4;
   if (ec_next_ecu == 0) {
      /* Urr.  Now we're hosed; we emitted 2^30 ExeContexts already
         and have run out of numbers.  Not sure what to do. */
      VG_(core_panic)("m_execontext: more than 2^30 ExeContexts created");
   }

   new_ec->n_ips = n_ips;
   new_ec->chain = ec_htab[hash];
   ec_htab[hash] = new_ec;

   /* Resize the hash table, maybe? */
   if ( ((ULong)ec_totstored) > ((ULong)ec_htab_size) ) {
      vg_assert(ec_htab_size_idx >= 0 && ec_htab_size_idx < N_EC_PRIMES);
      if (ec_htab_size_idx < N_EC_PRIMES-1)
         resize_ec_htab();
   }

   return new_ec;
}

ExeContext* VG_(record_ExeContext)( ThreadId tid, Word first_ip_delta ) {
   return record_ExeContext_wrk( tid, first_ip_delta,
                                 False/*!first_ip_only*/ );
}

ExeContext* VG_(record_depth_1_ExeContext)( ThreadId tid, Word first_ip_delta )
{
   return record_ExeContext_wrk( tid, first_ip_delta,
                                 True/*first_ip_only*/ );
}

ExeContext* VG_(make_depth_1_ExeContext_from_Addr)( Addr a ) {
   init_ExeContext_storage();
   return record_ExeContext_wrk2( &a, 1 );
}

StackTrace VG_(get_ExeContext_StackTrace) ( ExeContext* e ) {
   return e->ips;
}

UInt VG_(get_ECU_from_ExeContext)( const ExeContext* e ) {
   vg_assert(VG_(is_plausible_ECU)(e->ecu));
   return e->ecu;
}

Int VG_(get_ExeContext_n_ips)( const ExeContext* e ) {
   vg_assert(e->n_ips >= 1);
   return e->n_ips;
}

ExeContext* VG_(get_ExeContext_from_ECU)( UInt ecu )
{
   UWord i;
   ExeContext* ec;
   vg_assert(VG_(is_plausible_ECU)(ecu));
   vg_assert(ec_htab_size > 0);
   for (i = 0; i < ec_htab_size; i++) {
      for (ec = ec_htab[i]; ec; ec = ec->chain) {
         if (ec->ecu == ecu)
            return ec;
      }
   }
   return NULL;
}

ExeContext* VG_(make_ExeContext_from_StackTrace)( const Addr* ips, UInt n_ips )
{
   init_ExeContext_storage();
   return record_ExeContext_wrk2(ips, n_ips);
}

ExeContext* VG_(null_ExeContext) (void)
{
   init_ExeContext_storage();
   return null_ExeContext;
}

/*--------------------------------------------------------------------*/
/*--- end                                           m_execontext.c ---*/
/*--------------------------------------------------------------------*/