/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <arpa/inet.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>  // uint32_t, uintptr_t, SIZE_MAX
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/system_properties.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <unwind.h>

#include "debug_mapinfo.h"
#include "debug_stacktrace.h"
#include "malloc_debug_backtrace.h"
#include "malloc_debug_common.h"
#include "malloc_debug_disable.h"
#include "private/bionic_macros.h"
#include "private/libc_logging.h"
#include "private/ScopedPthreadMutexLocker.h"

#define MAX_BACKTRACE_DEPTH 16
#define ALLOCATION_TAG 0x1ee7d00d
#define BACKLOG_TAG 0xbabecafe
#define FREE_POISON 0xa5
#define FRONT_GUARD 0xaa
#define FRONT_GUARD_LEN (1<<5)
#define REAR_GUARD 0xbb
#define REAR_GUARD_LEN (1<<5)

static void log_message(const char* format, ...) {
  va_list args;
  va_start(args, format);
  __libc_format_log_va_list(ANDROID_LOG_ERROR, "libc", format, args);
  va_end(args);
}

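// A tracked allocation is laid out in memory as:
//
//   [ hdr_t | user data (hdr->size bytes) | ftr_t ]
//
// user() returns the pointer handed back to the caller (just past the
// header), meta()/const_meta() recover the header from a user pointer,
// and to_ftr() locates the footer holding the rear guard bytes.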
struct hdr_t {
  uint32_t tag;
  void* base;  // Always points to the memory allocated using malloc.
               // For memory allocated in chk_memalign, this value will
               // not be the same as the location of the start of this
               // structure.
  hdr_t* prev;
  hdr_t* next;
  uintptr_t bt[MAX_BACKTRACE_DEPTH];
  int bt_depth;
  uintptr_t freed_bt[MAX_BACKTRACE_DEPTH];
  int freed_bt_depth;
  size_t size;
  uint8_t front_guard[FRONT_GUARD_LEN];
} __attribute__((packed, aligned(MALLOC_ALIGNMENT)));

struct ftr_t {
  uint8_t rear_guard[REAR_GUARD_LEN];
} __attribute__((packed));

static inline ftr_t* to_ftr(hdr_t* hdr) {
  return reinterpret_cast<ftr_t*>(reinterpret_cast<char*>(hdr + 1) + hdr->size);
}

static inline void* user(hdr_t* hdr) {
  return hdr + 1;
}

static inline hdr_t* meta(void* user) {
  return reinterpret_cast<hdr_t*>(user) - 1;
}

static inline const hdr_t* const_meta(const void* user) {
  return reinterpret_cast<const hdr_t*>(user) - 1;
}

// TODO: introduce a struct for this global state.
// There are basically two lists here, the regular list and the backlog list.
// We should be able to remove the duplication.
static unsigned g_allocated_block_count;
static hdr_t* tail;
static hdr_t* head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned backlog_num;
static hdr_t* backlog_tail;
static hdr_t* backlog_head;
static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER;

// This variable is set to the value of the property libc.debug.malloc.backlog.
// It determines the size of the backlog we use to detect multiple frees.
static unsigned g_malloc_debug_backlog = 100;

// This variable is set to false if the property libc.debug.malloc.nobacktrace
// is set to non-zero.
__LIBC_HIDDEN__ bool g_backtrace_enabled = true;

__LIBC_HIDDEN__ HashTable* g_hash_table;
__LIBC_HIDDEN__ const MallocDebug* g_malloc_dispatch;

static inline void init_front_guard(hdr_t* hdr) {
  memset(hdr->front_guard, FRONT_GUARD, FRONT_GUARD_LEN);
}

static inline bool is_front_guard_valid(hdr_t* hdr) {
  for (size_t i = 0; i < FRONT_GUARD_LEN; i++) {
    if (hdr->front_guard[i] != FRONT_GUARD) {
      return false;
    }
  }
  return true;
}

static inline void init_rear_guard(hdr_t* hdr) {
  ftr_t* ftr = to_ftr(hdr);
  memset(ftr->rear_guard, REAR_GUARD, REAR_GUARD_LEN);
}

static inline bool is_rear_guard_valid(hdr_t* hdr) {
  unsigned i;
  int valid = 1;
  int first_mismatch = -1;
  ftr_t* ftr = to_ftr(hdr);
  for (i = 0; i < REAR_GUARD_LEN; i++) {
    if (ftr->rear_guard[i] != REAR_GUARD) {
      if (first_mismatch < 0)
        first_mismatch = i;
      valid = 0;
    } else if (first_mismatch >= 0) {
      log_message("+++ REAR GUARD MISMATCH [%d, %d)\n", first_mismatch, i);
      first_mismatch = -1;
    }
  }

  if (first_mismatch >= 0)
    log_message("+++ REAR GUARD MISMATCH [%d, %d)\n", first_mismatch, i);
  return valid;
}

static inline void add_locked(hdr_t* hdr, hdr_t** tail, hdr_t** head) {
  hdr->prev = NULL;
  hdr->next = *head;
  if (*head)
    (*head)->prev = hdr;
  else
    *tail = hdr;
  *head = hdr;
}

static inline int del_locked(hdr_t* hdr, hdr_t** tail, hdr_t** head) {
  if (hdr->prev) {
    hdr->prev->next = hdr->next;
  } else {
    *head = hdr->next;
  }
  if (hdr->next) {
    hdr->next->prev = hdr->prev;
  } else {
    *tail = hdr->prev;
  }
  return 0;
}

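// add()/del() manage the list of live allocations (head/tail, protected by
// 'lock').  add_to_backlog()/del_from_backlog() manage the backlog of freed
// blocks (backlog_head/backlog_tail, protected by 'backlog_lock'), which are
// kept poisoned so that double frees and use-after-free can be detected.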
static inline void add(hdr_t* hdr, size_t size) {
  ScopedPthreadMutexLocker locker(&lock);
  hdr->tag = ALLOCATION_TAG;
  hdr->size = size;
  init_front_guard(hdr);
  init_rear_guard(hdr);
  ++g_allocated_block_count;
  add_locked(hdr, &tail, &head);
}

static inline int del(hdr_t* hdr) {
  if (hdr->tag != ALLOCATION_TAG) {
    return -1;
  }

  ScopedPthreadMutexLocker locker(&lock);
  del_locked(hdr, &tail, &head);
  --g_allocated_block_count;
  return 0;
}

static inline void poison(hdr_t* hdr) {
  memset(user(hdr), FREE_POISON, hdr->size);
}

static bool was_used_after_free(hdr_t* hdr) {
  const uint8_t* data = reinterpret_cast<const uint8_t*>(user(hdr));
  for (size_t i = 0; i < hdr->size; i++) {
    if (data[i] != FREE_POISON) {
      return true;
    }
  }
  return false;
}

/* returns 1 if valid, *safe == 1 if safe to dump stack */
static inline int check_guards(hdr_t* hdr, int* safe) {
  *safe = 1;
  if (!is_front_guard_valid(hdr)) {
    if (hdr->front_guard[0] == FRONT_GUARD) {
      log_message("+++ ALLOCATION %p SIZE %d HAS A CORRUPTED FRONT GUARD\n",
                  user(hdr), hdr->size);
    } else {
      log_message("+++ ALLOCATION %p HAS A CORRUPTED FRONT GUARD "
                  "(NOT DUMPING STACKTRACE)\n", user(hdr));
      /* Allocation header is probably corrupt, do not print stack trace */
      *safe = 0;
    }
    return 0;
  }

  if (!is_rear_guard_valid(hdr)) {
    log_message("+++ ALLOCATION %p SIZE %d HAS A CORRUPTED REAR GUARD\n",
                user(hdr), hdr->size);
    return 0;
  }

  return 1;
}

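/*
 * "safe" tells the caller whether the header still looks intact enough that
 * the backtraces stored in it (bt/bt_depth, freed_bt/freed_bt_depth) can be
 * trusted and dumped.  If the tag is bogus, or the guard bytes closest to
 * the header fields have been overwritten, the header is assumed to be
 * trashed and no stack trace is printed from it.
 */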
/* returns 1 if valid, *safe == 1 if safe to dump stack */
static inline int check_allocation_locked(hdr_t* hdr, int* safe) {
  int valid = 1;
  *safe = 1;

  if (hdr->tag != ALLOCATION_TAG && hdr->tag != BACKLOG_TAG) {
    log_message("+++ ALLOCATION %p HAS INVALID TAG %08x (NOT DUMPING STACKTRACE)\n",
                user(hdr), hdr->tag);
    // Allocation header is probably corrupt, do not dequeue or dump stack
    // trace.
    *safe = 0;
    return 0;
  }

  if (hdr->tag == BACKLOG_TAG && was_used_after_free(hdr)) {
    log_message("+++ ALLOCATION %p SIZE %d WAS USED AFTER BEING FREED\n",
                user(hdr), hdr->size);
    valid = 0;
    /* check the guards to see if it's safe to dump a stack trace */
    check_guards(hdr, safe);
  } else {
    valid = check_guards(hdr, safe);
  }

  if (!valid && *safe && g_backtrace_enabled) {
    log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n",
                user(hdr), hdr->size);
    log_backtrace(hdr->bt, hdr->bt_depth);
    if (hdr->tag == BACKLOG_TAG) {
      log_message("+++ ALLOCATION %p SIZE %d FREED HERE:\n",
                  user(hdr), hdr->size);
      log_backtrace(hdr->freed_bt, hdr->freed_bt_depth);
    }
  }

  return valid;
}

static inline int del_and_check_locked(hdr_t* hdr,
                                       hdr_t** tail, hdr_t** head, unsigned* cnt,
                                       int* safe) {
  int valid = check_allocation_locked(hdr, safe);
  if (safe) {
    (*cnt)--;
    del_locked(hdr, tail, head);
  }
  return valid;
}

static inline void del_from_backlog_locked(hdr_t* hdr) {
  int safe;
  del_and_check_locked(hdr,
                       &backlog_tail, &backlog_head, &backlog_num,
                       &safe);
  hdr->tag = 0; /* clear the tag */
}

static inline void del_from_backlog(hdr_t* hdr) {
  ScopedPthreadMutexLocker locker(&backlog_lock);
  del_from_backlog_locked(hdr);
}

static inline int del_leak(hdr_t* hdr, int* safe) {
  ScopedPthreadMutexLocker locker(&lock);
  return del_and_check_locked(hdr, &tail, &head, &g_allocated_block_count, safe);
}

static inline void add_to_backlog(hdr_t* hdr) {
  ScopedPthreadMutexLocker locker(&backlog_lock);
  hdr->tag = BACKLOG_TAG;
  backlog_num++;
  add_locked(hdr, &backlog_tail, &backlog_head);
  poison(hdr);
  /* If we've exceeded the maximum backlog, clear it up */
  while (backlog_num > g_malloc_debug_backlog) {
    hdr_t* gone = backlog_tail;
    del_from_backlog_locked(gone);
    g_malloc_dispatch->free(gone->base);
  }
}

extern "C" void* chk_malloc(size_t bytes) {
  // log_message("%s: %s\n", __FILE__, __FUNCTION__);
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->malloc(bytes);
  }

  size_t size = sizeof(hdr_t) + bytes + sizeof(ftr_t);
  if (size < bytes) { // Overflow
    errno = ENOMEM;
    return NULL;
  }
  hdr_t* hdr = static_cast<hdr_t*>(g_malloc_dispatch->malloc(size));
  if (hdr) {
    hdr->base = hdr;
    hdr->bt_depth = GET_BACKTRACE(hdr->bt, MAX_BACKTRACE_DEPTH);
    add(hdr, bytes);
    return user(hdr);
  }
  return NULL;
}

extern "C" void* chk_memalign(size_t alignment, size_t bytes) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->memalign(alignment, bytes);
  }

  if (alignment <= MALLOC_ALIGNMENT) {
    return chk_malloc(bytes);
  }

  // Make the alignment a power of two.
  if (!powerof2(alignment)) {
    alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
  }

  // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
  // we will align by at least MALLOC_ALIGNMENT bytes
  // and at most alignment-MALLOC_ALIGNMENT bytes
  size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
  if (size < bytes) { // Overflow.
    return NULL;
  }

  void* base = g_malloc_dispatch->malloc(sizeof(hdr_t) + size + sizeof(ftr_t));
  if (base != NULL) {
    // Check that the actual pointer that will be returned is aligned
    // properly.
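    // user(base) is already MALLOC_ALIGNMENT-aligned (hdr_t is padded to a
    // multiple of MALLOC_ALIGNMENT), so the adjustment below is at most
    // alignment - MALLOC_ALIGNMENT bytes, which is exactly the slack added
    // to 'size' above.  For example, assuming MALLOC_ALIGNMENT is 8 and the
    // caller asked for alignment 32, ptr is advanced by 0, 8, 16 or 24
    // bytes, and meta(ptr) still lies within the allocated block.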
    uintptr_t ptr = reinterpret_cast<uintptr_t>(user(reinterpret_cast<hdr_t*>(base)));
    if ((ptr % alignment) != 0) {
      // Align the pointer.
      ptr += ((-ptr) % alignment);
    }

    hdr_t* hdr = meta(reinterpret_cast<void*>(ptr));
    hdr->base = base;
    hdr->bt_depth = GET_BACKTRACE(hdr->bt, MAX_BACKTRACE_DEPTH);
    add(hdr, bytes);
    return user(hdr);
  }
  return base;
}

extern "C" void chk_free(void* ptr) {
  // log_message("%s: %s\n", __FILE__, __FUNCTION__);
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->free(ptr);
  }

  if (!ptr) /* ignore free(NULL) */
    return;

  hdr_t* hdr = meta(ptr);

  if (del(hdr) < 0) {
    uintptr_t bt[MAX_BACKTRACE_DEPTH];
    int depth = GET_BACKTRACE(bt, MAX_BACKTRACE_DEPTH);
    if (hdr->tag == BACKLOG_TAG) {
      log_message("+++ ALLOCATION %p SIZE %d BYTES MULTIPLY FREED!\n",
                  user(hdr), hdr->size);
      if (g_backtrace_enabled) {
        log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n",
                    user(hdr), hdr->size);
        log_backtrace(hdr->bt, hdr->bt_depth);
        /* hdr->freed_bt_depth should be nonzero here */
        log_message("+++ ALLOCATION %p SIZE %d FIRST FREED HERE:\n",
                    user(hdr), hdr->size);
        log_backtrace(hdr->freed_bt, hdr->freed_bt_depth);
        log_message("+++ ALLOCATION %p SIZE %d NOW BEING FREED HERE:\n",
                    user(hdr), hdr->size);
        log_backtrace(bt, depth);
      }
    } else {
      log_message("+++ ALLOCATION %p IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n",
                  user(hdr));
      if (g_backtrace_enabled) {
        log_backtrace(bt, depth);
      }
    }
  } else {
    hdr->freed_bt_depth = GET_BACKTRACE(hdr->freed_bt, MAX_BACKTRACE_DEPTH);
    add_to_backlog(hdr);
  }
}

extern "C" void* chk_realloc(void* ptr, size_t bytes) {
  // log_message("%s: %s\n", __FILE__, __FUNCTION__);
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->realloc(ptr, bytes);
  }

  if (!ptr) {
    return chk_malloc(bytes);
  }

#ifdef REALLOC_ZERO_BYTES_FREE
  if (!bytes) {
    chk_free(ptr);
    return NULL;
  }
#endif

  hdr_t* hdr = meta(ptr);

  if (del(hdr) < 0) {
    uintptr_t bt[MAX_BACKTRACE_DEPTH];
    int depth = GET_BACKTRACE(bt, MAX_BACKTRACE_DEPTH);
    if (hdr->tag == BACKLOG_TAG) {
      log_message("+++ REALLOCATION %p SIZE %d OF FREED MEMORY!\n",
                  user(hdr), bytes);
      if (g_backtrace_enabled) {
        log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n",
                    user(hdr), hdr->size);
        log_backtrace(hdr->bt, hdr->bt_depth);
        /* hdr->freed_bt_depth should be nonzero here */
        log_message("+++ ALLOCATION %p SIZE %d FIRST FREED HERE:\n",
                    user(hdr), hdr->size);
        log_backtrace(hdr->freed_bt, hdr->freed_bt_depth);
        log_message("+++ ALLOCATION %p SIZE %d NOW BEING REALLOCATED HERE:\n",
                    user(hdr), hdr->size);
        log_backtrace(bt, depth);
      }

      /* We take the memory out of the backlog and fall through so the
       * reallocation below succeeds. Since we didn't really free it, we
       * can default to this behavior.
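       * Note that del_from_backlog() below clears the BACKLOG_TAG, so from
       * here on the block is treated like any other tracked allocation.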
       */
      del_from_backlog(hdr);
    } else {
      log_message("+++ REALLOCATION %p SIZE %d IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n",
                  user(hdr), bytes);
      if (g_backtrace_enabled) {
        log_backtrace(bt, depth);
      }
      // just get a whole new allocation and leak the old one
      return g_malloc_dispatch->realloc(0, bytes);
      // return realloc(user(hdr), bytes); // assuming it was allocated externally
    }
  }

  size_t size = sizeof(hdr_t) + bytes + sizeof(ftr_t);
  if (size < bytes) { // Overflow
    errno = ENOMEM;
    return NULL;
  }
  if (hdr->base != hdr) {
    // An allocation from memalign, so create another allocation and
    // copy the data out.
    void* newMem = g_malloc_dispatch->malloc(size);
    if (newMem == NULL) {
      return NULL;
    }
    memcpy(newMem, hdr, sizeof(hdr_t) + hdr->size);
    g_malloc_dispatch->free(hdr->base);
    hdr = static_cast<hdr_t*>(newMem);
  } else {
    hdr = static_cast<hdr_t*>(g_malloc_dispatch->realloc(hdr, size));
  }
  if (hdr) {
    hdr->base = hdr;
    hdr->bt_depth = GET_BACKTRACE(hdr->bt, MAX_BACKTRACE_DEPTH);
    add(hdr, bytes);
    return user(hdr);
  }
  return NULL;
}

extern "C" void* chk_calloc(size_t nmemb, size_t bytes) {
  // log_message("%s: %s\n", __FILE__, __FUNCTION__);
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->calloc(nmemb, bytes);
  }

  size_t total_bytes = nmemb * bytes;
  size_t size = sizeof(hdr_t) + total_bytes + sizeof(ftr_t);
  if (size < total_bytes || (nmemb && SIZE_MAX / nmemb < bytes)) { // Overflow
    errno = ENOMEM;
    return NULL;
  }
  hdr_t* hdr = static_cast<hdr_t*>(g_malloc_dispatch->calloc(1, size));
  if (hdr) {
    hdr->base = hdr;
    hdr->bt_depth = GET_BACKTRACE(hdr->bt, MAX_BACKTRACE_DEPTH);
    add(hdr, total_bytes);
    return user(hdr);
  }
  return NULL;
}

extern "C" size_t chk_malloc_usable_size(const void* ptr) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->malloc_usable_size(ptr);
  }

  // malloc_usable_size returns 0 for NULL and unknown blocks.
  if (ptr == NULL)
    return 0;

  const hdr_t* hdr = const_meta(ptr);

  // The sentinel tail (ftr_t) is written just after the requested block
  // bytes, so there is no extra usable room to report here.
  return hdr->size;
}

extern "C" struct mallinfo chk_mallinfo() {
  return g_malloc_dispatch->mallinfo();
}

extern "C" int chk_posix_memalign(void** memptr, size_t alignment, size_t size) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->posix_memalign(memptr, alignment, size);
  }

  if (!powerof2(alignment)) {
    return EINVAL;
  }
  int saved_errno = errno;
  *memptr = chk_memalign(alignment, size);
  errno = saved_errno;
  return (*memptr != NULL) ?
      0 : ENOMEM;
}

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
extern "C" void* chk_pvalloc(size_t bytes) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->pvalloc(bytes);
  }

  size_t pagesize = getpagesize();
  size_t size = BIONIC_ALIGN(bytes, pagesize);
  if (size < bytes) { // Overflow
    return NULL;
  }
  return chk_memalign(pagesize, size);
}

extern "C" void* chk_valloc(size_t size) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->valloc(size);
  }
  return chk_memalign(getpagesize(), size);
}
#endif

static void ReportMemoryLeaks() {
  ScopedDisableDebugCalls disable;

  // Use /proc/self/exe link to obtain the program name for logging
  // purposes. If it's not available, we set it to "<unknown>".
  char exe[PATH_MAX];
  int count;
  if ((count = readlink("/proc/self/exe", exe, sizeof(exe) - 1)) == -1) {
    strlcpy(exe, "<unknown>", sizeof(exe));
  } else {
    exe[count] = '\0';
  }

  if (g_allocated_block_count == 0) {
    log_message("+++ %s did not leak", exe);
    return;
  }

  size_t index = 1;
  const size_t total = g_allocated_block_count;
  while (head != NULL) {
    int safe;
    hdr_t* block = head;
    log_message("+++ %s leaked block of size %d at %p (leak %d of %d)",
                exe, block->size, user(block), index++, total);
    if (del_leak(block, &safe) && g_backtrace_enabled) {
      /* safe == 1, because the allocation is valid */
      log_backtrace(block->bt, block->bt_depth);
    }
  }

  while (backlog_head != NULL) {
    del_from_backlog(backlog_tail);
  }
}

pthread_key_t g_debug_calls_disabled;

extern "C" bool malloc_debug_initialize(HashTable* hash_table, const MallocDebug* malloc_dispatch) {
  g_hash_table = hash_table;
  g_malloc_dispatch = malloc_dispatch;

  pthread_key_create(&g_debug_calls_disabled, NULL);

  char debug_backlog[PROP_VALUE_MAX];
  if (__system_property_get("libc.debug.malloc.backlog", debug_backlog)) {
    g_malloc_debug_backlog = atoi(debug_backlog);
    info_log("%s: setting backlog length to %d\n", getprogname(), g_malloc_debug_backlog);
  }

  // Check if backtracing should be disabled.
  char env[PROP_VALUE_MAX];
  if (__system_property_get("libc.debug.malloc.nobacktrace", env) && atoi(env) != 0) {
    g_backtrace_enabled = false;
    __libc_format_log(ANDROID_LOG_INFO, "libc", "not gathering backtrace information\n");
  }

  if (g_backtrace_enabled) {
    backtrace_startup();
  }

  return true;
}

extern "C" void malloc_debug_finalize(int malloc_debug_level) {
  // We only track leaks at level 10.
  if (malloc_debug_level == 10) {
    ReportMemoryLeaks();
  }
  if (g_backtrace_enabled) {
    backtrace_shutdown();
  }

  pthread_setspecific(g_debug_calls_disabled, NULL);
}