//=-- lsan_common_linux.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Linux-specific code.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#include "lsan_common.h"

#if CAN_SANITIZE_LEAKS && SANITIZER_LINUX
#include <link.h>

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __lsan {

static const char kLinkerName[] = "ld";
// We request 2 modules matching "ld", so we can print a warning if there's
// more than one match. But only the first one is actually used.
static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = nullptr;

static bool IsLinker(const char* full_name) {
  return LibraryNameIs(full_name, kLinkerName);
}

void InitializePlatformSpecificModules() {
  internal_memset(linker_placeholder, 0, sizeof(linker_placeholder));
  uptr num_matches = GetListOfModules(
      reinterpret_cast<LoadedModule *>(linker_placeholder), 2, IsLinker);
  if (num_matches == 1) {
    linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
    return;
  }
  if (num_matches == 0)
    VReport(1, "LeakSanitizer: Dynamic linker not found. "
               "TLS will not be handled correctly.\n");
  else if (num_matches > 1)
    VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
               "TLS will not be handled correctly.\n", kLinkerName);
  linker = nullptr;
}

static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
                                        void *data) {
  Frontier *frontier = reinterpret_cast<Frontier *>(data);
  for (uptr j = 0; j < info->dlpi_phnum; j++) {
    const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
    // We're looking for .data and .bss sections, which reside in writeable,
    // loadable segments.
    if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
        (phdr->p_memsz == 0))
      continue;
    uptr begin = info->dlpi_addr + phdr->p_vaddr;
    uptr end = begin + phdr->p_memsz;
    uptr allocator_begin = 0, allocator_end = 0;
    GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
    if (begin <= allocator_begin && allocator_begin < end) {
      CHECK_LE(allocator_begin, allocator_end);
      // The allocator range may end exactly at the segment end, so LE, not LT.
      CHECK_LE(allocator_end, end);
      if (begin < allocator_begin)
        ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                             kReachable);
      if (allocator_end < end)
        ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
                             kReachable);
    } else {
      ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
    }
  }
  return 0;
}

// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
  if (!flags()->use_globals) return;
  dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}
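// For reference, a minimal, self-contained sketch of the same
// dl_iterate_phdr() pattern used above (illustrative only, not part of LSan;
// CountWritableSegmentsCb is a hypothetical name). It counts the writable
// PT_LOAD segments of every loaded module, i.e. exactly the regions the
// callback above scans:
//
//   #include <link.h>
//   static int CountWritableSegmentsCb(struct dl_phdr_info *info, size_t size,
//                                      void *data) {
//     for (int i = 0; i < info->dlpi_phnum; i++) {
//       const ElfW(Phdr) *phdr = &info->dlpi_phdr[i];
//       // Same filter as ProcessGlobalRegionsCallback(): writable, loadable,
//       // non-empty segments are where .data and .bss live.
//       if ((phdr->p_flags & PF_W) && phdr->p_type == PT_LOAD &&
//           phdr->p_memsz > 0)
//         ++*static_cast<int *>(data);
//     }
//     return 0;  // Returning 0 continues iteration over remaining modules.
//   }
//   // Usage: int n = 0; dl_iterate_phdr(CountWritableSegmentsCb, &n);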
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

struct ProcessPlatformAllocParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
};

// ForEachChunk callback. Identifies unreachable chunks which must be treated
// as reachable. Marks them as reachable and adds them to the frontier.
static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
  CHECK(arg);
  ProcessPlatformAllocParam *param =
      reinterpret_cast<ProcessPlatformAllocParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine.
    // Mark it as reachable, as we can't properly report its allocation stack
    // anyway.
    if (caller_pc == 0 || linker->containsAddress(caller_pc)) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by
// intercepting __libc_memalign(). On top of that, there is no easy way to
// reach them. Their addresses are stored in a dynamically allocated array
// (the DTV) which is referenced from the static TLS. Unfortunately, we can't
// just rely on the DTV being reachable from the static TLS, and the dynamic
// TLS being reachable from the DTV. This is because the initial DTV is
// allocated before our interception mechanism kicks in, and thus we don't
// recognize it as allocated memory. We can't special-case it either, since
// we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other
// allocations which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
  if (!flags()->use_tls) return;
  if (!linker) return;
  StackDepotReverseMap stack_depot_reverse_map;
  ProcessPlatformAllocParam arg = {frontier, &stack_depot_reverse_map};
  ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}
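// As a standalone illustration of the interception technique the comment
// above relies on (LSan's actual interceptor is defined with the sanitizer
// interceptor machinery in lsan_interceptors.cc; OurMemalign below is a
// hypothetical stand-in for the sanitizer allocator entry point):
//
//   extern "C" void *__libc_memalign(uptr alignment, uptr size) {
//     // Satisfy glibc's request from our own allocator, so the resulting
//     // chunk carries LSan metadata and can later be tagged kReachable.
//     return OurMemalign(alignment, size);
//   }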
struct DoStopTheWorldParam {
  StopTheWorldCallback callback;
  void *argument;
};

static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
                                  void *data) {
  DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
  StopTheWorld(param->callback, param->argument);
  return 1;
}

// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if
// one of the threads is frozen while holding the libdl lock, the tracer will
// hang in dl_iterate_phdr() forever.
// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between
// the tracer task and the thread that spawned it. Thus, if we run the tracer
// task while holding the libdl lock in the parent thread, we can safely
// reenter it in the tracer. The solution is to run stoptheworld from a
// dl_iterate_phdr() callback in the parent thread.
void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
  DoStopTheWorldParam param = {callback, argument};
  dl_iterate_phdr(DoStopTheWorldCallback, &param);
}

}  // namespace __lsan

#endif  // CAN_SANITIZE_LEAKS && SANITIZER_LINUX
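// A standalone sketch of the trick used by DoStopTheWorld() above, for
// reference outside the sanitizer runtime (RunUnderLibdlLockCb and
// WithLibdlLockHeld are hypothetical names): enter a dl_iterate_phdr()
// callback to take the libdl lock in the current thread, then do the
// lock-sensitive work from inside that callback.
//
//   #include <link.h>
//   static int RunUnderLibdlLockCb(struct dl_phdr_info *info, size_t size,
//                                  void *arg) {
//     (*static_cast<void (**)()>(arg))();  // The libdl lock is held here.
//     return 1;  // A nonzero return stops iteration after the first module.
//   }
//   static void WithLibdlLockHeld(void (*fn)()) {
//     dl_iterate_phdr(RunUnderLibdlLockCb, &fn);
//   }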