//===-- tsan_mman.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    FlushUnneededShadowMemory((uptr)MemToMeta(p), size / kMetaRatio);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void InitializeAllocator() {
  allocator()->Init(common_flags()->allocator_may_return_null);
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return allocator()->ReturnNullOrDie();
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CallocShouldReturnNullDueToOverflow(size, n))
    return allocator()->ReturnNullOrDie();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      uptr oldsz = user_alloc_usable_size(p);
      internal_memcpy(p2, p, min(oldsz, sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"