/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

// <errno.h> is needed for errno/EINVAL/ENOMEM and <unistd.h> for usleep().
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#include <private/bionic_malloc_dispatch.h>

#if __has_feature(hwaddress_sanitizer)
#include <sanitizer/allocator_interface.h>
#endif

#include "malloc_common.h"
#include "malloc_common_dynamic.h"
#include "malloc_heapprofd.h"
#include "malloc_limit.h"

__BEGIN_DECLS
static void* LimitCalloc(size_t n_elements, size_t elem_size);
static void LimitFree(void* mem);
static void* LimitMalloc(size_t bytes);
static void* LimitMemalign(size_t alignment, size_t bytes);
static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size);
static void* LimitRealloc(void* old_mem, size_t bytes);
static void* LimitAlignedAlloc(size_t alignment, size_t size);
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
static void* LimitPvalloc(size_t bytes);
static void* LimitValloc(size_t bytes);
#endif

// Pass through functions.
static size_t LimitUsableSize(const void* mem);
static struct mallinfo LimitMallinfo();
static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*),
                        void* arg);
static void LimitMallocDisable();
static void LimitMallocEnable();
static int LimitMallocInfo(int options, FILE* fp);
static int LimitMallopt(int param, int value);
__END_DECLS
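// Every entry point below follows the same pattern: if another dispatch
// table (e.g. heapprofd's) has been installed as the default, forward the
// call to it; otherwise call directly into the native allocator through the
// Malloc() macro. The limit bookkeeping is layered on top of whichever path
// runs.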
static constexpr MallocDispatch __limit_dispatch
  __attribute__((unused)) = {
    LimitCalloc,
    LimitFree,
    LimitMallinfo,
    LimitMalloc,
    LimitUsableSize,
    LimitMemalign,
    LimitPosixMemalign,
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
    LimitPvalloc,
#endif
    LimitRealloc,
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
    LimitValloc,
#endif
    LimitIterate,
    LimitMallocDisable,
    LimitMallocEnable,
    LimitMallopt,
    LimitAlignedAlloc,
    LimitMallocInfo,
  };

static _Atomic uint64_t gAllocated;
static uint64_t gAllocLimit;
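// gAllocated tracks the usable size (not the requested size) of every live
// allocation. CheckLimit is an optimistic pre-check against the requested
// size: it uses a relaxed load and takes no lock, so two racing threads can
// both pass the check and push the total slightly past gAllocLimit, but the
// counter itself stays consistent because every update is an atomic
// fetch-add or fetch-sub.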
static inline bool CheckLimit(size_t bytes) {
  uint64_t total;
  if (__predict_false(__builtin_add_overflow(
                          atomic_load_explicit(&gAllocated, memory_order_relaxed), bytes, &total) ||
                      total > gAllocLimit)) {
    return false;
  }
  return true;
}

static inline void* IncrementLimit(void* mem) {
  if (__predict_false(mem == nullptr)) {
    return nullptr;
  }
  atomic_fetch_add(&gAllocated, LimitUsableSize(mem));
  return mem;
}

void* LimitCalloc(size_t n_elements, size_t elem_size) {
  size_t total;
  // The total allocation size is n_elements * elem_size, so this needs a
  // multiply overflow check, not an add.
  if (__builtin_mul_overflow(n_elements, elem_size, &total) || !CheckLimit(total)) {
    warning_log("malloc_limit: calloc(%zu, %zu) exceeds limit %" PRIu64, n_elements, elem_size,
                gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->calloc(n_elements, elem_size));
  }
  return IncrementLimit(Malloc(calloc)(n_elements, elem_size));
}

void LimitFree(void* mem) {
  atomic_fetch_sub(&gAllocated, LimitUsableSize(mem));
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->free(mem);
  }
  return Malloc(free)(mem);
}

void* LimitMalloc(size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: malloc(%zu) exceeds limit %" PRIu64, bytes, gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->malloc(bytes));
  }
  return IncrementLimit(Malloc(malloc)(bytes));
}

static void* LimitMemalign(size_t alignment, size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: memalign(%zu, %zu) exceeds limit %" PRIu64, alignment, bytes,
                gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->memalign(alignment, bytes));
  }
  return IncrementLimit(Malloc(memalign)(alignment, bytes));
}

static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size) {
  if (!CheckLimit(size)) {
    warning_log("malloc_limit: posix_memalign(%zu, %zu) exceeds limit %" PRIu64, alignment, size,
                gAllocLimit);
    return ENOMEM;
  }
  int retval;
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    retval = dispatch_table->posix_memalign(memptr, alignment, size);
  } else {
    retval = Malloc(posix_memalign)(memptr, alignment, size);
  }
  if (__predict_false(retval != 0)) {
    return retval;
  }
  IncrementLimit(*memptr);
  return 0;
}

static void* LimitAlignedAlloc(size_t alignment, size_t size) {
  if (!CheckLimit(size)) {
    warning_log("malloc_limit: aligned_alloc(%zu, %zu) exceeds limit %" PRIu64, alignment, size,
                gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->aligned_alloc(alignment, size));
  }
  return IncrementLimit(Malloc(aligned_alloc)(alignment, size));
}

static void* LimitRealloc(void* old_mem, size_t bytes) {
  size_t old_usable_size = LimitUsableSize(old_mem);
  void* new_ptr;
  // Need to check the size only if the allocation will increase in size.
  if (bytes > old_usable_size && !CheckLimit(bytes - old_usable_size)) {
    warning_log("malloc_limit: realloc(%p, %zu) exceeds limit %" PRIu64, old_mem, bytes,
                gAllocLimit);
    // Free the old pointer.
    LimitFree(old_mem);
    return nullptr;
  }

  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    new_ptr = dispatch_table->realloc(old_mem, bytes);
  } else {
    new_ptr = Malloc(realloc)(old_mem, bytes);
  }

  if (__predict_false(new_ptr == nullptr)) {
    // This acts as if the pointer was freed.
    atomic_fetch_sub(&gAllocated, old_usable_size);
    return nullptr;
  }

  size_t new_usable_size = LimitUsableSize(new_ptr);
  // Assumes that most allocations increase in size, rather than shrink.
  if (__predict_false(old_usable_size > new_usable_size)) {
    atomic_fetch_sub(&gAllocated, old_usable_size - new_usable_size);
  } else {
    atomic_fetch_add(&gAllocated, new_usable_size - old_usable_size);
  }
  return new_ptr;
}

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
static void* LimitPvalloc(size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: pvalloc(%zu) exceeds limit %" PRIu64, bytes, gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->pvalloc(bytes));
  }
  return IncrementLimit(Malloc(pvalloc)(bytes));
}

static void* LimitValloc(size_t bytes) {
  if (!CheckLimit(bytes)) {
    warning_log("malloc_limit: valloc(%zu) exceeds limit %" PRIu64, bytes, gAllocLimit);
    return nullptr;
  }
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return IncrementLimit(dispatch_table->valloc(bytes));
  }
  return IncrementLimit(Malloc(valloc)(bytes));
}
#endif

#if defined(LIBC_STATIC)
static bool EnableLimitDispatchTable() {
  // This is the only valid way to modify the dispatch tables for a
  // static executable, so no locks are necessary.
  __libc_globals.mutate([](libc_globals* globals) {
    atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
  });
  return true;
}
#else
static bool EnableLimitDispatchTable() {
  HeapprofdMaskSignal();
  pthread_mutex_lock(&gGlobalsMutateLock);
  // All other code that calls mutate will grab the gGlobalsMutateLock.
  // However, there is one case where the lock cannot be acquired: the
  // signal handler that enables heapprofd. In order to avoid having two
  // threads calling mutate at the same time, use an atomic variable to
  // verify that only this function or the signal handler are calling mutate.
  // If this function is called at the same time as the signal handler is
  // being called, allow up to 20ms (20 tries, 1ms apart) for the signal
  // handler to complete before failing.
  bool enabled = false;
  size_t num_tries = 20;
  while (true) {
    if (!atomic_exchange(&gGlobalsMutating, true)) {
      __libc_globals.mutate([](libc_globals* globals) {
        atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
      });
      atomic_store(&gGlobalsMutating, false);
      enabled = true;
      break;
    }
    if (--num_tries == 0) {
      break;
    }
    usleep(1000);
  }
  pthread_mutex_unlock(&gGlobalsMutateLock);
  HeapprofdUnmaskSignal();
  if (enabled) {
    info_log("malloc_limit: Allocation limit enabled, max size %" PRIu64 " bytes\n", gAllocLimit);
  } else {
    error_log("malloc_limit: Failed to enable allocation limit.");
  }
  return enabled;
}
#endif
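// LimitEnable installs the limit and seeds the counter with the bytes
// already allocated. In bionic it is normally reached through
// android_mallopt(); a minimal sketch of a caller, assuming the
// M_SET_ALLOCATION_LIMIT_BYTES opcode from <malloc.h>:
//
//   size_t limit = 32 * 1024 * 1024;  // cap heap usage at 32 MiB
//   if (!android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
//     // Either the limit was already set once, or the arguments were
//     // invalid (errno is set to EINVAL in the latter case).
//   }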
bool LimitEnable(void* arg, size_t arg_size) {
  if (arg == nullptr || arg_size != sizeof(size_t)) {
    errno = EINVAL;
    return false;
  }

  static _Atomic bool limit_enabled;
  if (atomic_exchange(&limit_enabled, true)) {
    // The limit can only be enabled once.
    error_log("malloc_limit: The allocation limit has already been set, it can only be set once.");
    return false;
  }

  gAllocLimit = *reinterpret_cast<size_t*>(arg);
#if __has_feature(hwaddress_sanitizer)
  size_t current_allocated = __sanitizer_get_current_allocated_bytes();
#else
  size_t current_allocated;
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    current_allocated = dispatch_table->mallinfo().uordblks;
  } else {
    current_allocated = Malloc(mallinfo)().uordblks;
  }
#endif
  atomic_store(&gAllocated, current_allocated);

  return EnableLimitDispatchTable();
}
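// The remaining entry points do no limit accounting of their own; they are
// pure pass-throughs to whichever allocator is currently active.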
static size_t LimitUsableSize(const void* mem) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->malloc_usable_size(mem);
  }
  return Malloc(malloc_usable_size)(mem);
}

static struct mallinfo LimitMallinfo() {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->mallinfo();
  }
  return Malloc(mallinfo)();
}

static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*),
                        void* arg) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->iterate(base, size, callback, arg);
  }
  return Malloc(iterate)(base, size, callback, arg);
}

static void LimitMallocDisable() {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    dispatch_table->malloc_disable();
  } else {
    Malloc(malloc_disable)();
  }
}

static void LimitMallocEnable() {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    dispatch_table->malloc_enable();
  } else {
    Malloc(malloc_enable)();
  }
}

static int LimitMallocInfo(int options, FILE* fp) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->malloc_info(options, fp);
  }
  return Malloc(malloc_info)(options, fp);
}

static int LimitMallopt(int param, int value) {
  auto dispatch_table = GetDefaultDispatchTable();
  if (__predict_false(dispatch_table != nullptr)) {
    return dispatch_table->mallopt(param, value);
  }
  return Malloc(mallopt)(param, value);
}