/******************************************************************************
 *
 *  Copyright (C) 2014 Google, Inc.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at:
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 ******************************************************************************/

#define LOG_TAG "bt_osi_allocation_tracker"

#include "osi/include/allocation_tracker.h"

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#include "osi/include/allocator.h"
#include "osi/include/hash_functions.h"
#include "osi/include/hash_map.h"
#include "osi/include/log.h"
#include "osi/include/osi.h"

typedef struct {
  uint8_t allocator_id;
  void *ptr;
  size_t size;
  bool freed;
} allocation_t;

// Hidden constructor for hash map for our use only. Everything else should use the
// normal interface.
hash_map_t *hash_map_new_internal(
    size_t size,
    hash_index_fn hash_fn,
    key_free_fn key_fn,
    data_free_fn data_fn,
    key_equality_fn equality_fn,
    const allocator_t *zeroed_allocator);

static bool allocation_entry_freed_checker(hash_map_entry_t *entry, void *context);
static void *untracked_calloc(size_t size);

static const size_t allocation_hash_map_size = 1024;
static const char *canary = "tinybird";
static const allocator_t untracked_calloc_allocator = {
  untracked_calloc,
  free
};

static size_t canary_size;
static hash_map_t *allocations;
static pthread_mutex_t lock;

void allocation_tracker_init(void) {
  if (allocations)
    return;

  canary_size = strlen(canary);

  pthread_mutex_init(&lock, NULL);

  pthread_mutex_lock(&lock);
  allocations = hash_map_new_internal(
      allocation_hash_map_size,
      hash_function_pointer,
      NULL,
      free,
      NULL,
      &untracked_calloc_allocator);
  pthread_mutex_unlock(&lock);
}

// Test function only. Do not call in the normal course of operations.
void allocation_tracker_uninit(void) {
  if (!allocations)
    return;

  pthread_mutex_lock(&lock);
  hash_map_free(allocations);
  allocations = NULL;
  pthread_mutex_unlock(&lock);
}

void allocation_tracker_reset(void) {
  if (!allocations)
    return;

  pthread_mutex_lock(&lock);
  hash_map_clear(allocations);
  pthread_mutex_unlock(&lock);
}

size_t allocation_tracker_expect_no_allocations(void) {
  if (!allocations)
    return 0;

  pthread_mutex_lock(&lock);

  size_t unfreed_memory_size = 0;
  hash_map_foreach(allocations, allocation_entry_freed_checker, &unfreed_memory_size);

  pthread_mutex_unlock(&lock);

  return unfreed_memory_size;
}

void *allocation_tracker_notify_alloc(uint8_t allocator_id, void *ptr, size_t requested_size) {
  if (!allocations || !ptr)
    return ptr;

  char *return_ptr = (char *)ptr;

  return_ptr += canary_size;

  pthread_mutex_lock(&lock);

  allocation_t *allocation = (allocation_t *)hash_map_get(allocations, return_ptr);
  if (allocation) {
    assert(allocation->freed); // Must have been freed before
  } else {
    allocation = (allocation_t *)calloc(1, sizeof(allocation_t));
    hash_map_set(allocations, return_ptr, allocation);
  }

  allocation->allocator_id = allocator_id;
  allocation->freed = false;
  allocation->size = requested_size;
  allocation->ptr = return_ptr;

  pthread_mutex_unlock(&lock);

  // Add the canary on both sides
  memcpy(return_ptr - canary_size, canary, canary_size);
  memcpy(return_ptr + requested_size, canary, canary_size);

  return return_ptr;
}

void *allocation_tracker_notify_free(UNUSED_ATTR uint8_t allocator_id, void *ptr) {
  if (!allocations || !ptr)
    return ptr;

  pthread_mutex_lock(&lock);

  allocation_t *allocation = (allocation_t *)hash_map_get(allocations, ptr);
  assert(allocation);                               // Must have been tracked before
  assert(!allocation->freed);                       // Must not be a double free
  assert(allocation->allocator_id == allocator_id); // Must be from the same allocator
  allocation->freed = true;

  UNUSED_ATTR const char *beginning_canary = ((char *)ptr) - canary_size;
  UNUSED_ATTR const char *end_canary = ((char *)ptr) + allocation->size;

  for (size_t i = 0; i < canary_size; i++) {
    assert(beginning_canary[i] == canary[i]);
    assert(end_canary[i] == canary[i]);
  }

  // Free the hash map entry to avoid unlimited memory usage growth.
  // Double-free of memory is detected with "assert(allocation)" above
  // as the allocation entry will not be present.
  hash_map_erase(allocations, ptr);

  pthread_mutex_unlock(&lock);

  return ((char *)ptr) - canary_size;
}

size_t allocation_tracker_resize_for_canary(size_t size) {
  return (!allocations) ? size : size + (2 * canary_size);
}

static bool allocation_entry_freed_checker(hash_map_entry_t *entry, void *context) {
  allocation_t *allocation = (allocation_t *)entry->data;
  if (!allocation->freed) {
    *((size_t *)context) += allocation->size; // Report back the unfreed byte count
    LOG_ERROR(LOG_TAG, "%s found unfreed allocation. address: 0x%zx size: %zd bytes", __func__, (uintptr_t)allocation->ptr, allocation->size);
  }

  return true;
}

static void *untracked_calloc(size_t size) {
  return calloc(size, 1);
}
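
/*
 * Usage sketch (illustrative only, not part of this module): a caller-side
 * allocator wrapper is expected to combine these functions roughly as shown
 * below. The wrapper names and the allocator id here are hypothetical; the
 * production wrappers belong to the allocator module declared in
 * osi/include/allocator.h.
 *
 *   static const uint8_t example_allocator_id = 42; // hypothetical id
 *
 *   void *example_malloc(size_t size) {
 *     size_t real_size = allocation_tracker_resize_for_canary(size);
 *     // notify_alloc records the allocation, writes the canaries, and
 *     // returns the pointer shifted past the leading canary.
 *     return allocation_tracker_notify_alloc(example_allocator_id,
 *                                            malloc(real_size), size);
 *   }
 *
 *   void example_free(void *ptr) {
 *     // notify_free verifies both canaries and returns the real start of
 *     // the underlying allocation, which is what must be passed to free().
 *     free(allocation_tracker_notify_free(example_allocator_id, ptr));
 *   }
 */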