//===------------------------ fallback_malloc.cpp -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "fallback_malloc.h"

#include "config.h"
#include <__threading_support>

#include <cstdlib> // for malloc, calloc, free
#include <cstring> // for memset

// The DEBUG_FALLBACK_MALLOC and INSTRUMENT_FALLBACK_MALLOC paths below print
// with std::cout, so they need <iostream>.
#if defined(DEBUG_FALLBACK_MALLOC) || defined(INSTRUMENT_FALLBACK_MALLOC)
#include <iostream>
#endif

// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool, supports malloc and free only.
// No support for realloc.
//
// Allocates chunks in multiples of four bytes, with a four byte header
// for each chunk. The overhead of each chunk is kept low by keeping pointers
// as two byte offsets within the heap, rather than (4 or 8 byte) pointers.

namespace {

// When threads are not available, make the mutex operations no-ops.
#ifndef _LIBCXXABI_HAS_NO_THREADS
_LIBCPP_SAFE_STATIC
static std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static void * heap_mutex = 0;
#endif

// RAII guard: holds the heap mutex for the lifetime of the object.
class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
    mutexor ( std::__libcpp_mutex_t *m ) : mtx_(m) {
        std::__libcpp_mutex_lock ( mtx_ );
    }
    ~mutexor () { std::__libcpp_mutex_unlock ( mtx_ ); }
#else
    mutexor ( void * ) {}
    ~mutexor () {}
#endif
private:
    mutexor ( const mutexor &rhs );
    mutexor & operator = ( const mutexor &rhs );
#ifndef _LIBCXXABI_HAS_NO_THREADS
    std::__libcpp_mutex_t *mtx_;
#endif
};


static const size_t HEAP_SIZE = 512;
char heap [ HEAP_SIZE ] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

struct heap_node {
    heap_offset next_node;  // offset into heap
    heap_size   len;        // size in units of "sizeof(heap_node)"
};

static const heap_node *list_end =
    (heap_node *) ( &heap [ HEAP_SIZE ] ); // one past the end of the heap
static heap_node *freelist = NULL;

heap_node *node_from_offset ( const heap_offset offset )
    { return (heap_node *) ( heap + ( offset * sizeof (heap_node))); }

heap_offset offset_from_node ( const heap_node *ptr )
    { return static_cast<heap_offset>(
          static_cast<size_t>(reinterpret_cast<const char *>(ptr) - heap)
              / sizeof (heap_node)); }

void init_heap () {
    freelist = (heap_node *) heap;
    freelist->next_node = offset_from_node ( list_end );
    freelist->len = HEAP_SIZE / sizeof (heap_node);
}

// Number of heap_node units needed to satisfy a request of `len` bytes,
// including one unit for the chunk header.
size_t alloc_size (size_t len)
    { return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1; }
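// Worked example (added illustration; assumes the usual 2-byte unsigned
// short, so sizeof(heap_node) == 4): alloc_size(1) through alloc_size(4)
// yield 2 units (8 bytes: a 4-byte header plus 4 bytes of payload), while
// alloc_size(5) through alloc_size(8) yield 3 units (12 bytes). The "+ 1"
// pays for the heap_node header that precedes every chunk.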

bool is_fallback_ptr ( void *ptr )
    { return ptr >= heap && ptr < ( heap + HEAP_SIZE ); }

void *fallback_malloc(size_t len) {
    heap_node *p, *prev;
    const size_t nelems = alloc_size ( len );
    mutexor mtx ( &heap_mutex );

    if ( NULL == freelist )
        init_heap ();

    // Walk the free list, looking for a "big enough" chunk
    for (p = freelist, prev = 0;
            p && p != list_end; prev = p, p = node_from_offset ( p->next_node)) {

        if (p->len > nelems) { // chunk is larger: shorten it, return the tail
            heap_node *q;

            p->len = static_cast<heap_size>(p->len - nelems);
            q = p + p->len;
            q->next_node = 0;
            q->len = static_cast<heap_size>(nelems);
            return (void *) (q + 1);
        }

        if (p->len == nelems) { // exact size match
            if (prev == 0)
                freelist = node_from_offset(p->next_node);
            else
                prev->next_node = p->next_node;
            p->next_node = 0;
            return (void *) (p + 1);
        }
    }
    return NULL; // couldn't find a spot big enough
}

// Return the start of the next block
heap_node *after ( struct heap_node *p ) { return p + p->len; }

void fallback_free (void *ptr) {
    struct heap_node *cp = ((struct heap_node *) ptr) - 1; // retrieve the chunk
    struct heap_node *p, *prev;

    mutexor mtx ( &heap_mutex );

#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << "Freeing item at " << offset_from_node ( cp )
              << " of size " << cp->len << std::endl;
#endif

    for (p = freelist, prev = 0;
            p && p != list_end; prev = p, p = node_from_offset (p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
        std::cout << "  p, cp, after (p), after(cp) "
            << offset_from_node ( p ) << ' '
            << offset_from_node ( cp ) << ' '
            << offset_from_node ( after ( p )) << ' '
            << offset_from_node ( after ( cp )) << std::endl;
#endif
        if ( after ( p ) == cp ) { // freed chunk directly follows p: merge into p
#ifdef DEBUG_FALLBACK_MALLOC
            std::cout << "  Appending onto chunk at "
                      << offset_from_node ( p ) << std::endl;
#endif
            p->len = static_cast<heap_size>(p->len + cp->len); // make the free heap_node larger
            return;
        }
        else if ( after ( cp ) == p ) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
            std::cout << "  Appending free chunk at "
                      << offset_from_node ( p ) << std::endl;
#endif
            cp->len = static_cast<heap_size>(cp->len + p->len);
            cp->next_node = p->next_node; // cp takes p's place in the free list;
                                          // without this, cp's stale next_node
                                          // (zeroed at allocation) would corrupt
                                          // the list in the prev != 0 case
            if ( prev == 0 )
                freelist = cp;
            else
                prev->next_node = offset_from_node(cp);
            return;
        }
    }
    // Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << "  Making new free list entry "
              << offset_from_node ( cp ) << std::endl;
#endif
    cp->next_node = offset_from_node ( freelist );
    freelist = cp;
}

#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list () {
    struct heap_node *p, *prev;
    size_t total_free = 0;
    if ( NULL == freelist )
        init_heap ();

    for (p = freelist, prev = 0;
            p && p != list_end; prev = p, p = node_from_offset (p->next_node)) {
        std::cout << ( prev == 0 ? "" : "  ") << "Offset: " << offset_from_node ( p )
            << "\tsize: " << p->len << " Next: " << p->next_node << std::endl;
        total_free += p->len;
    }
    std::cout << "Total Free space: " << total_free << std::endl;
    return total_free;
}
#endif
} // end unnamed namespace

namespace __cxxabiv1 {

#pragma GCC visibility push(hidden)

void * __malloc_with_fallback(size_t size) {
    void *ptr = std::malloc(size);
    if (NULL == ptr) // if malloc fails, fall back to emergency stash
        ptr = fallback_malloc(size);
    return ptr;
}

void * __calloc_with_fallback(size_t count, size_t size) {
    void *ptr = std::calloc(count, size);
    if (NULL != ptr)
        return ptr;
    // if calloc fails, fall back to emergency stash; guard the multiplication
    // against overflow, which calloc itself would have rejected
    if (size != 0 && count > size_t(-1) / size)
        return NULL;
    ptr = fallback_malloc(size * count);
    if (NULL != ptr)
        std::memset(ptr, 0, size * count);
    return ptr;
}

void __free_with_fallback(void *ptr) {
    if (is_fallback_ptr(ptr))
        fallback_free(ptr);
    else
        std::free(ptr);
}

#pragma GCC visibility pop

} // namespace __cxxabiv1
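// Usage sketch (illustrative comment, not part of this file's interface):
// these wrappers exist so the exception-handling runtime (for example,
// __cxa_allocate_exception) can still obtain memory for an exception object
// after the process heap is exhausted:
//
//   void *p = __cxxabiv1::__malloc_with_fallback(sizeof(some_exception));
//   // ... construct, throw, and later destroy the exception object ...
//   __cxxabiv1::__free_with_fallback(p);
//
// Because __free_with_fallback dispatches on is_fallback_ptr(), the same
// call correctly releases pointers from either std::malloc or the pool.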