// Copyright (C) 2012 The Android Open Source Project
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
// 3. Neither the name of the project nor the names of its contributors
//    may be used to endorse or promote products derived from this software
//    without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE.

#include <limits.h>
#include <malloc.h>    // for memalign()
#include <pthread.h>
#include <stdint.h>    // for uint64_t
#include <string.h>    // for memset()
#include <sys/mman.h>

#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <exception>

#include "cxxabi_defines.h"
#include "helper_func_internal.h"

namespace {

using namespace __cxxabiv1;

bool isOurCxxException(uint64_t exc) {
  // GNU-compatible C++ exceptions carry this exception class value.
  return exc == __gxx_exception_class;
}

void defaultExceptionCleanupFunc(_Unwind_Reason_Code reason,
                                 _Unwind_Exception* exc) {
  // The thrown object lives just after the __cxa_exception header, whose
  // last field is the _Unwind_Exception, hence |exc + 1|.
  __cxa_free_exception(exc + 1);
}

// Helper class used to ensure a lock is acquired immediately, and released
// on scope exit. Usage example:
//
//   {
//     AutoLock lock(some_mutex);  // acquires the mutex.
//     ... do stuff
//     if (error)
//       return;                   // releases mutex before returning.
//     ... do other stuff.
//   }                             // releases mutex before exiting scope.
//
class AutoLock {
 public:
  AutoLock(pthread_mutex_t& lock) : lock_(lock) {
    pthread_mutex_lock(&lock_);
  }

  ~AutoLock() {
    pthread_mutex_unlock(&lock_);
  }

 private:
  pthread_mutex_t& lock_;

  AutoLock(const AutoLock&);
  AutoLock& operator=(const AutoLock&);
};

// MMap-based memory allocator for fixed-size items.
//
// IMPORTANT: This must be POD-struct compatible, which means:
//   - No constructor or destructor.
//   - No virtual methods.
//
// This allocates large blocks of memory, called 'slabs', that can contain
// several items of the same size. A slab contains an array of item slots,
// followed by a pointer used to put all slabs in a single linked list.
class PageBasedAllocator {
 public:
  // Used to initialize this allocator to hold items of type |T|.
  template <typename T>
  void Init() {
    InitExplicit(sizeof(T), __alignof__(T));
  }

  // Used to initialize this instance to hold items of |item_size| bytes,
  // with alignment |align_size|.
  void InitExplicit(size_t item_size, size_t align_size) {
    const size_t ptr_size = sizeof(void*);
    if (align_size < ptr_size)
      align_size = ptr_size;
    // Round the item size up to the alignment.
    item_size_ = (item_size + align_size - 1) & ~(align_size - 1);
    // The 'next slab' pointer occupies the last |ptr_size| bytes of each slab.
    slab_next_offset_ = kSlabSize - ptr_size;
    item_slab_count_ = slab_next_offset_ / item_size_;

    pthread_mutex_init(&lock_, NULL);
    free_items_ = NULL;
    slab_list_ = NULL;
  }

  // Call this to deallocate this instance. This releases all pages directly.
  // Ensure that all items are freed first, or bad things could happen.
  void Deinit() {
    pthread_mutex_lock(&lock_);
    while (slab_list_) {
      void* slab = slab_list_;
      void* next_slab = *(void**)((char*)slab + slab_next_offset_);
      slab_list_ = next_slab;
      ::munmap(slab, PAGE_SIZE);
    }
    pthread_mutex_unlock(&lock_);
    pthread_mutex_destroy(&lock_);
  }

  // Allocates a new zero-initialized item, or returns NULL on failure.
  void* Alloc() {
    AutoLock lock(lock_);

    if (!free_items_ && !AllocateSlab())
      return NULL;

    FreeItem* item = free_items_;
    free_items_ = item->next;
    ::memset(item, 0, item_size_);
    return item;
  }

  // Returns an item previously obtained from Alloc() to the free list.
  void Release(void* obj) {
    if (!obj)
      return;

    AutoLock lock(lock_);
    FreeItem* item = reinterpret_cast<FreeItem*>(obj);
    item->next = free_items_;
    free_items_ = item;
  }

 private:
  static const size_t kSlabSize = PAGE_SIZE;

  bool AllocateSlab() {
    // No more free items, allocate a new slab with mmap().
    void* new_slab = mmap(NULL, kSlabSize, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (new_slab == MAP_FAILED)
      return false;

    // Prepend to the slab list.
    *((void**)((char*)new_slab + slab_next_offset_)) = slab_list_;
    slab_list_ = new_slab;

    // Put all item slots in the new slab into the free item list.
    FreeItem** pparent = &free_items_;
    FreeItem* item = reinterpret_cast<FreeItem*>(new_slab);
    for (size_t n = 0; n < item_slab_count_; ++n) {
      *pparent = item;
      pparent = &item->next;
      item = reinterpret_cast<FreeItem*>((char*)item + item_size_);
    }
    *pparent = NULL;

    // Done.
    return true;
  }

  struct FreeItem {
    FreeItem* next;
  };

  size_t item_size_;         // size of each item in bytes.
  size_t item_slab_count_;   // number of items in each slab.
  size_t slab_next_offset_;  // offset of the pointer to the next slab in the list.
  pthread_mutex_t lock_;     // mutex synchronizing access to the data below.
  void* slab_list_;          // Linked list of slabs.
  FreeItem* free_items_;     // Linked list of free items.
};

// Technical note:
// A pthread_key_t holds the key used to store our thread-specific
// __cxa_eh_globals objects. The key is created and destroyed through
// a static C++ object.
//
// Due to a bug in the dynamic linker that was only fixed in Froyo, the
// static C++ destructor may be called with a value of NULL for the
// 'this' pointer. As such, any attempt to access any field in the
// object there will result in a crash. To work around this, store
// the members of CxaThreadKey as static variables outside of the
// C++ object.
static pthread_key_t __cxa_thread_key;
static PageBasedAllocator __cxa_eh_globals_allocator;

class CxaThreadKey {
 public:
  // Called at program initialization time, or when the shared library
  // is loaded through dlopen().
  CxaThreadKey() {
    if (pthread_key_create(&__cxa_thread_key, freeObject) != 0)
      __gabixx::__fatal_error("Can't allocate C++ runtime pthread_key_t");
    __cxa_eh_globals_allocator.Init<__cxa_eh_globals>();
  }

  // Called at program exit time, or when the shared library is
  // unloaded through dlclose(). See note above.
  ~CxaThreadKey() {
    __cxa_eh_globals_allocator.Deinit();
    pthread_key_delete(__cxa_thread_key);
  }

  static __cxa_eh_globals* getFast() {
    void* obj = pthread_getspecific(__cxa_thread_key);
    return reinterpret_cast<__cxa_eh_globals*>(obj);
  }

  static __cxa_eh_globals* getSlow() {
    void* obj = pthread_getspecific(__cxa_thread_key);
    if (obj == NULL) {
      // malloc() cannot be used here because this method is sometimes
      // called from malloc() on Android, and this would dead-lock.
      //
      // More specifically, if the libc.debug.malloc system property is not 0
      // on a userdebug or eng build of the platform, malloc() will call
      // backtrace() to record stack traces of allocations. On ARM, this
      // forces an unwinding operation which will call this function at
      // some point.
      obj = __cxa_eh_globals_allocator.Alloc();
      if (!obj) {
        // Shouldn't happen, but better safe than sorry.
        __gabixx::__fatal_error(
            "Can't allocate thread-specific C++ runtime info block.");
      }
      pthread_setspecific(__cxa_thread_key, obj);
    }
    return reinterpret_cast<__cxa_eh_globals*>(obj);
  }

 private:
  // Called when a thread is destroyed.
  static void freeObject(void* obj) {
    __cxa_eh_globals_allocator.Release(obj);
  }
};

// The single static instance; this forces the compiler to register
// a constructor and destructor for this object in the final library
// file. They handle the pthread_key_t allocation/deallocation.
static CxaThreadKey instance;

_GABIXX_NORETURN void throwException(__cxa_exception* header) {
  __cxa_eh_globals* globals = __cxa_get_globals();
  header->unexpectedHandler = std::get_unexpected();
  header->terminateHandler = std::get_terminate();
  globals->uncaughtExceptions += 1;

  _Unwind_RaiseException(&header->unwindHeader);

  // We should not be here: _Unwind_RaiseException() only returns on error.
  call_terminate(&header->unwindHeader);
}

}  // anonymous namespace


namespace __cxxabiv1 {

__shim_type_info::~__shim_type_info() {
}

extern "C" void __cxa_pure_virtual() {
  __gabixx::__fatal_error("Pure virtual function called!");
}

extern "C" void __cxa_deleted_virtual() {
  __gabixx::__fatal_error("Deleted virtual function called!");
}

extern "C" __cxa_eh_globals* __cxa_get_globals() _GABIXX_NOEXCEPT {
  return CxaThreadKey::getSlow();
}

extern "C" __cxa_eh_globals* __cxa_get_globals_fast() _GABIXX_NOEXCEPT {
  return CxaThreadKey::getFast();
}

extern "C" void* __cxa_allocate_exception(size_t thrown_size) _GABIXX_NOEXCEPT {
  size_t size = thrown_size + sizeof(__cxa_exception);
  __cxa_exception* buffer = static_cast<__cxa_exception*>(
      memalign(__alignof__(__cxa_exception), size));
  if (!buffer) {
    // Since Android uses memory overcommit, we enter here only when
    // the exception object is VERY large. This will probably never happen.
    // Therefore, we do not reserve any emergency space.
    __gabixx::__fatal_error("Not enough memory to allocate exception!");
  }

  ::memset(buffer, 0, sizeof(__cxa_exception));
  // The thrown object is constructed just after the __cxa_exception header.
  return buffer + 1;
}

extern "C" void __cxa_free_exception(void* thrown_exception) _GABIXX_NOEXCEPT {
  __cxa_exception* exc = static_cast<__cxa_exception*>(thrown_exception) - 1;

  if (exc->exceptionDestructor) {
    try {
      exc->exceptionDestructor(thrown_exception);
    } catch (...) {
      __gabixx::__fatal_error("Exception destructor has thrown!");
    }
  }

  free(exc);
}

extern "C" void __cxa_throw(void* thrown_exc,
                            std::type_info* tinfo,
                            void (*dest)(void*)) {
  __cxa_exception* header = static_cast<__cxa_exception*>(thrown_exc) - 1;
  header->exceptionType = tinfo;
  header->exceptionDestructor = dest;

  header->unwindHeader.exception_class = __gxx_exception_class;
  header->unwindHeader.exception_cleanup = defaultExceptionCleanupFunc;

  throwException(header);
}

extern "C" void __cxa_rethrow() {
  __cxa_eh_globals* globals = __cxa_get_globals();
  __cxa_exception* header = globals->caughtExceptions;
  if (!header) {
    __gabixx::__fatal_error(
        "Attempting to rethrow an exception that doesn't exist!");
  }
  _Unwind_Exception* exception = &header->unwindHeader;

  if (isOurCxxException(exception->exception_class)) {
    header->handlerCount = -header->handlerCount;  // Set rethrow flag.
  } else {
    globals->caughtExceptions = 0;
  }

  throwException(header);
}

extern "C" void* __cxa_begin_catch(void* exc) _GABIXX_NOEXCEPT {
  _Unwind_Exception* exception = static_cast<_Unwind_Exception*>(exc);
  // unwindHeader is the last field of __cxa_exception, so the header starts
  // just before the thrown object at |exception + 1|.
  __cxa_exception* header =
      reinterpret_cast<__cxa_exception*>(exception + 1) - 1;
  __cxa_eh_globals* globals = __cxa_get_globals();

  if (!isOurCxxException(exception->exception_class)) {
    if (globals->caughtExceptions) {
      __gabixx::__fatal_error("Can't handle non-C++ exception!");
    }
  }

  // Check the rethrow flag: a negative handlerCount marks a rethrown
  // exception (see __cxa_rethrow() above).
  header->handlerCount = (header->handlerCount < 0) ?
      (-header->handlerCount + 1) : (header->handlerCount + 1);

  if (header != globals->caughtExceptions) {
    header->nextException = globals->caughtExceptions;
    globals->caughtExceptions = header;
  }
  globals->uncaughtExceptions -= 1;

  return header->adjustedPtr;
}

extern "C" void __cxa_end_catch() _GABIXX_NOEXCEPT {
  __cxa_eh_globals* globals = __cxa_get_globals_fast();
  __cxa_exception* header = globals->caughtExceptions;
  if (!header) {
    return;
  }
  _Unwind_Exception* exception = &header->unwindHeader;

  if (!isOurCxxException(exception->exception_class)) {
    globals->caughtExceptions = 0;
    _Unwind_DeleteException(exception);
    return;
  }

  int count = header->handlerCount;
  if (count < 0) {  // The exception is being rethrown.
    if (++count == 0) {
      globals->caughtExceptions = header->nextException;
    }
  } else if (--count == 0) {
    // Last handler: remove the exception from the caught list and destroy it.
    globals->caughtExceptions = header->nextException;
    __cxa_free_exception(header + 1);
    return;
  } else if (count < 0) {
    __gabixx::__fatal_error("Internal error during exception handling!");
  }

  header->handlerCount = count;
}

extern "C" void* __cxa_get_exception_ptr(void* exceptionObject) _GABIXX_NOEXCEPT {
  __cxa_exception* header =
      reinterpret_cast<__cxa_exception*>(
          reinterpret_cast<_Unwind_Exception*>(exceptionObject) + 1) - 1;
  return header->adjustedPtr;
}

extern "C" bool __cxa_uncaught_exception() _GABIXX_NOEXCEPT {
  __cxa_eh_globals* globals = __cxa_get_globals();
  if (globals == NULL)
    return false;
  // True if at least one exception has been thrown but not yet caught.
  return globals->uncaughtExceptions != 0;
}

extern "C" void __cxa_decrement_exception_refcount(void* exceptionObject)
    _GABIXX_NOEXCEPT {
  if (exceptionObject != NULL) {
    __cxa_exception* header =
        reinterpret_cast<__cxa_exception*>(exceptionObject) - 1;
    if (__sync_sub_and_fetch(&header->referenceCount, 1) == 0)
      __cxa_free_exception(exceptionObject);
  }
}

extern "C" void __cxa_increment_exception_refcount(void* exceptionObject)
    _GABIXX_NOEXCEPT {
  if (exceptionObject != NULL) {
    __cxa_exception* header =
        reinterpret_cast<__cxa_exception*>(exceptionObject) - 1;
    __sync_add_and_fetch(&header->referenceCount, 1);
  }
}

extern "C" void __cxa_rethrow_primary_exception(void* primary_exception) {
#if defined(LIBCXXABI)
  // Only warn if we're building for libcxx since other libraries do not use
  // this.
#warning "not implemented."
#endif  /* defined(LIBCXXABI) */
}

extern "C" void* __cxa_current_primary_exception() _GABIXX_NOEXCEPT {
#if defined(LIBCXXABI)
  // Only warn if we're building for libcxx since other libraries do not use
  // this.
#warning "not implemented."
#endif  /* defined(LIBCXXABI) */
  return NULL;
}

}  // namespace __cxxabiv1