//===------------------------ memory.cpp ----------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define _LIBCPP_BUILDING_MEMORY
#include "memory"
#ifndef _LIBCPP_HAS_NO_THREADS
#include "mutex"
#include "thread"
#endif
#include "include/atomic_support.h"

_LIBCPP_BEGIN_NAMESPACE_STD

namespace
{

// NOTE: Relaxed and acq/rel atomics (for increment and decrement respectively)
// should be sufficient for thread safety.
// See https://llvm.org/bugs/show_bug.cgi?id=22803
//
// Both helpers return the *new* value of the counter after the update, which
// is what the -1 tests below rely on.
template <class T>
inline T
increment(T& t) _NOEXCEPT
{
    return __libcpp_atomic_add(&t, 1, _AO_Relaxed);
}

template <class T>
inline T
decrement(T& t) _NOEXCEPT
{
    return __libcpp_atomic_add(&t, -1, _AO_Acq_Rel);
}

}  // namespace

const allocator_arg_t allocator_arg = allocator_arg_t();

bad_weak_ptr::~bad_weak_ptr() _NOEXCEPT {}

const char*
bad_weak_ptr::what() const _NOEXCEPT
{
    return "bad_weak_ptr";
}

__shared_count::~__shared_count()
{
}

void
__shared_count::__add_shared() _NOEXCEPT
{
    increment(__shared_owners_);
}

// Drop one shared owner.  The owner count is stored biased by one (a stored
// value of 0 means one owner), so reaching -1 means the last shared owner is
// gone.  Returns true when this call destroyed the owned object.
bool
__shared_count::__release_shared() _NOEXCEPT
{
    if (decrement(__shared_owners_) == -1)
    {
        __on_zero_shared();
        return true;
    }
    return false;
}

__shared_weak_count::~__shared_weak_count()
{
}

void
__shared_weak_count::__add_shared() _NOEXCEPT
{
    __shared_count::__add_shared();
}

void
__shared_weak_count::__add_weak() _NOEXCEPT
{
    increment(__shared_weak_owners_);
}

// Releasing the last shared owner also drops the single weak reference that
// the collective of shared owners holds, which may in turn destroy the
// control block itself (via __release_weak).
void
__shared_weak_count::__release_shared() _NOEXCEPT
{
    if (__shared_count::__release_shared())
        __release_weak();
}

// Drop one weak owner.  Like the shared count, the weak count is biased by
// one, so -1 after the decrement means no weak (or shared) owners remain and
// the control block can be destroyed.
void
__shared_weak_count::__release_weak() _NOEXCEPT
{
    if (decrement(__shared_weak_owners_) == -1)
        __on_zero_shared_weak();
}

// weak_ptr::lock support: atomically take a shared reference, but only if the
// object has not already expired.  A CAS loop is required (rather than a plain
// increment) so that we never resurrect an object whose shared count has
// already hit -1 ("expired").  Returns this on success, null if expired.
__shared_weak_count*
__shared_weak_count::lock() _NOEXCEPT
{
    long object_owners = __libcpp_atomic_load(&__shared_owners_);
    while (object_owners != -1)
    {
        // On failure the CAS refreshes object_owners with the current value,
        // so the loop re-tests the expired condition before retrying.
        if (__libcpp_atomic_compare_exchange(&__shared_owners_,
                                             &object_owners,
                                             object_owners+1))
            return this;
    }
    return 0;
}

#if !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)

// Out-of-line fallback for get_deleter: the base control block holds no
// deleter, so the answer is always null; derived control blocks that do hold
// one override this.
const void*
__shared_weak_count::__get_deleter(const type_info&) const _NOEXCEPT
{
    return 0;
}

#endif // !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)

#if defined(_LIBCPP_HAS_C_ATOMIC_IMP) && !defined(_LIBCPP_HAS_NO_THREADS)

// Pool of mutexes used to guard the atomic_...(shared_ptr) free functions.
// The pool size must be a power of two: __get_sp_mut masks the pointer hash
// with (__sp_mut_count - 1) to pick a slot.
static const std::size_t __sp_mut_count = 16;
static __libcpp_mutex_t mut_back_imp[__sp_mut_count] =
{
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER
};

// NOTE(review): this cast relies on std::mutex being a trivial wrapper whose
// sole member is a __libcpp_mutex_t, so the statically-initialized array
// above can be viewed as an array of std::mutex without running any
// constructors — confirm against the <mutex> definition if std::mutex's
// layout ever changes.
static mutex* mut_back = reinterpret_cast<std::mutex*>(mut_back_imp);

_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) _NOEXCEPT
   : __lx(p)
{
}

// Spin briefly (16 tries, yielding between attempts) before falling back to a
// blocking lock, since these critical sections are expected to be very short.
void
__sp_mut::lock() _NOEXCEPT
{
    mutex& m = *static_cast<mutex*>(__lx);
    unsigned count = 0;
    while (!m.try_lock())
    {
        if (++count > 16)
        {
            m.lock();
            break;
        }
        this_thread::yield();
    }
}

void
__sp_mut::unlock() _NOEXCEPT
{
    static_cast<mutex*>(__lx)->unlock();
}

// Map an object address to one of the 16 pooled mutexes.  Distinct pointers
// may share a mutex (hash collision), which is harmless — it only serializes
// unrelated atomic shared_ptr operations.
__sp_mut&
__get_sp_mut(const void* p)
{
    static __sp_mut muts[__sp_mut_count]
    {
        &mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
        &mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
        &mut_back[ 8], &mut_back[ 9], &mut_back[10], &mut_back[11],
        &mut_back[12], &mut_back[13], &mut_back[14], &mut_back[15]
    };
    return muts[hash<const void*>()(p) & (__sp_mut_count-1)];
}

#endif // defined(_LIBCPP_HAS_C_ATOMIC_IMP) && !defined(_LIBCPP_HAS_NO_THREADS)

// [util.dynamic.safety] garbage-collection support: this implementation
// reports relaxed pointer safety below, so all of the declare/undeclare
// functions are deliberate no-ops.
void
declare_reachable(void*)
{
}

void
declare_no_pointers(char*, size_t)
{
}

void
undeclare_no_pointers(char*, size_t)
{
}

pointer_safety
get_pointer_safety() _NOEXCEPT
{
    return pointer_safety::relaxed;
}

// With relaxed pointer safety undeclare_reachable is the identity function;
// the inline wrapper in <memory> casts the result back to the caller's type.
void*
__undeclare_reachable(void* p)
{
    return p;
}

// std::align: if a size-byte object aligned to `alignment` fits in the buffer
// [ptr, ptr+space), bump ptr up to the aligned address, shrink space by the
// adjustment, and return the aligned pointer; otherwise leave ptr/space
// untouched and return null.  `& -alignment` relies on alignment being a
// power of two (unsigned negation yields the ~(alignment-1) mask).
void*
align(size_t alignment, size_t size, void*& ptr, size_t& space)
{
    void* r = nullptr;
    if (size <= space)
    {
        char* p1 = static_cast<char*>(ptr);
        char* p2 = reinterpret_cast<char*>(reinterpret_cast<size_t>(p1 + (alignment - 1)) & -alignment);
        size_t d = static_cast<size_t>(p2 - p1);
        if (d <= space - size)
        {
            r = p2;
            ptr = r;
            space -= d;
        }
    }
    return r;
}

_LIBCPP_END_NAMESPACE_STD