/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "wtf/PartitionAlloc.h"

#include "wtf/PageAllocator.h"
#include "wtf/Vector.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

COMPILE_ASSERT(WTF::kPartitionPageSize < WTF::kSuperPageSize, ok_partition_page_size);

namespace WTF {

void partitionAllocInit(PartitionRoot* root)
{
    ASSERT(!root->initialized);
    root->initialized = true;
    root->lock = 0;
    size_t i;
    for (i = 0; i < kNumBuckets; ++i) {
        PartitionBucket* bucket = &root->buckets[i];
        bucket->root = root;
        bucket->currPage = &root->seedPage;
        bucket->freePages = 0;
        bucket->numFullPages = 0;
    }

    root->nextSuperPage = 0;
    root->nextPartitionPage = 0;
    root->nextPartitionPageEnd = 0;
    root->seedPage.numAllocatedSlots = 0;
    root->seedPage.bucket = &root->seedBucket;
    root->seedPage.freelistHead = 0;
    root->seedPage.next = &root->seedPage;
    root->seedPage.prev = &root->seedPage;

    root->seedBucket.root = root;
    root->seedBucket.currPage = 0;
    root->seedBucket.freePages = 0;
    root->seedBucket.numFullPages = 0;
}

static ALWAYS_INLINE void partitionFreeSuperPage(PartitionPageHeader* page)
{
    freeSuperPages(page, kSuperPageSize);
}

static void partitionCollectIfSuperPage(PartitionPageHeader* partitionPage, Vector<PartitionPageHeader*>* superPages)
{
    PartitionPageHeader* superPage = reinterpret_cast<PartitionPageHeader*>(reinterpret_cast<uintptr_t>(partitionPage) & kSuperPageBaseMask);
    uintptr_t superPageOffset = reinterpret_cast<uintptr_t>(partitionPage) & kSuperPageOffsetMask;
    // If this partition page is at the start of a super page, note it so we can
    // free all the distinct super pages.
    if (!superPageOffset)
        superPages->append(superPage);
}

static bool partitionAllocShutdownBucket(PartitionBucket* bucket, Vector<PartitionPageHeader*>* superPages)
{
    // Failure here indicates a memory leak.
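    // Full pages were unlinked from the ring list when they filled up, so a
    // nonzero numFullPages count here means some of their slots were never
    // freed.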
    bool noLeaks = !bucket->numFullPages;
    PartitionFreepagelistEntry* entry = bucket->freePages;
    while (entry) {
        PartitionFreepagelistEntry* next = entry->next;
        partitionCollectIfSuperPage(entry->page, superPages);
        partitionFree(entry);
        entry = next;
    }
    PartitionPageHeader* page = bucket->currPage;
    do {
        if (page->numAllocatedSlots)
            noLeaks = false;
        PartitionPageHeader* next = page->next;
        if (page != &bucket->root->seedPage)
            partitionCollectIfSuperPage(page, superPages);
        page = next;
    } while (page != bucket->currPage);

    return noLeaks;
}

bool partitionAllocShutdown(PartitionRoot* root)
{
    bool noLeaks = true;
    ASSERT(root->initialized);
    root->initialized = false;
    // As we iterate through all the partition pages, we keep a list of all the
    // distinct super pages that we have seen. This is so that we can free all
    // the super pages correctly. A super page must be freed all at once -- it
    // is not permissible to free a super page by freeing all its component
    // partition pages.
    // Note that we cannot free a super page upon discovering it, because a
    // single super page will likely contain partition pages from multiple
    // different buckets.
    Vector<PartitionPageHeader*> superPages;
    size_t i;
    // First, free the non-freepage buckets. Freeing the free pages in these
    // buckets will depend on the freepage bucket.
    for (i = 0; i < kNumBuckets; ++i) {
        if (i != kFreePageBucket) {
            PartitionBucket* bucket = &root->buckets[i];
            if (!partitionAllocShutdownBucket(bucket, &superPages))
                noLeaks = false;
        }
    }
    // Finally, free the freepage bucket.
    (void) partitionAllocShutdownBucket(&root->buckets[kFreePageBucket], &superPages);
    // Now that we've examined all partition pages in all buckets, it's safe
    // to free all our super pages.
    for (Vector<PartitionPageHeader*>::iterator it = superPages.begin(); it != superPages.end(); ++it)
        partitionFreeSuperPage(*it);

    return noLeaks;
}

static ALWAYS_INLINE PartitionPageHeader* partitionAllocPage(PartitionRoot* root)
{
    char* ret = 0;
    if (LIKELY(root->nextPartitionPage != 0)) {
        // In this case, we can still hand out pages from a previous
        // super page allocation.
        ret = root->nextPartitionPage;
        root->nextPartitionPage += kPartitionPageSize;
        if (UNLIKELY(root->nextPartitionPage == root->nextPartitionPageEnd)) {
            // We ran out, need to get more pages next time.
            root->nextPartitionPage = 0;
            root->nextPartitionPageEnd = 0;
        }
    } else {
        // Need a new super page.
        // We need to put a guard page in front if either:
        // a) This is the first super page allocation.
        // b) The super page did not end up at our suggested address.
        bool needsGuard = false;
        if (!root->nextSuperPage) {
            needsGuard = true;
            root->nextSuperPage = getRandomSuperPageBase();
        }
        ret = reinterpret_cast<char*>(allocSuperPages(root->nextSuperPage, kSuperPageSize));
        if (ret != root->nextSuperPage) {
            needsGuard = true;
            // Re-randomize the base location for next time just in case the
            // underlying operating system picks lousy locations for mappings.
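            // Clearing nextSuperPage forces the next super page allocation to
            // go back through getRandomSuperPageBase() instead of trying to
            // extend this mapping.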
            root->nextSuperPage = 0;
        } else {
            root->nextSuperPage = ret + kSuperPageSize;
        }
        root->nextPartitionPageEnd = ret + kSuperPageSize;
        if (needsGuard) {
            setSystemPagesInaccessible(ret, kPartitionPageSize);
            ret += kPartitionPageSize;
        }
        root->nextPartitionPage = ret + kPartitionPageSize;
    }
    return reinterpret_cast<PartitionPageHeader*>(ret);
}

static ALWAYS_INLINE void partitionUnusePage(PartitionPageHeader* page)
{
    decommitSystemPages(page, kPartitionPageSize);
}

static ALWAYS_INLINE size_t partitionBucketSlots(const PartitionBucket* bucket)
{
    ASSERT(!(sizeof(PartitionPageHeader) % sizeof(void*)));
    return (kPartitionPageSize - sizeof(PartitionPageHeader)) / partitionBucketSize(bucket);
}

static ALWAYS_INLINE void partitionPageInit(PartitionPageHeader* page, PartitionBucket* bucket)
{
    page->numAllocatedSlots = 1;
    page->bucket = bucket;
    size_t size = partitionBucketSize(bucket);
    size_t numSlots = partitionBucketSlots(bucket);
    RELEASE_ASSERT(numSlots > 1);
    page->freelistHead = reinterpret_cast<PartitionFreelistEntry*>((reinterpret_cast<char*>(page) + sizeof(PartitionPageHeader) + size));
    PartitionFreelistEntry* freelist = page->freelistHead;
    // Account for the slot we've handed out right away as a return value.
    --numSlots;
    // This loop sets up the initial chain of freelist pointers in the new page.
    while (--numSlots) {
        PartitionFreelistEntry* next = reinterpret_cast<PartitionFreelistEntry*>(reinterpret_cast<char*>(freelist) + size);
        freelist->next = partitionFreelistMask(next);
        freelist = next;
    }
    freelist->next = partitionFreelistMask(0);
}

static ALWAYS_INLINE void partitionUnlinkPage(PartitionPageHeader* page)
{
    ASSERT(page->prev->next == page);
    ASSERT(page->next->prev == page);

    page->next->prev = page->prev;
    page->prev->next = page->next;
}

static ALWAYS_INLINE void partitionLinkPage(PartitionPageHeader* newPage, PartitionPageHeader* prevPage)
{
    ASSERT(prevPage->prev->next == prevPage);
    ASSERT(prevPage->next->prev == prevPage);

    newPage->prev = prevPage;
    newPage->next = prevPage->next;

    prevPage->next->prev = newPage;
    prevPage->next = newPage;
}

void* partitionAllocSlowPath(PartitionBucket* bucket)
{
    // Slow path. First look for a page in our linked ring list of non-full
    // pages.
    PartitionPageHeader* page = bucket->currPage;
    PartitionPageHeader* next = page->next;
    ASSERT(page == &bucket->root->seedPage || (page->bucket == bucket && next->bucket == bucket));

    while (LIKELY(next != page)) {
        ASSERT(next->bucket == bucket);
        ASSERT(next->next->prev == next);
        ASSERT(next->prev->next == next);
        ASSERT(next != &bucket->root->seedPage);
        if (LIKELY(next->freelistHead != 0)) {
            bucket->currPage = next;
            PartitionFreelistEntry* ret = next->freelistHead;
            next->freelistHead = partitionFreelistMask(ret->next);
            next->numAllocatedSlots++;
            return ret;
        }
        // Pull this page out of the non-full page list, since it has no free
        // slots.
        // This tags the page as full so that freeing can tell, and can move
        // the page back into the non-full page list when appropriate.
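        // The tag is the sign of the count: partitionFreeSlowPath() sees the
        // negative numAllocatedSlots when a slot in a full page is freed and
        // links the page back in beside currPage.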
        ASSERT(next->numAllocatedSlots == partitionBucketSlots(bucket));
        next->numAllocatedSlots = -next->numAllocatedSlots;
        partitionUnlinkPage(next);
        ++bucket->numFullPages;

        next = next->next;
    }

    // Second, look in our list of freed but reserved pages.
    PartitionPageHeader* newPage;
    PartitionFreepagelistEntry* pagelist = bucket->freePages;
    if (LIKELY(pagelist != 0)) {
        newPage = pagelist->page;
        bucket->freePages = pagelist->next;
        partitionFree(pagelist);
        ASSERT(page != &bucket->root->seedPage);
        partitionLinkPage(newPage, page);
    } else {
        // Third, if we get here, we need a brand new page.
        newPage = partitionAllocPage(bucket->root);
        if (UNLIKELY(page == &bucket->root->seedPage)) {
            // If this is the first page allocation to this bucket, then
            // fully replace the seed page. This avoids pointlessly iterating
            // over it.
            newPage->prev = newPage;
            newPage->next = newPage;
        } else {
            partitionLinkPage(newPage, page);
        }
    }
    partitionPageInit(newPage, bucket);
    bucket->currPage = newPage;

    return reinterpret_cast<char*>(newPage) + sizeof(PartitionPageHeader);
}

void partitionFreeSlowPath(PartitionPageHeader* page)
{
    PartitionBucket* bucket = page->bucket;
    if (LIKELY(page->numAllocatedSlots == 0)) {
        // Page became fully unused.
        // If it's the current page, leave it be so that we don't bounce a page
        // onto the free page list and immediately back out again.
        if (LIKELY(page == bucket->currPage))
            return;

        partitionUnlinkPage(page);
        partitionUnusePage(page);
        PartitionFreepagelistEntry* entry = static_cast<PartitionFreepagelistEntry*>(partitionBucketAlloc(&bucket->root->buckets[kFreePageBucket]));
        entry->page = page;
        entry->next = bucket->freePages;
        bucket->freePages = entry;
    } else {
        // Fully used page became partially used. It must be put back on the
        // non-full page list.
        partitionLinkPage(page, bucket->currPage);
        page->numAllocatedSlots = -page->numAllocatedSlots - 2;
        ASSERT(page->numAllocatedSlots == partitionBucketSlots(bucket) - 1);
        --bucket->numFullPages;
    }
}

void* partitionReallocGeneric(PartitionRoot* root, void* ptr, size_t oldSize, size_t newSize)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    return realloc(ptr, newSize);
#else
    size_t oldIndex = partitionAllocRoundup(oldSize) >> kBucketShift;
    if (oldIndex > kNumBuckets)
        oldIndex = kNumBuckets;
    size_t newIndex = partitionAllocRoundup(newSize) >> kBucketShift;
    if (newIndex > kNumBuckets)
        newIndex = kNumBuckets;

    if (oldIndex == newIndex) {
        // Same bucket. But kNumBuckets indicates the fastMalloc "bucket" so in
        // that case we're not done; we have to forward to fastRealloc.
        if (oldIndex == kNumBuckets)
            return WTF::fastRealloc(ptr, newSize);
        return ptr;
    }
    // This realloc cannot be resized in-place. Sadness.
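    // Allocate from the new bucket, copy the smaller of the old and new sizes,
    // then release the old allocation back to its original bucket.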
    void* ret = partitionAllocGeneric(root, newSize);
    size_t copySize = oldSize;
    if (newSize < oldSize)
        copySize = newSize;
    memcpy(ret, ptr, copySize);
    partitionFreeGeneric(ptr, oldSize);
    return ret;
#endif
}

#ifndef NDEBUG

void partitionDumpStats(const PartitionRoot& root)
{
    size_t i;
    for (i = 0; i < kNumBuckets; ++i) {
        const PartitionBucket& bucket = root.buckets[i];
        if (bucket.currPage == &bucket.root->seedPage && !bucket.freePages) {
            // Empty bucket with no freelist pages. Skip reporting it.
            continue;
        }
        size_t numFreePages = 0;
        const PartitionFreepagelistEntry* freePages = bucket.freePages;
        while (freePages) {
            ++numFreePages;
            freePages = freePages->next;
        }
        size_t bucketSlotSize = partitionBucketSize(&bucket);
        size_t bucketNumSlots = partitionBucketSlots(&bucket);
        size_t numActivePages = bucket.numFullPages;
        size_t numActiveBytes = numActivePages * bucketSlotSize * bucketNumSlots;
        const PartitionPageHeader* page = bucket.currPage;
        do {
            if (page != &bucket.root->seedPage) {
                ++numActivePages;
                numActiveBytes += (page->numAllocatedSlots * bucketSlotSize);
            }
            page = page->next;
        } while (page != bucket.currPage);
        printf("bucket size %zu: %zu/%zu bytes, %zu free pages\n", bucketSlotSize, numActiveBytes, numActivePages * kPartitionPageSize, numFreePages);
    }
}
#endif // !NDEBUG

} // namespace WTF
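
/*
 * Illustrative sketch of how a caller might drive these entry points. This is
 * not part of the allocator; it only uses functions defined or called above
 * (partitionAllocInit, partitionAllocGeneric, partitionFreeGeneric,
 * partitionAllocShutdown), with the root owned by the caller:
 *
 *     WTF::PartitionRoot root;
 *     WTF::partitionAllocInit(&root);
 *
 *     void* p = WTF::partitionAllocGeneric(&root, 64);
 *     // ... use the 64-byte allocation ...
 *     WTF::partitionFreeGeneric(p, 64);
 *
 *     // Returns false if any slots were still allocated, i.e. a leak.
 *     bool noLeaks = WTF::partitionAllocShutdown(&root);
 */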