// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource (at) google.com>

#include "config.h"
#include "common.h"
#include "system-alloc.h"

#if defined(HAVE_UNISTD_H) && defined(HAVE_GETPAGESIZE)
#include <unistd.h>                     // for getpagesize
#endif

namespace tcmalloc {

// Note: the following only works for "n"s that fit in 32-bits, but
// that is fine since we only use it for small sizes.
static inline int LgFloor(size_t n) {
  int log = 0;
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    size_t x = n >> shift;
    if (x != 0) {
      n = x;
      log += shift;
    }
  }
  ASSERT(n == 1);
  return log;
}

int AlignmentForSize(size_t size) {
  int alignment = kAlignment;
  if (size > kMaxSize) {
    // Cap alignment at kPageSize for large sizes.
    alignment = kPageSize;
  } else if (size >= 128) {
    // Space wasted due to alignment is at most 1/8, i.e., 12.5%.
    alignment = (1 << LgFloor(size)) / 8;
  } else if (size >= 16) {
    // We need an alignment of at least 16 bytes to satisfy
    // requirements for some SSE types.
    alignment = 16;
  }
  // Maximum alignment allowed is page size alignment.
  if (alignment > kPageSize) {
    alignment = kPageSize;
  }
  CHECK_CONDITION(size < 16 || alignment >= 16);
  CHECK_CONDITION((alignment & (alignment - 1)) == 0);
  return alignment;
}
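
// Worked examples for AlignmentForSize() (documentation only; the concrete
// values of kAlignment, kMaxSize and kPageSize come from the build
// configuration in common.h):
//   size = 1500: LgFloor(1500) == 10, so alignment = (1 << 10) / 8 = 128.
//     Rounding 1500 up to the next multiple of 128 gives 1536, wasting
//     36 bytes, well under the 1/8 (12.5%) bound.
//   size = 48:   16 <= 48 < 128, so alignment = 16 (SSE requirement).
//   size = 8:    below 16, so alignment stays at kAlignment.
//   size > kMaxSize: alignment is capped at kPageSize.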
int SizeMap::NumMoveSize(size_t size) {
  if (size == 0) return 0;
  // Use approx 64k transfers between thread and central caches.
  int num = static_cast<int>(64.0 * 1024.0 / size);
  if (num < 2) num = 2;

  // Avoid bringing too many objects into small object free lists.
  // If this value is too large:
  // - We waste memory with extra objects sitting in the thread caches.
  // - The central freelist holds its lock for too long while
  //   building a linked list of objects, slowing down the allocations
  //   of other threads.
  // If this value is too small:
  // - We go to the central freelist too often and we have to acquire
  //   its lock each time.
  // This value strikes a balance between the constraints above.
  if (num > 32) num = 32;

  return num;
}

// Initialize the mapping arrays
void SizeMap::Init() {
  // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
  if (ClassIndex(0) < 0) {
    Log(kCrash, __FILE__, __LINE__,
        "Invalid class index for size 0", ClassIndex(0));
  }
  if (ClassIndex(kMaxSize) >= sizeof(class_array_)) {
    Log(kCrash, __FILE__, __LINE__,
        "Invalid class index for kMaxSize", ClassIndex(kMaxSize));
  }

  // Compute the size classes we want to use
  int sc = 1;   // Next size class to assign
  int alignment = kAlignment;
  CHECK_CONDITION(kAlignment <= 16);
  for (size_t size = kMinClassSize; size <= kMaxSize; size += alignment) {
    alignment = AlignmentForSize(size);
    CHECK_CONDITION((size % alignment) == 0);

    int blocks_to_move = NumMoveSize(size) / 4;
    size_t psize = 0;
    do {
      psize += kPageSize;
      // Allocate enough pages so leftover is less than 1/8 of total.
      // This bounds wasted space to at most 12.5%.
      while ((psize % size) > (psize >> 3)) {
        psize += kPageSize;
      }
      // Continue to add pages until there are at least as many objects in
      // the span as are needed when moving objects from the central
      // freelists and spans to the thread caches.
    } while ((psize / size) < (blocks_to_move));
    const size_t my_pages = psize >> kPageShift;

    if (sc > 1 && my_pages == class_to_pages_[sc-1]) {
      // See if we can merge this into the previous class without
      // increasing the fragmentation of the previous class.
      const size_t my_objects = (my_pages << kPageShift) / size;
      const size_t prev_objects = (class_to_pages_[sc-1] << kPageShift)
                                  / class_to_size_[sc-1];
      if (my_objects == prev_objects) {
        // Adjust last class to include this size
        class_to_size_[sc-1] = size;
        continue;
      }
    }

    // Add new class
    class_to_pages_[sc] = my_pages;
    class_to_size_[sc] = size;
    sc++;
  }
  if (sc != kNumClasses) {
    Log(kCrash, __FILE__, __LINE__,
        "wrong number of size classes: (found vs. expected)", sc, kNumClasses);
  }

  // Initialize the mapping arrays
  int next_size = 0;
  for (int c = 1; c < kNumClasses; c++) {
    const int max_size_in_class = class_to_size_[c];
    for (int s = next_size; s <= max_size_in_class; s += kAlignment) {
      class_array_[ClassIndex(s)] = c;
    }
    next_size = max_size_in_class + kAlignment;
  }

  // Double-check sizes just to be safe
  for (size_t size = 0; size <= kMaxSize; size++) {
    const int sc = SizeClass(size);
    if (sc <= 0 || sc >= kNumClasses) {
      Log(kCrash, __FILE__, __LINE__,
          "Bad size class (class, size)", sc, size);
    }
    if (sc > 1 && size <= class_to_size_[sc-1]) {
      Log(kCrash, __FILE__, __LINE__,
          "Allocating unnecessarily large class (class, size)", sc, size);
    }
    const size_t s = class_to_size_[sc];
    if (size > s || s == 0) {
      Log(kCrash, __FILE__, __LINE__,
          "Bad (class, size, requested)", sc, s, size);
    }
  }

  // Initialize the num_objects_to_move array.
  for (size_t cl = 1; cl < kNumClasses; ++cl) {
    num_objects_to_move_[cl] = NumMoveSize(ByteSizeForClass(cl));
  }
}
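
// Standalone sketch of the span-sizing loop in SizeMap::Init() above.
// Illustrative only and never compiled here: it assumes an 8K page and the
// 64K transfer target, whereas the real values (kPageSize, kNumClasses, the
// 32-object cap) come from the build configuration. Compile it separately to
// see how many pages a given object size ends up needing per span.
#if 0
#include <stddef.h>
#include <stdio.h>

static void SpanSizingSketch(size_t size) {
  const size_t kSketchPageSize = 8192;              // assumed page size
  // NumMoveSize() equivalent: ~64K worth of objects, clamped to [2, 32].
  int num = static_cast<int>(64.0 * 1024.0 / size);
  if (num < 2) num = 2;
  if (num > 32) num = 32;
  const size_t blocks_to_move = num / 4;
  size_t psize = 0;
  do {
    psize += kSketchPageSize;
    // Grow the span until the leftover is at most 1/8 of its size...
    while ((psize % size) > (psize >> 3)) {
      psize += kSketchPageSize;
    }
    // ...and until it holds at least blocks_to_move objects.
  } while ((psize / size) < blocks_to_move);
  printf("size %lu -> %lu pages, %lu objects per span\n",
         (unsigned long)size,
         (unsigned long)(psize / kSketchPageSize),
         (unsigned long)(psize / size));
}

int main() {
  SpanSizingSketch(2048);   // prints: size 2048 -> 2 pages, 8 objects per span
  return 0;
}
#endif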
// Metadata allocator -- keeps stats about how many bytes allocated.
static uint64_t metadata_system_bytes_ = 0;
static uint64_t metadata_unmapped_bytes_ = 0;

void* MetaDataAlloc(size_t bytes) {
  static size_t pagesize;
#ifdef HAVE_GETPAGESIZE
  if (pagesize == 0)
    pagesize = getpagesize();
#endif

  void* result = TCMalloc_SystemAlloc(bytes, NULL, pagesize);
  if (result != NULL) {
    metadata_system_bytes_ += bytes;
  }
  return result;
}

uint64_t metadata_system_bytes() { return metadata_system_bytes_; }
uint64_t metadata_unmapped_bytes() { return metadata_unmapped_bytes_; }

void update_metadata_system_bytes(int diff) {
  metadata_system_bytes_ += diff;
}
void update_metadata_unmapped_bytes(int diff) {
  metadata_unmapped_bytes_ += diff;
}

}  // namespace tcmalloc
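
// Usage sketch (illustrative only, not part of this file): internal callers
// obtain long-lived metadata through MetaDataAlloc(), and the accounting then
// surfaces through the accessors above, e.g.
//
//   void* block = tcmalloc::MetaDataAlloc(8192);        // hypothetical request
//   uint64_t used = tcmalloc::metadata_system_bytes();  // now includes those 8192 bytes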