/*
 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved.
 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */ 26 27 #include "config.h" 28 #include "core/platform/SharedBuffer.h" 29 30 #include "core/platform/PurgeableBuffer.h" 31 #include "wtf/PassOwnPtr.h" 32 #include "wtf/unicode/Unicode.h" 33 #include "wtf/unicode/UTF8.h" 34 35 #undef SHARED_BUFFER_STATS 36 37 #ifdef SHARED_BUFFER_STATS 38 #include "wtf/DataLog.h" 39 #include "wtf/MainThread.h" 40 #endif 41 42 using namespace std; 43 44 namespace WebCore { 45 46 static const unsigned segmentSize = 0x1000; 47 static const unsigned segmentPositionMask = 0x0FFF; 48 49 static inline unsigned segmentIndex(unsigned position) 50 { 51 return position / segmentSize; 52 } 53 54 static inline unsigned offsetInSegment(unsigned position) 55 { 56 return position & segmentPositionMask; 57 } 58 59 static inline char* allocateSegment() 60 { 61 return static_cast<char*>(fastMalloc(segmentSize)); 62 } 63 64 static inline void freeSegment(char* p) 65 { 66 fastFree(p); 67 } 68 69 #ifdef SHARED_BUFFER_STATS 70 71 static Mutex& statsMutex() 72 { 73 DEFINE_STATIC_LOCAL(Mutex, mutex, ()); 74 return mutex; 75 } 76 77 static HashSet<SharedBuffer*>& liveBuffers() 78 { 79 DEFINE_STATIC_LOCAL(HashSet<SharedBuffer*>, buffers, ()); 80 return buffers; 81 } 82 83 static bool sizeComparator(SharedBuffer* a, SharedBuffer* b) 84 { 85 return a->size() > b->size(); 86 } 87 88 static CString snippetForBuffer(SharedBuffer* sharedBuffer) 89 { 90 const unsigned kMaxSnippetLength = 64; 91 char* snippet = 0; 92 unsigned snippetLength = std::min(sharedBuffer->size(), kMaxSnippetLength); 93 CString result = CString::newUninitialized(snippetLength, snippet); 94 95 const char* segment; 96 unsigned offset = 0; 97 while (unsigned segmentLength = sharedBuffer->getSomeData(segment, offset)) { 98 unsigned length = std::min(segmentLength, snippetLength - offset); 99 memcpy(snippet + offset, segment, length); 100 offset += segmentLength; 101 if (offset >= snippetLength) 102 break; 103 } 104 105 for (unsigned i = 0; i < snippetLength; ++i) { 106 if 
(!isASCIIPrintable(snippet[i])) 107 snippet[i] = '?'; 108 } 109 110 return result; 111 } 112 113 static void printStats(void*) 114 { 115 MutexLocker locker(statsMutex()); 116 Vector<SharedBuffer*> buffers; 117 for (HashSet<SharedBuffer*>::const_iterator iter = liveBuffers().begin(); iter != liveBuffers().end(); ++iter) 118 buffers.append(*iter); 119 std::sort(buffers.begin(), buffers.end(), sizeComparator); 120 121 dataLogF("---- Shared Buffer Stats ----\n"); 122 for (size_t i = 0; i < buffers.size() && i < 64; ++i) { 123 CString snippet = snippetForBuffer(buffers[i]); 124 dataLogF("Buffer size=%8u %s\n", buffers[i]->size(), snippet.data()); 125 } 126 } 127 128 static void didCreateSharedBuffer(SharedBuffer* buffer) 129 { 130 MutexLocker locker(statsMutex()); 131 liveBuffers().add(buffer); 132 133 callOnMainThread(printStats, 0); 134 } 135 136 static void willDestroySharedBuffer(SharedBuffer* buffer) 137 { 138 MutexLocker locker(statsMutex()); 139 liveBuffers().remove(buffer); 140 } 141 142 #endif 143 144 SharedBuffer::SharedBuffer() 145 : m_size(0) 146 { 147 #ifdef SHARED_BUFFER_STATS 148 didCreateSharedBuffer(this); 149 #endif 150 } 151 152 SharedBuffer::SharedBuffer(size_t size) 153 : m_size(size) 154 , m_buffer(size) 155 { 156 #ifdef SHARED_BUFFER_STATS 157 didCreateSharedBuffer(this); 158 #endif 159 } 160 161 SharedBuffer::SharedBuffer(const char* data, int size) 162 : m_size(0) 163 { 164 // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code. 165 if (size < 0) 166 CRASH(); 167 168 append(data, size); 169 170 #ifdef SHARED_BUFFER_STATS 171 didCreateSharedBuffer(this); 172 #endif 173 } 174 175 SharedBuffer::SharedBuffer(const unsigned char* data, int size) 176 : m_size(0) 177 { 178 // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code. 
179 if (size < 0) 180 CRASH(); 181 182 append(reinterpret_cast<const char*>(data), size); 183 184 #ifdef SHARED_BUFFER_STATS 185 didCreateSharedBuffer(this); 186 #endif 187 } 188 189 SharedBuffer::~SharedBuffer() 190 { 191 clear(); 192 193 #ifdef SHARED_BUFFER_STATS 194 willDestroySharedBuffer(this); 195 #endif 196 } 197 198 PassRefPtr<SharedBuffer> SharedBuffer::adoptVector(Vector<char>& vector) 199 { 200 RefPtr<SharedBuffer> buffer = create(); 201 buffer->m_buffer.swap(vector); 202 buffer->m_size = buffer->m_buffer.size(); 203 return buffer.release(); 204 } 205 206 PassRefPtr<SharedBuffer> SharedBuffer::adoptPurgeableBuffer(PassOwnPtr<PurgeableBuffer> purgeableBuffer) 207 { 208 ASSERT(!purgeableBuffer->isPurgeable()); 209 RefPtr<SharedBuffer> buffer = create(); 210 buffer->m_purgeableBuffer = purgeableBuffer; 211 return buffer.release(); 212 } 213 214 unsigned SharedBuffer::size() const 215 { 216 if (m_purgeableBuffer) 217 return m_purgeableBuffer->size(); 218 219 return m_size; 220 } 221 222 void SharedBuffer::createPurgeableBuffer() const 223 { 224 if (m_purgeableBuffer) 225 return; 226 227 m_purgeableBuffer = PurgeableBuffer::create(buffer().data(), m_size); 228 } 229 230 const char* SharedBuffer::data() const 231 { 232 if (m_purgeableBuffer) 233 return m_purgeableBuffer->data(); 234 235 return this->buffer().data(); 236 } 237 238 void SharedBuffer::moveTo(Vector<char>& result) 239 { 240 ASSERT(result.isEmpty()); 241 if (m_purgeableBuffer) { 242 result.reserveCapacity(m_purgeableBuffer->size()); 243 result.append(m_purgeableBuffer->data(), m_purgeableBuffer->size()); 244 clear(); 245 return; 246 } 247 248 unsigned bufferSize = m_buffer.size(); 249 if (m_size == bufferSize) { 250 m_buffer.swap(result); 251 clear(); 252 return; 253 } 254 255 result.reserveCapacity(m_size); 256 257 const char* segment = 0; 258 unsigned position = 0; 259 while (unsigned segmentSize = getSomeData(segment, position)) { 260 result.append(segment, segmentSize); 261 position += 
segmentSize; 262 } 263 ASSERT(result.size() == m_size); 264 clear(); 265 return; 266 } 267 268 void SharedBuffer::append(SharedBuffer* data) 269 { 270 const char* segment; 271 size_t position = 0; 272 while (size_t length = data->getSomeData(segment, position)) { 273 append(segment, length); 274 position += length; 275 } 276 } 277 278 void SharedBuffer::append(const char* data, unsigned length) 279 { 280 ASSERT(!m_purgeableBuffer); 281 if (!length) 282 return; 283 284 unsigned positionInSegment = offsetInSegment(m_size - m_buffer.size()); 285 m_size += length; 286 287 if (m_size <= segmentSize) { 288 // No need to use segments for small resource data 289 if (m_buffer.isEmpty()) 290 m_buffer.reserveInitialCapacity(length); 291 m_buffer.append(data, length); 292 return; 293 } 294 295 char* segment; 296 if (!positionInSegment) { 297 segment = allocateSegment(); 298 m_segments.append(segment); 299 } else 300 segment = m_segments.last() + positionInSegment; 301 302 unsigned segmentFreeSpace = segmentSize - positionInSegment; 303 unsigned bytesToCopy = min(length, segmentFreeSpace); 304 305 for (;;) { 306 memcpy(segment, data, bytesToCopy); 307 if (static_cast<unsigned>(length) == bytesToCopy) 308 break; 309 310 length -= bytesToCopy; 311 data += bytesToCopy; 312 segment = allocateSegment(); 313 m_segments.append(segment); 314 bytesToCopy = min(length, segmentSize); 315 } 316 } 317 318 void SharedBuffer::append(const Vector<char>& data) 319 { 320 append(data.data(), data.size()); 321 } 322 323 void SharedBuffer::clear() 324 { 325 for (unsigned i = 0; i < m_segments.size(); ++i) 326 freeSegment(m_segments[i]); 327 328 m_segments.clear(); 329 m_size = 0; 330 331 m_buffer.clear(); 332 m_purgeableBuffer.clear(); 333 } 334 335 PassRefPtr<SharedBuffer> SharedBuffer::copy() const 336 { 337 RefPtr<SharedBuffer> clone(adoptRef(new SharedBuffer)); 338 if (m_purgeableBuffer) { 339 clone->append(data(), size()); 340 return clone.release(); 341 } 342 343 clone->m_size = m_size; 344 
clone->m_buffer.reserveCapacity(m_size); 345 clone->m_buffer.append(m_buffer.data(), m_buffer.size()); 346 if (!m_segments.isEmpty()) { 347 const char* segment = 0; 348 unsigned position = m_buffer.size(); 349 while (unsigned segmentSize = getSomeData(segment, position)) { 350 clone->m_buffer.append(segment, segmentSize); 351 position += segmentSize; 352 } 353 ASSERT(position == clone->size()); 354 } 355 return clone.release(); 356 } 357 358 PassOwnPtr<PurgeableBuffer> SharedBuffer::releasePurgeableBuffer() 359 { 360 ASSERT(hasOneRef()); 361 return m_purgeableBuffer.release(); 362 } 363 364 const Vector<char>& SharedBuffer::buffer() const 365 { 366 unsigned bufferSize = m_buffer.size(); 367 if (m_size > bufferSize) { 368 m_buffer.resize(m_size); 369 char* destination = m_buffer.data() + bufferSize; 370 unsigned bytesLeft = m_size - bufferSize; 371 for (unsigned i = 0; i < m_segments.size(); ++i) { 372 unsigned bytesToCopy = min(bytesLeft, segmentSize); 373 memcpy(destination, m_segments[i], bytesToCopy); 374 destination += bytesToCopy; 375 bytesLeft -= bytesToCopy; 376 freeSegment(m_segments[i]); 377 } 378 m_segments.clear(); 379 } 380 return m_buffer; 381 } 382 383 unsigned SharedBuffer::getSomeData(const char*& someData, unsigned position) const 384 { 385 unsigned totalSize = size(); 386 if (position >= totalSize) { 387 someData = 0; 388 return 0; 389 } 390 391 if (m_purgeableBuffer) { 392 ASSERT_WITH_SECURITY_IMPLICATION(position < size()); 393 someData = data() + position; 394 return totalSize - position; 395 } 396 397 ASSERT_WITH_SECURITY_IMPLICATION(position < m_size); 398 unsigned consecutiveSize = m_buffer.size(); 399 if (position < consecutiveSize) { 400 someData = m_buffer.data() + position; 401 return consecutiveSize - position; 402 } 403 404 position -= consecutiveSize; 405 unsigned segments = m_segments.size(); 406 unsigned maxSegmentedSize = segments * segmentSize; 407 unsigned segment = segmentIndex(position); 408 if (segment < segments) { 409 
unsigned bytesLeft = totalSize - consecutiveSize; 410 unsigned segmentedSize = min(maxSegmentedSize, bytesLeft); 411 412 unsigned positionInSegment = offsetInSegment(position); 413 someData = m_segments[segment] + positionInSegment; 414 return segment == segments - 1 ? segmentedSize - position : segmentSize - positionInSegment; 415 } 416 ASSERT_NOT_REACHED(); 417 return 0; 418 } 419 420 PassRefPtr<ArrayBuffer> SharedBuffer::getAsArrayBuffer() const 421 { 422 RefPtr<ArrayBuffer> arrayBuffer = ArrayBuffer::createUninitialized(static_cast<unsigned>(size()), 1); 423 424 const char* segment = 0; 425 unsigned position = 0; 426 while (unsigned segmentSize = getSomeData(segment, position)) { 427 memcpy(static_cast<char*>(arrayBuffer->data()) + position, segment, segmentSize); 428 position += segmentSize; 429 } 430 431 if (position != arrayBuffer->byteLength()) { 432 ASSERT_NOT_REACHED(); 433 // Don't return the incomplete ArrayBuffer. 434 return 0; 435 } 436 437 return arrayBuffer; 438 } 439 440 } // namespace WebCore 441