/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkChecksum_DEFINED
#define SkChecksum_DEFINED

#include "SkTypes.h"

/**
 *  Computes a 32-bit checksum from a blob of 32-bit aligned data. This is meant
 *  to be very, very fast, as it is used internally by the font cache, in
 *  conjunction with the entire raw key. This algorithm does not generate
 *  unique values as well as others (e.g. MD5), but it performs much faster.
 *  Skia's use cases can survive non-unique values (since the entire key is
 *  always available). Clients should only use this where speed is at a premium
 *  over uniqueness.
 */
class SkChecksum : SkNoncopyable {
private:
    /*
     *  Our Rotate and Mash helpers are meant to automatically do the right
     *  thing depending on whether sizeof(uintptr_t) is 4 or 8.
     */
    enum {
        ROTR = 17,
        ROTL = sizeof(uintptr_t) * 8 - ROTR,
        HALFBITS = sizeof(uintptr_t) * 4
    };

    static inline uintptr_t Mash(uintptr_t total, uintptr_t value) {
        return ((total >> ROTR) | (total << ROTL)) ^ value;
    }

public:
    /**
     *  uint32_t -> uint32_t hash, useful when you're about to truncate this hash
     *  but you suspect its low bits aren't well mixed.
     *
     *  This is the Murmur3 finalizer.
     */
    static uint32_t Mix(uint32_t hash) {
        hash ^= hash >> 16;
        hash *= 0x85ebca6b;
        hash ^= hash >> 13;
        hash *= 0xc2b2ae35;
        hash ^= hash >> 16;
        return hash;
    }

    /**
     *  Calculate 32-bit Murmur hash (murmur3).
     *  This should take 2-3x longer than SkChecksum::Compute, but is a considerably better hash.
     *  See en.wikipedia.org/wiki/MurmurHash.
     *
     *  @param data  Memory address of the data block to be processed. Must be 32-bit aligned.
     *  @param bytes Size of the data block in bytes. Must be a multiple of 4.
     *  @param seed  Initial hash seed. (optional)
     *  @return hash result
     */
    static uint32_t Murmur3(const uint32_t* data, size_t bytes, uint32_t seed=0) {
        // Use may_alias to remind the compiler we're intentionally violating strict aliasing,
        // and so not to apply strict-aliasing-based optimizations.
        typedef uint32_t SK_ATTRIBUTE(may_alias) aliased_uint32_t;
        const aliased_uint32_t* safe_data = (const aliased_uint32_t*)data;

        SkASSERTF(SkIsAlign4(bytes), "Expected 4-byte multiple, got %zu", bytes);
        const size_t words = bytes/4;

        uint32_t hash = seed;
        for (size_t i = 0; i < words; i++) {
            // Standard Murmur3 body: scramble each 32-bit word, then fold it
            // into the running hash.
            uint32_t k = safe_data[i];
            k *= 0xcc9e2d51;
            k = (k << 15) | (k >> 17);
            k *= 0x1b873593;

            hash ^= k;
            hash = (hash << 13) | (hash >> 19);
            hash *= 5;
            hash += 0xe6546b64;
        }
        hash ^= bytes;      // Finalization: mix in the length, then avalanche.
        return Mix(hash);
    }

    /**
     *  Compute a 32-bit checksum for a given data block.
     *
     *  WARNING: this algorithm is tuned for efficiency, not backward/forward
     *  compatibility. It may change at any time, so a checksum generated with
     *  one version of the Skia code may not match a checksum generated with
     *  a different version of the Skia code.
     *
     *  @param data Memory address of the data block to be processed. Must be
     *              32-bit aligned.
     *  @param size Size of the data block in bytes. Must be a multiple of 4.
     *  @return checksum result
     */
    static uint32_t Compute(const uint32_t* data, size_t size) {
        // Use may_alias to remind the compiler we're intentionally violating strict aliasing,
        // and so not to apply strict-aliasing-based optimizations.
        typedef uint32_t SK_ATTRIBUTE(may_alias) aliased_uint32_t;
        const aliased_uint32_t* safe_data = (const aliased_uint32_t*)data;

        SkASSERT(SkIsAlign4(size));

        /*
         *  We want to let the compiler use 32-bit or 64-bit addressing and math,
         *  so we use uintptr_t as our magic type. This makes the code a little
         *  more obscure (we can't hard-code 32 or 64 anywhere, but have to use
         *  sizeof()).
         */
        uintptr_t result = 0;
        const uintptr_t* ptr = reinterpret_cast<const uintptr_t*>(safe_data);

        /*
         *  Count the number of quad-element chunks. This takes into account
         *  whether we're on a 32-bit or 64-bit arch, since we use sizeof(uintptr_t)
         *  to compute how much to shift-down the size.
         */
        size_t n4 = size / (sizeof(uintptr_t) << 2);
        for (size_t i = 0; i < n4; ++i) {
            result = Mash(result, *ptr++);
            result = Mash(result, *ptr++);
            result = Mash(result, *ptr++);
            result = Mash(result, *ptr++);
        }
        size &= ((sizeof(uintptr_t) << 2) - 1);

        // Handle the remaining 32-bit words that didn't fit a full quad chunk.
        safe_data = reinterpret_cast<const aliased_uint32_t*>(ptr);
        const aliased_uint32_t* stop = safe_data + (size >> 2);
        while (safe_data < stop) {
            result = Mash(result, *safe_data++);
        }

        /*
         *  Smash us down to 32 bits if we were 64. Note that when uintptr_t is
         *  32 bits, this code-path should go away, but I still got a warning
         *  when I wrote
         *      result ^= result >> 32;
         *  since >>32 is undefined for 32-bit ints, hence the wacky HALFBITS
         *  define.
         */
        if (8 == sizeof(result)) {
            result ^= result >> HALFBITS;
        }
        return static_cast<uint32_t>(result);
    }
};

#endif
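
/*
 *  Illustrative usage sketch (not part of this header's API): hashing a small,
 *  32-bit aligned key with Compute() and Murmur3(). The KeyData struct and its
 *  fields below are hypothetical, shown only to demonstrate the alignment and
 *  size-multiple-of-4 requirements documented above.
 *
 *      struct KeyData {
 *          uint32_t fGlyphID;
 *          uint32_t fFlags;
 *      };  // sizeof(KeyData) is 8: 32-bit aligned and a multiple of 4.
 *
 *      KeyData key = { 42, 0x1 };
 *      const uint32_t* blob = reinterpret_cast<const uint32_t*>(&key);
 *
 *      uint32_t fast = SkChecksum::Compute(blob, sizeof(key));  // fastest, weaker mixing
 *      uint32_t good = SkChecksum::Murmur3(blob, sizeof(key));  // ~2-3x slower, better hash
 */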