/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkDiscardableMemory.h"
#include "SkScaledImageCache.h"
#include "Test.h"

static void make_bm(SkBitmap* bm, int w, int h) {
    bm->allocN32Pixels(w, h);
}

static const int COUNT = 10;
static const int DIM = 256;

static void test_cache(skiatest::Reporter* reporter, SkScaledImageCache& cache,
                       bool testPurge) {
    SkScaledImageCache::ID* id;

    SkBitmap bm[COUNT];

    const SkScalar scale = 2;
    for (int i = 0; i < COUNT; ++i) {
        make_bm(&bm[i], DIM, DIM);
    }

    for (int i = 0; i < COUNT; ++i) {
        SkBitmap tmp;

        SkScaledImageCache::ID* id = cache.findAndLock(bm[i], scale, scale, &tmp);
        REPORTER_ASSERT(reporter, NULL == id);

        make_bm(&tmp, DIM, DIM);
        id = cache.addAndLock(bm[i], scale, scale, tmp);
        REPORTER_ASSERT(reporter, NULL != id);

        SkBitmap tmp2;
        SkScaledImageCache::ID* id2 = cache.findAndLock(bm[i], scale, scale,
                                                        &tmp2);
        REPORTER_ASSERT(reporter, id == id2);
        REPORTER_ASSERT(reporter, tmp.pixelRef() == tmp2.pixelRef());
        REPORTER_ASSERT(reporter, tmp.width() == tmp2.width());
        REPORTER_ASSERT(reporter, tmp.height() == tmp2.height());
        cache.unlock(id2);

        cache.unlock(id);
    }

    if (testPurge) {
        // stress test, should trigger purges
        float incScale = 2;
        for (size_t i = 0; i < COUNT * 100; ++i) {
            incScale += 1;

            SkBitmap tmp;
            make_bm(&tmp, DIM, DIM);

            SkScaledImageCache::ID* id = cache.addAndLock(bm[0], incScale,
                                                          incScale, tmp);
            REPORTER_ASSERT(reporter, NULL != id);
            cache.unlock(id);
        }
    }

    // test the originals after all that purging
    for (int i = 0; i < COUNT; ++i) {
        SkBitmap tmp;
        id = cache.findAndLock(bm[i], scale, scale, &tmp);
        if (id) {
            cache.unlock(id);
        }
    }

    cache.setTotalByteLimit(0);
}

#include "SkDiscardableMemoryPool.h"

static SkDiscardableMemoryPool* gPool;
static SkDiscardableMemory* pool_factory(size_t bytes) {
    SkASSERT(gPool);
    return gPool->create(bytes);
}

DEF_TEST(ImageCache, reporter) {
    static const size_t defLimit = DIM * DIM * 4 * COUNT + 1024; // 1K slop

    {
        SkScaledImageCache cache(defLimit);
        test_cache(reporter, cache, true);
    }
    {
        SkAutoTUnref<SkDiscardableMemoryPool> pool(
                SkDiscardableMemoryPool::Create(defLimit, NULL));
        gPool = pool.get();
        SkScaledImageCache cache(pool_factory);
        test_cache(reporter, cache, true);
    }
    {
        SkScaledImageCache cache(SkDiscardableMemory::Create);
        test_cache(reporter, cache, false);
    }
}

DEF_TEST(ImageCache_doubleAdd, r) {
    // Adding the same key twice should be safe.
    SkScaledImageCache cache(4096);

    SkBitmap original;
    original.allocN32Pixels(40, 40);

    SkBitmap scaled1;
    scaled1.allocN32Pixels(20, 20);

    SkBitmap scaled2;
    scaled2.allocN32Pixels(20, 20);

    SkScaledImageCache::ID* id1 = cache.addAndLock(original, 0.5f, 0.5f, scaled1);
    SkScaledImageCache::ID* id2 = cache.addAndLock(original, 0.5f, 0.5f, scaled2);
    // We don't really care if id1 == id2 as long as unlocking both works.
    cache.unlock(id1);
    cache.unlock(id2);

    SkBitmap tmp;
    // Lookup should return the value that was added last.
    SkScaledImageCache::ID* id = cache.findAndLock(original, 0.5f, 0.5f, &tmp);
    REPORTER_ASSERT(r, NULL != id);
    REPORTER_ASSERT(r, tmp.getGenerationID() == scaled2.getGenerationID());
    cache.unlock(id);
}