/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkGr.h"
#include "SkColorFilter.h"
#include "SkConfig8888.h"
#include "SkData.h"
#include "SkMessageBus.h"
#include "SkPixelRef.h"
#include "SkTextureCompressor.h"
#include "GrResourceCache.h"
#include "GrGpu.h"
#include "effects/GrDitherEffect.h"
#include "GrDrawTargetCaps.h"
#include "effects/GrYUVtoRGBEffect.h"

#ifndef SK_IGNORE_ETC1_SUPPORT
#  include "ktx.h"
#  include "etc1.h"
#endif

/*  Fill out buffer with the compressed format Ganesh expects from a colortable-
    based bitmap: [palette (colortable) + indices].

    At the moment Ganesh only supports the 8-bit version. If Ganesh allowed other
    sizes, we could detect that colortable.count is <= 16 and then repack the
    indices as nibbles to save RAM, but it would take more time (i.e. a lot
    slower than memcpy), so we skip that for now.

    Ganesh wants a full 256-entry palette, even though Skia's ctable is only as big
    as colortable.count says it is.
 */
static void build_compressed_data(void* buffer, const SkBitmap& bitmap) {
    SkASSERT(kIndex_8_SkColorType == bitmap.colorType());

    SkAutoLockPixels alp(bitmap);
    if (!bitmap.readyToDraw()) {
        SkDEBUGFAIL("bitmap not ready to draw!");
        return;
    }

    SkColorTable* ctable = bitmap.getColorTable();
    char* dst = (char*)buffer;

    const int count = ctable->count();

    SkDstPixelInfo dstPI;
    dstPI.fColorType = kRGBA_8888_SkColorType;
    dstPI.fAlphaType = kPremul_SkAlphaType;
    dstPI.fPixels = buffer;
    dstPI.fRowBytes = count * sizeof(SkPMColor);

    SkSrcPixelInfo srcPI;
    srcPI.fColorType = kN32_SkColorType;
    srcPI.fAlphaType = kPremul_SkAlphaType;
    srcPI.fPixels = ctable->lockColors();
    srcPI.fRowBytes = count * sizeof(SkPMColor);

    srcPI.convertPixelsTo(&dstPI, count, 1);

    ctable->unlockColors();

    // always skip a full 256 entries, even if we memcpy'd fewer
    dst += 256 * sizeof(GrColor);

    if ((unsigned)bitmap.width() == bitmap.rowBytes()) {
        memcpy(dst, bitmap.getPixels(), bitmap.getSize());
    } else {
        // need to trim off the extra bytes per row
        size_t width = bitmap.width();
        size_t rowBytes = bitmap.rowBytes();
        const char* src = (const char*)bitmap.getPixels();
        for (int y = 0; y < bitmap.height(); y++) {
            memcpy(dst, src, width);
            src += rowBytes;
            dst += width;
        }
    }
}

///////////////////////////////////////////////////////////////////////////////

static void generate_bitmap_cache_id(const SkBitmap& bitmap, GrCacheID* id) {
    // Our id includes the offset, width, and height so that bitmaps created by extractSubset()
    // are unique.
    uint32_t genID = bitmap.getGenerationID();
    SkIPoint origin = bitmap.pixelRefOrigin();
    int16_t width = SkToS16(bitmap.width());
    int16_t height = SkToS16(bitmap.height());

    GrCacheID::Key key;
    memcpy(key.fData8 +  0, &genID,     4);
    memcpy(key.fData8 +  4, &origin.fX, 4);
    memcpy(key.fData8 +  8, &origin.fY, 4);
    memcpy(key.fData8 + 12, &width,     2);
    memcpy(key.fData8 + 14, &height,    2);
    static const size_t kKeyDataSize = 16;
    memset(key.fData8 + kKeyDataSize, 0, sizeof(key) - kKeyDataSize);
    GR_STATIC_ASSERT(sizeof(key) >= kKeyDataSize);
    static const GrCacheID::Domain gBitmapTextureDomain = GrCacheID::GenerateDomain();
    id->reset(gBitmapTextureDomain, key);
}

static void generate_bitmap_texture_desc(const SkBitmap& bitmap, GrTextureDesc* desc) {
    desc->fFlags = kNone_GrTextureFlags;
    desc->fWidth = bitmap.width();
    desc->fHeight = bitmap.height();
    desc->fConfig = SkImageInfo2GrPixelConfig(bitmap.info());
    desc->fSampleCnt = 0;
}

namespace {

// When the SkPixelRef genID changes, invalidate a corresponding GrResource described by key.
class GrResourceInvalidator : public SkPixelRef::GenIDChangeListener {
public:
    explicit GrResourceInvalidator(GrResourceKey key) : fKey(key) {}
private:
    GrResourceKey fKey;

    virtual void onChange() SK_OVERRIDE {
        const GrResourceInvalidatedMessage message = { fKey };
        SkMessageBus<GrResourceInvalidatedMessage>::Post(message);
    }
};

} // namespace

static void add_genID_listener(GrResourceKey key, SkPixelRef* pixelRef) {
    SkASSERT(pixelRef);
    pixelRef->addGenIDChangeListener(SkNEW_ARGS(GrResourceInvalidator, (key)));
}

static GrTexture* sk_gr_allocate_texture(GrContext* ctx,
                                         bool cache,
                                         const GrTextureParams* params,
                                         const SkBitmap& bm,
                                         GrTextureDesc desc,
                                         const void* pixels,
                                         size_t rowBytes) {
    GrTexture* result;
    if (cache) {
        // This texture is likely to be used again so leave it in the cache
        GrCacheID cacheID;
        generate_bitmap_cache_id(bm, &cacheID);

        GrResourceKey key;
        result = ctx->createTexture(params, desc, cacheID, pixels, rowBytes, &key);
        if (result) {
            add_genID_listener(key, bm.pixelRef());
        }
    } else {
        // This texture is unlikely to be used again (in its present form) so
        // just use a scratch texture. This will remove the texture from the
        // cache so no one else can find it. Additionally, once unlocked, the
        // scratch texture will go to the end of the list for purging so will
        // likely be available for this volatile bitmap the next time around.
        result = ctx->lockAndRefScratchTexture(desc, GrContext::kExact_ScratchTexMatch);
        if (pixels) {
            result->writePixels(0, 0, bm.width(), bm.height(), desc.fConfig, pixels, rowBytes);
        }
    }
    return result;
}

#ifndef SK_IGNORE_ETC1_SUPPORT
static GrTexture *load_etc1_texture(GrContext* ctx, bool cache,
                                    const GrTextureParams* params,
                                    const SkBitmap &bm, GrTextureDesc desc) {
    SkAutoTUnref<SkData> data(bm.pixelRef()->refEncodedData());

    // Is this even encoded data?
    if (NULL == data) {
        return NULL;
    }

    // Is this valid PKM-encoded data?
    const uint8_t *bytes = data->bytes();
    if (etc1_pkm_is_valid(bytes)) {
        uint32_t encodedWidth = etc1_pkm_get_width(bytes);
        uint32_t encodedHeight = etc1_pkm_get_height(bytes);

        // Does the data match the dimensions of the bitmap? If not,
        // then we don't know how to scale the image to match it...
        if (encodedWidth != static_cast<uint32_t>(bm.width()) ||
            encodedHeight != static_cast<uint32_t>(bm.height())) {
            return NULL;
        }

        // Everything seems good... skip ahead to the data.
        bytes += ETC_PKM_HEADER_SIZE;
        desc.fConfig = kETC1_GrPixelConfig;
    } else if (SkKTXFile::is_ktx(bytes)) {
        SkKTXFile ktx(data);

        // Is it actually an ETC1 texture?
        if (!ktx.isCompressedFormat(SkTextureCompressor::kETC1_Format)) {
            return NULL;
        }

        // Does the data match the dimensions of the bitmap? If not,
        // then we don't know how to scale the image to match it...
        if (ktx.width() != bm.width() || ktx.height() != bm.height()) {
            return NULL;
        }

        bytes = ktx.pixelData();
        desc.fConfig = kETC1_GrPixelConfig;
    } else {
        return NULL;
    }

    return sk_gr_allocate_texture(ctx, cache, params, bm, desc, bytes, 0);
}
#endif // SK_IGNORE_ETC1_SUPPORT

static GrTexture *load_yuv_texture(GrContext* ctx, bool cache, const GrTextureParams* params,
                                   const SkBitmap& bm, const GrTextureDesc& desc) {
    SkPixelRef* pixelRef = bm.pixelRef();
    SkISize yuvSizes[3];
    if ((NULL == pixelRef) || !pixelRef->getYUV8Planes(yuvSizes, NULL, NULL, NULL)) {
        return NULL;
    }

    // Allocate the memory for YUV
    size_t totalSize(0);
    size_t sizes[3], rowBytes[3];
    for (int i = 0; i < 3; ++i) {
        rowBytes[i] = yuvSizes[i].fWidth;
        totalSize += sizes[i] = rowBytes[i] * yuvSizes[i].fHeight;
    }
    SkAutoMalloc storage(totalSize);
    void* planes[3];
    planes[0] = storage.get();
    planes[1] = (uint8_t*)planes[0] + sizes[0];
    planes[2] = (uint8_t*)planes[1] + sizes[1];

    SkYUVColorSpace colorSpace;

    // Get the YUV planes
    if (!pixelRef->getYUV8Planes(yuvSizes, planes, rowBytes, &colorSpace)) {
        return NULL;
    }

    GrTextureDesc yuvDesc;
    yuvDesc.fConfig = kAlpha_8_GrPixelConfig;
    GrAutoScratchTexture yuvTextures[3];
    for (int i = 0; i < 3; ++i) {
        yuvDesc.fWidth  = yuvSizes[i].fWidth;
        yuvDesc.fHeight = yuvSizes[i].fHeight;
        yuvTextures[i].set(ctx, yuvDesc);
        if ((NULL == yuvTextures[i].texture()) ||
            !ctx->writeTexturePixels(yuvTextures[i].texture(),
                                     0, 0, yuvDesc.fWidth, yuvDesc.fHeight,
                                     yuvDesc.fConfig, planes[i], rowBytes[i])) {
            return NULL;
        }
    }

    GrTextureDesc rtDesc = desc;
    rtDesc.fFlags = rtDesc.fFlags |
                    kRenderTarget_GrTextureFlagBit |
                    kNoStencil_GrTextureFlagBit;

    GrTexture* result = sk_gr_allocate_texture(ctx, cache, params, bm, rtDesc, NULL, 0);

    GrRenderTarget* renderTarget = result ? result->asRenderTarget() : NULL;
    if (renderTarget) {
        SkAutoTUnref<GrFragmentProcessor> yuvToRgbProcessor(GrYUVtoRGBEffect::Create(
            yuvTextures[0].texture(), yuvTextures[1].texture(), yuvTextures[2].texture(),
            colorSpace));
        GrPaint paint;
        paint.addColorProcessor(yuvToRgbProcessor);
        SkRect r = SkRect::MakeWH(SkIntToScalar(yuvSizes[0].fWidth),
                                  SkIntToScalar(yuvSizes[0].fHeight));
        GrContext::AutoRenderTarget autoRT(ctx, renderTarget);
        GrContext::AutoMatrix am;
        am.setIdentity(ctx);
        GrContext::AutoClip ac(ctx, GrContext::AutoClip::kWideOpen_InitialClip);
        ctx->drawRect(paint, r);
    } else {
        SkSafeSetNull(result);
    }

    return result;
}

static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
                                              bool cache,
                                              const GrTextureParams* params,
                                              const SkBitmap& origBitmap) {
    SkBitmap tmpBitmap;

    const SkBitmap* bitmap = &origBitmap;

    GrTextureDesc desc;
    generate_bitmap_texture_desc(*bitmap, &desc);

    if (kIndex_8_SkColorType == bitmap->colorType()) {
        // build_compressed_data doesn't do npot->pot expansion
        // and paletted textures can't be sub-updated
        if (ctx->supportsIndex8PixelConfig(params, bitmap->width(), bitmap->height())) {
            size_t imageSize = GrCompressedFormatDataSize(kIndex_8_GrPixelConfig,
                                                          bitmap->width(), bitmap->height());
            SkAutoMalloc storage(imageSize);

            build_compressed_data(storage.get(), origBitmap);

            // our compressed data will be trimmed, so pass width() for its
            // "rowBytes", since they are the same now.
            return sk_gr_allocate_texture(ctx, cache, params, origBitmap,
                                          desc, storage.get(), bitmap->width());
        } else {
            origBitmap.copyTo(&tmpBitmap, kN32_SkColorType);
            // now bitmap points to our temp, which has been promoted to 32bits
            bitmap = &tmpBitmap;
            desc.fConfig = SkImageInfo2GrPixelConfig(bitmap->info());
        }
    }

    // Is this an ETC1 encoded texture?
#ifndef SK_IGNORE_ETC1_SUPPORT
    else if (
        // We do not support scratch ETC1 textures, hence they should all be at least
        // trying to go to the cache.
        cache
        // Make sure that the underlying device supports ETC1 textures before we go ahead
        // and check the data.
        && ctx->getGpu()->caps()->isConfigTexturable(kETC1_GrPixelConfig)
        // If the bitmap had compressed data and was then uncompressed, it'll still return
        // compressed data on 'refEncodedData' and upload it. Probably not good, since if
        // the bitmap has available pixels, then they might not be what the decompressed
        // data is.
        && !(bitmap->readyToDraw())) {
        GrTexture *texture = load_etc1_texture(ctx, cache, params, *bitmap, desc);
        if (texture) {
            return texture;
        }
    }
#endif // SK_IGNORE_ETC1_SUPPORT

    else {
        GrTexture *texture = load_yuv_texture(ctx, cache, params, *bitmap, desc);
        if (texture) {
            return texture;
        }
    }
    SkAutoLockPixels alp(*bitmap);
    if (!bitmap->readyToDraw()) {
        return NULL;
    }

    return sk_gr_allocate_texture(ctx, cache, params, origBitmap, desc,
                                  bitmap->getPixels(), bitmap->rowBytes());
}

bool GrIsBitmapInCache(const GrContext* ctx,
                       const SkBitmap& bitmap,
                       const GrTextureParams* params) {
    GrCacheID cacheID;
    generate_bitmap_cache_id(bitmap, &cacheID);

    GrTextureDesc desc;
    generate_bitmap_texture_desc(bitmap, &desc);
    return ctx->isTextureInCache(desc, cacheID, params);
}

GrTexture* GrLockAndRefCachedBitmapTexture(GrContext* ctx,
                                           const SkBitmap& bitmap,
                                           const GrTextureParams* params) {
    GrTexture* result = NULL;

    bool cache = !bitmap.isVolatile();

    if (cache) {
        // If the bitmap isn't changing try to find a cached copy first.

        GrCacheID cacheID;
        generate_bitmap_cache_id(bitmap, &cacheID);

        GrTextureDesc desc;
        generate_bitmap_texture_desc(bitmap, &desc);

        result = ctx->findAndRefTexture(desc, cacheID, params);
    }
    if (NULL == result) {
        result = sk_gr_create_bitmap_texture(ctx, cache, params, bitmap);
    }
    if (NULL == result) {
        GrPrintf("---- failed to create texture for cache [%d %d]\n",
                 bitmap.width(), bitmap.height());
    }
    return result;
}

void GrUnlockAndUnrefCachedBitmapTexture(GrTexture* texture) {
    SkASSERT(texture->getContext());

    texture->getContext()->unlockScratchTexture(texture);
    texture->unref();
}

///////////////////////////////////////////////////////////////////////////////

// The alpha type is ignored for now, but if GrPixelConfig is expanded to encompass
// alpha info, it will be considered.
GrPixelConfig SkImageInfo2GrPixelConfig(SkColorType ct, SkAlphaType) {
    switch (ct) {
        case kUnknown_SkColorType:
            return kUnknown_GrPixelConfig;
        case kAlpha_8_SkColorType:
            return kAlpha_8_GrPixelConfig;
        case kRGB_565_SkColorType:
            return kRGB_565_GrPixelConfig;
        case kARGB_4444_SkColorType:
            return kRGBA_4444_GrPixelConfig;
        case kRGBA_8888_SkColorType:
            return kRGBA_8888_GrPixelConfig;
        case kBGRA_8888_SkColorType:
            return kBGRA_8888_GrPixelConfig;
        case kIndex_8_SkColorType:
            return kIndex_8_GrPixelConfig;
    }
    SkASSERT(0);    // shouldn't get here
    return kUnknown_GrPixelConfig;
}

bool GrPixelConfig2ColorType(GrPixelConfig config, SkColorType* ctOut) {
    SkColorType ct;
    switch (config) {
        case kAlpha_8_GrPixelConfig:
            ct = kAlpha_8_SkColorType;
            break;
        case kIndex_8_GrPixelConfig:
            ct = kIndex_8_SkColorType;
            break;
        case kRGB_565_GrPixelConfig:
            ct = kRGB_565_SkColorType;
            break;
        case kRGBA_4444_GrPixelConfig:
            ct = kARGB_4444_SkColorType;
            break;
        case kRGBA_8888_GrPixelConfig:
            ct = kRGBA_8888_SkColorType;
            break;
        case kBGRA_8888_GrPixelConfig:
            ct = kBGRA_8888_SkColorType;
            break;
        default:
            return false;
    }
    if (ctOut) {
        *ctOut = ct;
    }
    return true;
}

///////////////////////////////////////////////////////////////////////////////

void SkPaint2GrPaintNoShader(GrContext* context, const SkPaint& skPaint, GrColor paintColor,
                             bool constantColor, GrPaint* grPaint) {

    grPaint->setDither(skPaint.isDither());
    grPaint->setAntiAlias(skPaint.isAntiAlias());

    SkXfermode::Coeff sm;
    SkXfermode::Coeff dm;

    SkXfermode* mode = skPaint.getXfermode();
    GrFragmentProcessor* xferProcessor = NULL;
    if (SkXfermode::asFragmentProcessorOrCoeff(mode, &xferProcessor, &sm, &dm)) {
        if (xferProcessor) {
            grPaint->addColorProcessor(xferProcessor)->unref();
            sm = SkXfermode::kOne_Coeff;
            dm = SkXfermode::kZero_Coeff;
        }
    } else {
        //SkDEBUGCODE(SkDebugf("Unsupported xfer mode.\n");)
        // Fall back to src-over
        sm = SkXfermode::kOne_Coeff;
        dm = SkXfermode::kISA_Coeff;
    }
    grPaint->setBlendFunc(sk_blend_to_grblend(sm), sk_blend_to_grblend(dm));

    // set the paint's color to the requested paint color
    grPaint->setColor(paintColor);

    SkColorFilter* colorFilter = skPaint.getColorFilter();
    if (colorFilter) {
        // if the source color is a constant then apply the filter here once rather than per pixel
        // in a shader.
        if (constantColor) {
            SkColor filtered = colorFilter->filterColor(skPaint.getColor());
            grPaint->setColor(SkColor2GrColor(filtered));
        } else {
            SkAutoTUnref<GrFragmentProcessor> fp(colorFilter->asFragmentProcessor(context));
            if (fp.get()) {
                grPaint->addColorProcessor(fp);
            }
        }
    }

#ifndef SK_IGNORE_GPU_DITHER
    // If the dither flag is set, then we need to see if the underlying context
    // supports it. If not, then install a dither effect.
    if (skPaint.isDither() && grPaint->numColorStages() > 0) {
        // What are we rendering into?
        const GrRenderTarget *target = context->getRenderTarget();
        SkASSERT(target);

        // Suspect the dithering flag has no effect on these configs, otherwise
        // fall back on setting the appropriate state.
        if (target->config() == kRGBA_8888_GrPixelConfig ||
            target->config() == kBGRA_8888_GrPixelConfig) {
            // The dither flag is set and the target is likely
            // not going to be dithered by the GPU.
            SkAutoTUnref<GrFragmentProcessor> fp(GrDitherEffect::Create());
            if (fp.get()) {
                grPaint->addColorProcessor(fp);
                grPaint->setDither(false);
            }
        }
    }
#endif
}

/**
 * Unlike GrContext::AutoMatrix, this doesn't require setting a new matrix. GrContext::AutoMatrix
 * likes to set the new matrix in its constructor because it is usually necessary to simultaneously
 * update a GrPaint. This AutoMatrix is used while initially setting up GrPaint, however.
 */
class AutoMatrix {
public:
    AutoMatrix(GrContext* context) {
        fMatrix = context->getMatrix();
        fContext = context;
    }
    ~AutoMatrix() {
        SkASSERT(fContext);
        fContext->setMatrix(fMatrix);
    }
private:
    GrContext* fContext;
    SkMatrix fMatrix;
};

void SkPaint2GrPaintShader(GrContext* context, const SkPaint& skPaint,
                           bool constantColor, GrPaint* grPaint) {
    SkShader* shader = skPaint.getShader();
    if (NULL == shader) {
        SkPaint2GrPaintNoShader(context, skPaint, SkColor2GrColor(skPaint.getColor()),
                                constantColor, grPaint);
        return;
    }

    GrColor paintColor = SkColor2GrColor(skPaint.getColor());

    // Start a new block here in order to preserve our context state after calling
    // asFragmentProcessor(). Since these calls get passed back to the client, we don't really
    // want them messing around with the context.
    {
        // SkShader::asFragmentProcessor() may do offscreen rendering. Save off the current RT,
        // clip, and matrix. We don't reset the matrix on the context because
        // SkShader::asFragmentProcessor may use GrContext::getMatrix() to know the transformation
        // from local coords to device space.
        GrContext::AutoRenderTarget art(context, NULL);
        GrContext::AutoClip ac(context, GrContext::AutoClip::kWideOpen_InitialClip);
        AutoMatrix am(context);

        // Allow the shader to modify paintColor and also create an effect to be installed as
        // the first color effect on the GrPaint.
        GrFragmentProcessor* fp = NULL;
        if (shader->asFragmentProcessor(context, skPaint, NULL, &paintColor, &fp) && fp) {
            grPaint->addColorProcessor(fp)->unref();
            constantColor = false;
        }
    }

    // The GrColor is automatically set when calling asFragmentProcessor.
    // If the shader can be seen as an effect, it returns true and adds its effect to the GrPaint.
    SkPaint2GrPaintNoShader(context, skPaint, paintColor, constantColor, grPaint);
}