/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrDrawOpAtlas.h"

#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrOnFlushResourceProvider.h"
#include "GrOpFlushState.h"
#include "GrRectanizer.h"
#include "GrProxyProvider.h"
#include "GrResourceProvider.h"
#include "GrSurfaceProxyPriv.h"
#include "GrTexture.h"
#include "GrTracing.h"

// When proxy allocation is deferred until flush time the proxies acting as atlases require
// special handling. This is because the usage that can be determined from the ops themselves
// isn't sufficient. Independent of the ops there will be ASAP and inline uploads to the
// atlases. Extending the usage interval of any op that uses an atlas to the start of the
// flush (as is done for proxies that are used for sw-generated masks) also won't work because
// the atlas persists even beyond the last use in an op - for a given flush. Given this, atlases
// must explicitly manage the lifetime of their backing proxies via the onFlushCallback system
// (which calls this method).
void GrDrawOpAtlas::instantiate(GrOnFlushResourceProvider* onFlushResourceProvider) {
    for (uint32_t i = 0; i < fNumActivePages; ++i) {
        // All the atlas pages are now instantiated at flush time in the activeNewPage method.
        SkASSERT(fProxies[i] && fProxies[i]->isInstantiated());
    }
}

// Factory. Builds an atlas with the requested page/plot geometry and registers the
// caller's eviction callback. Returns nullptr if the backing proxies could not be
// created (checked via the first proxy slot, which createPages fills or leaves null).
std::unique_ptr<GrDrawOpAtlas> GrDrawOpAtlas::Make(GrProxyProvider* proxyProvider,
                                                   const GrBackendFormat& format,
                                                   GrPixelConfig config, int width,
                                                   int height, int plotWidth, int plotHeight,
                                                   AllowMultitexturing allowMultitexturing,
                                                   GrDrawOpAtlas::EvictionFunc func, void* data) {
    std::unique_ptr<GrDrawOpAtlas> atlas(new GrDrawOpAtlas(proxyProvider, format, config, width,
                                                           height, plotWidth, plotHeight,
                                                           allowMultitexturing));
    if (!atlas->getProxies()[0]) {
        return nullptr;
    }

    atlas->registerEvictionCallback(func, data);
    return atlas;
}

#ifdef DUMP_ATLAS_DATA
static bool gDumpAtlasData = false;
#endif

////////////////////////////////////////////////////////////////////////////////

// A Plot is one rectangular sub-region of an atlas page. It owns a CPU-side staging
// buffer (fData), a rectanizer for packing sub-images, and a dirty rect describing
// what still needs uploading to the GPU texture.
GrDrawOpAtlas::Plot::Plot(int pageIndex, int plotIndex, uint64_t genID, int offX, int offY,
                          int width, int height, GrPixelConfig config)
        : fLastUpload(GrDeferredUploadToken::AlreadyFlushedToken())
        , fLastUse(GrDeferredUploadToken::AlreadyFlushedToken())
        , fFlushesSinceLastUse(0)
        , fPageIndex(pageIndex)
        , fPlotIndex(plotIndex)
        , fGenID(genID)
        , fID(CreateId(fPageIndex, fPlotIndex, fGenID))
        , fData(nullptr)
        , fWidth(width)
        , fHeight(height)
        , fX(offX)
        , fY(offY)
        , fRects(nullptr)
        // fOffset is the plot's origin in page texel coordinates (offX/offY are plot-grid
        // coordinates, scaled here by the plot dimensions).
        , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
        , fConfig(config)
        , fBytesPerPixel(GrBytesPerPixel(config))
#ifdef SK_DEBUG
        , fDirty(false)
#endif
{
    // We expect the allocated dimensions to be a multiple of 4 bytes
    SkASSERT(((width*fBytesPerPixel) & 0x3) == 0);
    // The padding for faster uploads only works for 1, 2 and 4 byte texels
    SkASSERT(fBytesPerPixel != 3 && fBytesPerPixel <= 4);
    fDirtyRect.setEmpty();
}

GrDrawOpAtlas::Plot::~Plot() {
    // fData was sk_calloc'd lazily in addSubImage; fRects came from GrRectanizer::Factory.
    sk_free(fData);
    delete fRects;
}

// Attempts to pack a width x height sub-image into this plot. On success, copies the
// pixels into the staging buffer (swizzling RGBA->BGRA when the build's kSkia8888
// config is BGRA), extends the dirty rect, and returns the sub-image's location in
// *loc translated to page texel coordinates. Returns false if the rectanizer is full.
bool GrDrawOpAtlas::Plot::addSubImage(int width, int height, const void* image, SkIPoint16* loc) {
    SkASSERT(width <= fWidth && height <= fHeight);

    // Lazily create the rectanizer and staging buffer so empty plots cost nothing.
    if (!fRects) {
        fRects = GrRectanizer::Factory(fWidth, fHeight);
    }

    if (!fRects->addRect(width, height, loc)) {
        return false;
    }

    if (!fData) {
        fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
                                                                 fHeight));
    }
    size_t rowBytes = width * fBytesPerPixel;
    const unsigned char* imagePtr = (const unsigned char*)image;
    // point ourselves at the right starting spot
    unsigned char* dataPtr = fData;
    dataPtr += fBytesPerPixel * fWidth * loc->fY;
    dataPtr += fBytesPerPixel * loc->fX;
    // copy into the data buffer, swizzling as we go if this is ARGB data
    // (the condition is a compile-time constant comparison of pixel-config enums)
    if (4 == fBytesPerPixel && kSkia8888_GrPixelConfig == kBGRA_8888_GrPixelConfig) {
        for (int i = 0; i < height; ++i) {
            SkOpts::RGBA_to_BGRA((uint32_t*)dataPtr, (const uint32_t*)imagePtr, width);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }
    } else {
        for (int i = 0; i < height; ++i) {
            memcpy(dataPtr, imagePtr, rowBytes);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }
    }

    fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height);

    // Translate from plot-local to page texel coordinates for the caller.
    loc->fX += fOffset.fX;
    loc->fY += fOffset.fY;
    SkDEBUGCODE(fDirty = true;)

    return true;
}

// Flushes the dirty region of the staging buffer to the page's texture via the
// deferred-upload writePixels callback, then clears the dirty state.
void GrDrawOpAtlas::Plot::uploadToTexture(GrDeferredTextureUploadWritePixelsFn& writePixels,
                                          GrTextureProxy* proxy) {
    // We should only be issuing uploads if we are in fact dirty
    SkASSERT(fDirty && fData && proxy && proxy->peekTexture());
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    size_t rowBytes = fBytesPerPixel * fWidth;
    const unsigned char* dataPtr = fData;
    // Clamp to 4-byte aligned boundaries
    // (clearBits is 3 for 1bpp, 1 for 2bpp, 0 for 4bpp — widen left edge down,
    // right edge up, so each uploaded row starts/ends on a 4-byte boundary)
    unsigned int clearBits = 0x3 / fBytesPerPixel;
    fDirtyRect.fLeft &= ~clearBits;
    fDirtyRect.fRight += clearBits;
    fDirtyRect.fRight &= ~clearBits;
    SkASSERT(fDirtyRect.fRight <= fWidth);
    // Set up dataPtr
    dataPtr += rowBytes * fDirtyRect.fTop;
    dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
    // TODO: Make GrDrawOpAtlas store a GrColorType rather than GrPixelConfig.
    auto colorType = GrPixelConfigToColorType(fConfig);
    writePixels(proxy, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
                fDirtyRect.width(), fDirtyRect.height(), colorType, dataPtr, rowBytes);
    fDirtyRect.setEmpty();
    SkDEBUGCODE(fDirty = false;)
}

// Evicts everything in the plot: resets the packer, bumps the generation (which
// invalidates all outstanding AtlasIDs referring to this plot), and zeroes the
// staging buffer.
void GrDrawOpAtlas::Plot::resetRects() {
    if (fRects) {
        fRects->reset();
    }

    // A new generation means previously handed-out IDs for this plot no longer match.
    fGenID++;
    fID = CreateId(fPageIndex, fPlotIndex, fGenID);
    fLastUpload = GrDeferredUploadToken::AlreadyFlushedToken();
    fLastUse = GrDeferredUploadToken::AlreadyFlushedToken();

    // zero out the plot
    if (fData) {
        sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
    }

    fDirtyRect.setEmpty();
    SkDEBUGCODE(fDirty = false;)
}

///////////////////////////////////////////////////////////////////////////////

// Sets up the atlas geometry and eagerly creates (but does not instantiate) the
// backing proxies for every potential page via createPages.
GrDrawOpAtlas::GrDrawOpAtlas(GrProxyProvider* proxyProvider, const GrBackendFormat& format,
                             GrPixelConfig config, int width, int height,
                             int plotWidth, int plotHeight, AllowMultitexturing allowMultitexturing)
        : fFormat(format)
        , fPixelConfig(config)
        , fTextureWidth(width)
        , fTextureHeight(height)
        , fPlotWidth(plotWidth)
        , fPlotHeight(plotHeight)
        , fAtlasGeneration(kInvalidAtlasGeneration + 1)
        , fPrevFlushToken(GrDeferredUploadToken::AlreadyFlushedToken())
        , fMaxPages(AllowMultitexturing::kYes == allowMultitexturing ? kMaxMultitexturePages : 1)
        , fNumActivePages(0) {
    int numPlotsX = width/plotWidth;
    int numPlotsY = height/plotHeight;
    SkASSERT(numPlotsX * numPlotsY <= GrDrawOpAtlas::kMaxPlots);
    // Pages must be an exact grid of plots.
    SkASSERT(fPlotWidth * numPlotsX == fTextureWidth);
    SkASSERT(fPlotHeight * numPlotsY == fTextureHeight);

    fNumPlots = numPlotsX * numPlotsY;

    this->createPages(proxyProvider);
}

// Notifies all registered eviction callbacks that 'id' is being evicted and bumps
// the atlas generation so cached keys referencing the old contents are invalidated.
inline void GrDrawOpAtlas::processEviction(AtlasID id) {
    for (int i = 0; i < fEvictionCallbacks.count(); i++) {
        (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData);
    }
    ++fAtlasGeneration;
}

// Marks 'plot' most-recently-used and schedules an ASAP upload of its dirty region
// if one isn't already pending. Writes the plot's id to *id.
inline bool GrDrawOpAtlas::updatePlot(GrDeferredUploadTarget* target, AtlasID* id, Plot* plot) {
    int pageIdx = GetPageIndexFromID(plot->id());
    this->makeMRU(plot, pageIdx);

    // If our most recent upload has already occurred then we have to insert a new
    // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
    // This new update will piggy back on that previously scheduled update.
    if (plot->lastUploadToken() < target->tokenTracker()->nextTokenToFlush()) {
        // With C++14 we could move sk_sp into lambda to only ref once.
        sk_sp<Plot> plotsp(SkRef(plot));

        GrTextureProxy* proxy = fProxies[pageIdx].get();
        SkASSERT(proxy->isInstantiated());  // This is occurring at flush time

        GrDeferredUploadToken lastUploadToken = target->addASAPUpload(
                [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
                    plotsp->uploadToTexture(writePixels, proxy);
                });
        plot->setLastUploadToken(lastUploadToken);
    }
    *id = plot->id();
    return true;
}

// Tries to place the sub-image in any plot of the given page (most-recently-used
// first). Returns true and schedules the upload on success; false if the page is full.
bool GrDrawOpAtlas::uploadToPage(unsigned int pageIdx, AtlasID* id, GrDeferredUploadTarget* target,
                                 int width, int height, const void* image, SkIPoint16* loc) {
    SkASSERT(fProxies[pageIdx] && fProxies[pageIdx]->isInstantiated());

    // look through all allocated plots for one we can share, in Most Recently Refed order
    PlotList::Iter plotIter;
    plotIter.init(fPages[pageIdx].fPlotList, PlotList::Iter::kHead_IterStart);

    for (Plot* plot = plotIter.get(); plot; plot = plotIter.next()) {
        SkASSERT(GrBytesPerPixel(fProxies[pageIdx]->config()) == plot->bpp());

        if (plot->addSubImage(width, height, image, loc)) {
            return this->updatePlot(target, id, plot);
        }
    }

    return false;
}

// Number of atlas-related flushes beyond which we consider a plot to no longer be in use.
//
// This value is somewhat arbitrary -- the idea is to keep it low enough that
// a page with unused plots will get removed reasonably quickly, but allow it
// to hang around for a bit in case it's needed. The assumption is that flushes
// are rare; i.e., we are not continually refreshing the frame.
262 static constexpr auto kRecentlyUsedCount = 256; 263 264 GrDrawOpAtlas::ErrorCode GrDrawOpAtlas::addToAtlas(GrResourceProvider* resourceProvider, 265 AtlasID* id, GrDeferredUploadTarget* target, 266 int width, int height, 267 const void* image, SkIPoint16* loc) { 268 if (width > fPlotWidth || height > fPlotHeight) { 269 return ErrorCode::kError; 270 } 271 272 // Look through each page to see if we can upload without having to flush 273 // We prioritize this upload to the first pages, not the most recently used, to make it easier 274 // to remove unused pages in reverse page order. 275 for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) { 276 if (this->uploadToPage(pageIdx, id, target, width, height, image, loc)) { 277 return ErrorCode::kSucceeded; 278 } 279 } 280 281 // If the above fails, then see if the least recently used plot per page has already been 282 // flushed to the gpu if we're at max page allocation, or if the plot has aged out otherwise. 283 // We wait until we've grown to the full number of pages to begin evicting already flushed 284 // plots so that we can maximize the opportunity for reuse. 285 // As before we prioritize this upload to the first pages, not the most recently used. 
286 if (fNumActivePages == this->maxPages()) { 287 for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) { 288 Plot* plot = fPages[pageIdx].fPlotList.tail(); 289 SkASSERT(plot); 290 if (plot->lastUseToken() < target->tokenTracker()->nextTokenToFlush()) { 291 this->processEvictionAndResetRects(plot); 292 SkASSERT(GrBytesPerPixel(fProxies[pageIdx]->config()) == plot->bpp()); 293 SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc); 294 SkASSERT(verify); 295 if (!this->updatePlot(target, id, plot)) { 296 return ErrorCode::kError; 297 } 298 return ErrorCode::kSucceeded; 299 } 300 } 301 } else { 302 // If we haven't activated all the available pages, try to create a new one and add to it 303 if (!this->activateNewPage(resourceProvider)) { 304 return ErrorCode::kError; 305 } 306 307 if (this->uploadToPage(fNumActivePages-1, id, target, width, height, image, loc)) { 308 return ErrorCode::kSucceeded; 309 } else { 310 // If we fail to upload to a newly activated page then something has gone terribly 311 // wrong - return an error 312 return ErrorCode::kError; 313 } 314 } 315 316 if (!fNumActivePages) { 317 return ErrorCode::kError; 318 } 319 320 // Try to find a plot that we can perform an inline upload to. 321 // We prioritize this upload in reverse order of pages to counterbalance the order above. 322 Plot* plot = nullptr; 323 for (int pageIdx = ((int)fNumActivePages)-1; pageIdx >= 0; --pageIdx) { 324 Plot* currentPlot = fPages[pageIdx].fPlotList.tail(); 325 if (currentPlot->lastUseToken() != target->tokenTracker()->nextDrawToken()) { 326 plot = currentPlot; 327 break; 328 } 329 } 330 331 // If we can't find a plot that is not used in a draw currently being prepared by an op, then 332 // we have to fail. This gives the op a chance to enqueue the draw, and call back into this 333 // function. 
When that draw is enqueued, the draw token advances, and the subsequent call will 334 // continue past this branch and prepare an inline upload that will occur after the enqueued 335 // draw which references the plot's pre-upload content. 336 if (!plot) { 337 return ErrorCode::kTryAgain; 338 } 339 340 this->processEviction(plot->id()); 341 int pageIdx = GetPageIndexFromID(plot->id()); 342 fPages[pageIdx].fPlotList.remove(plot); 343 sk_sp<Plot>& newPlot = fPages[pageIdx].fPlotArray[plot->index()]; 344 newPlot.reset(plot->clone()); 345 346 fPages[pageIdx].fPlotList.addToHead(newPlot.get()); 347 SkASSERT(GrBytesPerPixel(fProxies[pageIdx]->config()) == newPlot->bpp()); 348 SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc); 349 SkASSERT(verify); 350 351 // Note that this plot will be uploaded inline with the draws whereas the 352 // one it displaced most likely was uploaded ASAP. 353 // With c+14 we could move sk_sp into lambda to only ref once. 354 sk_sp<Plot> plotsp(SkRef(newPlot.get())); 355 356 GrTextureProxy* proxy = fProxies[pageIdx].get(); 357 SkASSERT(proxy->isInstantiated()); 358 359 GrDeferredUploadToken lastUploadToken = target->addInlineUpload( 360 [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) { 361 plotsp->uploadToTexture(writePixels, proxy); 362 }); 363 newPlot->setLastUploadToken(lastUploadToken); 364 365 *id = newPlot->id(); 366 367 return ErrorCode::kSucceeded; 368 } 369 370 void GrDrawOpAtlas::compact(GrDeferredUploadToken startTokenForNextFlush) { 371 if (fNumActivePages <= 1) { 372 fPrevFlushToken = startTokenForNextFlush; 373 return; 374 } 375 376 // For all plots, reset number of flushes since used if used this frame. 
377 PlotList::Iter plotIter; 378 bool atlasUsedThisFlush = false; 379 for (uint32_t pageIndex = 0; pageIndex < fNumActivePages; ++pageIndex) { 380 plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart); 381 while (Plot* plot = plotIter.get()) { 382 // Reset number of flushes since used 383 if (plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) { 384 plot->resetFlushesSinceLastUsed(); 385 atlasUsedThisFlush = true; 386 } 387 388 plotIter.next(); 389 } 390 } 391 392 // We only try to compact if the atlas was used in the recently completed flush. 393 // This is to handle the case where a lot of text or path rendering has occurred but then just 394 // a blinking cursor is drawn. 395 // TODO: consider if we should also do this if it's been a long time since the last atlas use 396 if (atlasUsedThisFlush) { 397 SkTArray<Plot*> availablePlots; 398 uint32_t lastPageIndex = fNumActivePages - 1; 399 400 // For all plots but the last one, update number of flushes since used, and check to see 401 // if there are any in the first pages that the last page can safely upload to. 
402 for (uint32_t pageIndex = 0; pageIndex < lastPageIndex; ++pageIndex) { 403 #ifdef DUMP_ATLAS_DATA 404 if (gDumpAtlasData) { 405 SkDebugf("page %d: ", pageIndex); 406 } 407 #endif 408 plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart); 409 while (Plot* plot = plotIter.get()) { 410 // Update number of flushes since plot was last used 411 // We only increment the 'sinceLastUsed' count for flushes where the atlas was used 412 // to avoid deleting everything when we return to text drawing in the blinking 413 // cursor case 414 if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) { 415 plot->incFlushesSinceLastUsed(); 416 } 417 418 #ifdef DUMP_ATLAS_DATA 419 if (gDumpAtlasData) { 420 SkDebugf("%d ", plot->flushesSinceLastUsed()); 421 } 422 #endif 423 // Count plots we can potentially upload to in all pages except the last one 424 // (the potential compactee). 425 if (plot->flushesSinceLastUsed() > kRecentlyUsedCount) { 426 availablePlots.push_back() = plot; 427 } 428 429 plotIter.next(); 430 } 431 #ifdef DUMP_ATLAS_DATA 432 if (gDumpAtlasData) { 433 SkDebugf("\n"); 434 } 435 #endif 436 } 437 438 // Count recently used plots in the last page and evict any that are no longer in use. 439 // Since we prioritize uploading to the first pages, this will eventually 440 // clear out usage of this page unless we have a large need. 
441 plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart); 442 unsigned int usedPlots = 0; 443 #ifdef DUMP_ATLAS_DATA 444 if (gDumpAtlasData) { 445 SkDebugf("page %d: ", lastPageIndex); 446 } 447 #endif 448 while (Plot* plot = plotIter.get()) { 449 // Update number of flushes since plot was last used 450 if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) { 451 plot->incFlushesSinceLastUsed(); 452 } 453 454 #ifdef DUMP_ATLAS_DATA 455 if (gDumpAtlasData) { 456 SkDebugf("%d ", plot->flushesSinceLastUsed()); 457 } 458 #endif 459 // If this plot was used recently 460 if (plot->flushesSinceLastUsed() <= kRecentlyUsedCount) { 461 usedPlots++; 462 } else if (plot->lastUseToken() != GrDeferredUploadToken::AlreadyFlushedToken()) { 463 // otherwise if aged out just evict it. 464 this->processEvictionAndResetRects(plot); 465 } 466 plotIter.next(); 467 } 468 #ifdef DUMP_ATLAS_DATA 469 if (gDumpAtlasData) { 470 SkDebugf("\n"); 471 } 472 #endif 473 474 // If recently used plots in the last page are using less than a quarter of the page, try 475 // to evict them if there's available space in earlier pages. Since we prioritize uploading 476 // to the first pages, this will eventually clear out usage of this page unless we have a 477 // large need. 478 if (availablePlots.count() && usedPlots && usedPlots <= fNumPlots / 4) { 479 plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart); 480 while (Plot* plot = plotIter.get()) { 481 // If this plot was used recently 482 if (plot->flushesSinceLastUsed() <= kRecentlyUsedCount) { 483 // See if there's room in an earlier page and if so evict. 484 // We need to be somewhat harsh here so that a handful of plots that are 485 // consistently in use don't end up locking the page in memory. 
486 if (availablePlots.count() > 0) { 487 this->processEvictionAndResetRects(plot); 488 this->processEvictionAndResetRects(availablePlots.back()); 489 availablePlots.pop_back(); 490 --usedPlots; 491 } 492 if (!usedPlots || !availablePlots.count()) { 493 break; 494 } 495 } 496 plotIter.next(); 497 } 498 } 499 500 // If none of the plots in the last page have been used recently, delete it. 501 if (!usedPlots) { 502 #ifdef DUMP_ATLAS_DATA 503 if (gDumpAtlasData) { 504 SkDebugf("delete %d\n", fNumPages-1); 505 } 506 #endif 507 this->deactivateLastPage(); 508 } 509 } 510 511 fPrevFlushToken = startTokenForNextFlush; 512 } 513 514 bool GrDrawOpAtlas::createPages(GrProxyProvider* proxyProvider) { 515 SkASSERT(SkIsPow2(fTextureWidth) && SkIsPow2(fTextureHeight)); 516 517 GrSurfaceDesc desc; 518 desc.fFlags = kNone_GrSurfaceFlags; 519 desc.fWidth = fTextureWidth; 520 desc.fHeight = fTextureHeight; 521 desc.fConfig = fPixelConfig; 522 523 int numPlotsX = fTextureWidth/fPlotWidth; 524 int numPlotsY = fTextureHeight/fPlotHeight; 525 526 for (uint32_t i = 0; i < this->maxPages(); ++i) { 527 fProxies[i] = proxyProvider->createProxy(fFormat, desc, kTopLeft_GrSurfaceOrigin, 528 SkBackingFit::kExact, SkBudgeted::kYes, GrInternalSurfaceFlags::kNoPendingIO); 529 if (!fProxies[i]) { 530 return false; 531 } 532 533 // set up allocated plots 534 fPages[i].fPlotArray.reset(new sk_sp<Plot>[ numPlotsX * numPlotsY ]); 535 536 sk_sp<Plot>* currPlot = fPages[i].fPlotArray.get(); 537 for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) { 538 for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) { 539 uint32_t plotIndex = r * numPlotsX + c; 540 currPlot->reset(new Plot(i, plotIndex, 1, x, y, fPlotWidth, fPlotHeight, 541 fPixelConfig)); 542 543 // build LRU list 544 fPages[i].fPlotList.addToHead(currPlot->get()); 545 ++currPlot; 546 } 547 } 548 549 } 550 551 return true; 552 } 553 554 555 bool GrDrawOpAtlas::activateNewPage(GrResourceProvider* resourceProvider) { 556 SkASSERT(fNumActivePages < 
this->maxPages()); 557 558 if (!fProxies[fNumActivePages]->instantiate(resourceProvider)) { 559 return false; 560 } 561 562 #ifdef DUMP_ATLAS_DATA 563 if (gDumpAtlasData) { 564 SkDebugf("activated page#: %d\n", fNumActivePages); 565 } 566 #endif 567 568 ++fNumActivePages; 569 return true; 570 } 571 572 573 inline void GrDrawOpAtlas::deactivateLastPage() { 574 SkASSERT(fNumActivePages); 575 576 uint32_t lastPageIndex = fNumActivePages - 1; 577 578 int numPlotsX = fTextureWidth/fPlotWidth; 579 int numPlotsY = fTextureHeight/fPlotHeight; 580 581 fPages[lastPageIndex].fPlotList.reset(); 582 for (int r = 0; r < numPlotsY; ++r) { 583 for (int c = 0; c < numPlotsX; ++c) { 584 uint32_t plotIndex = r * numPlotsX + c; 585 586 Plot* currPlot = fPages[lastPageIndex].fPlotArray[plotIndex].get(); 587 currPlot->resetRects(); 588 currPlot->resetFlushesSinceLastUsed(); 589 590 // rebuild the LRU list 591 SkDEBUGCODE(currPlot->fPrev = currPlot->fNext = nullptr); 592 SkDEBUGCODE(currPlot->fList = nullptr); 593 fPages[lastPageIndex].fPlotList.addToHead(currPlot); 594 } 595 } 596 597 // remove ref to the backing texture 598 fProxies[lastPageIndex]->deinstantiate(); 599 --fNumActivePages; 600 } 601 602 GrDrawOpAtlasConfig::GrDrawOpAtlasConfig(int maxTextureSize, size_t maxBytes) { 603 static const SkISize kARGBDimensions[] = { 604 {256, 256}, // maxBytes < 2^19 605 {512, 256}, // 2^19 <= maxBytes < 2^20 606 {512, 512}, // 2^20 <= maxBytes < 2^21 607 {1024, 512}, // 2^21 <= maxBytes < 2^22 608 {1024, 1024}, // 2^22 <= maxBytes < 2^23 609 {2048, 1024}, // 2^23 <= maxBytes 610 }; 611 612 // Index 0 corresponds to maxBytes of 2^18, so start by dividing it by that 613 maxBytes >>= 18; 614 // Take the floor of the log to get the index 615 int index = maxBytes > 0 616 ? 
SkTPin<int>(SkPrevLog2(maxBytes), 0, SK_ARRAY_COUNT(kARGBDimensions) - 1) 617 : 0; 618 619 SkASSERT(kARGBDimensions[index].width() <= kMaxAtlasDim); 620 SkASSERT(kARGBDimensions[index].height() <= kMaxAtlasDim); 621 fARGBDimensions.set(SkTMin<int>(kARGBDimensions[index].width(), maxTextureSize), 622 SkTMin<int>(kARGBDimensions[index].height(), maxTextureSize)); 623 fMaxTextureSize = SkTMin<int>(maxTextureSize, kMaxAtlasDim); 624 } 625 626 SkISize GrDrawOpAtlasConfig::atlasDimensions(GrMaskFormat type) const { 627 if (kA8_GrMaskFormat == type) { 628 // A8 is always 2x the ARGB dimensions, clamped to the max allowed texture size 629 return { SkTMin<int>(2 * fARGBDimensions.width(), fMaxTextureSize), 630 SkTMin<int>(2 * fARGBDimensions.height(), fMaxTextureSize) }; 631 } else { 632 return fARGBDimensions; 633 } 634 } 635 636 SkISize GrDrawOpAtlasConfig::plotDimensions(GrMaskFormat type) const { 637 if (kA8_GrMaskFormat == type) { 638 SkISize atlasDimensions = this->atlasDimensions(type); 639 // For A8 we want to grow the plots at larger texture sizes to accept more of the 640 // larger SDF glyphs. Since the largest SDF glyph can be 170x170 with padding, this 641 // allows us to pack 3 in a 512x256 plot, or 9 in a 512x512 plot. 642 643 // This will give us 512x256 plots for 2048x1024, 512x512 plots for 2048x2048, 644 // and 256x256 plots otherwise. 645 int plotWidth = atlasDimensions.width() >= 2048 ? 512 : 256; 646 int plotHeight = atlasDimensions.height() >= 2048 ? 512 : 256; 647 648 return { plotWidth, plotHeight }; 649 } else { 650 // ARGB and LCD always use 256x256 plots -- this has been shown to be faster 651 return { 256, 256 }; 652 } 653 } 654 655 constexpr int GrDrawOpAtlasConfig::kMaxAtlasDim; 656