/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrDrawOpAtlas.h"

#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrOpFlushState.h"
#include "GrRectanizer.h"
#include "GrProxyProvider.h"
#include "GrResourceProvider.h"
#include "GrTexture.h"
#include "GrTracing.h"

// Factory for the atlas. Construction can fail if the first page's texture proxy
// cannot be created (checked via getProxies()[0]); in that case return nullptr
// rather than an unusable atlas.
std::unique_ptr<GrDrawOpAtlas> GrDrawOpAtlas::Make(GrContext* ctx, GrPixelConfig config, int width,
                                                   int height, int numPlotsX, int numPlotsY,
                                                   AllowMultitexturing allowMultitexturing,
                                                   GrDrawOpAtlas::EvictionFunc func, void* data) {
    std::unique_ptr<GrDrawOpAtlas> atlas(new GrDrawOpAtlas(ctx, config, width, height, numPlotsX,
                                                           numPlotsY, allowMultitexturing));
    if (!atlas->getProxies()[0]) {
        return nullptr;
    }

    atlas->registerEvictionCallback(func, data);
    return atlas;
}

#ifdef DUMP_ATLAS_DATA
static bool gDumpAtlasData = false;
#endif

////////////////////////////////////////////////////////////////////////////////

// A Plot is one rectangular sub-region of an atlas page. (offX, offY) is the
// plot's position in the page's plot grid; fOffset converts that grid position
// into pixel coordinates within the page. The CPU-side backing store (fData)
// and the rectanizer (fRects) are allocated lazily on first use.
GrDrawOpAtlas::Plot::Plot(int pageIndex, int plotIndex, uint64_t genID, int offX, int offY,
                          int width, int height, GrPixelConfig config)
        : fLastUpload(GrDeferredUploadToken::AlreadyFlushedToken())
        , fLastUse(GrDeferredUploadToken::AlreadyFlushedToken())
        , fFlushesSinceLastUse(0)
        , fPageIndex(pageIndex)
        , fPlotIndex(plotIndex)
        , fGenID(genID)
        , fID(CreateId(fPageIndex, fPlotIndex, fGenID))
        , fData(nullptr)
        , fWidth(width)
        , fHeight(height)
        , fX(offX)
        , fY(offY)
        , fRects(nullptr)
        , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
        , fConfig(config)
        , fBytesPerPixel(GrBytesPerPixel(config))
#ifdef SK_DEBUG
        , fDirty(false)
#endif
{
    fDirtyRect.setEmpty();
}

GrDrawOpAtlas::Plot::~Plot() {
    // fData was allocated with sk_calloc_throw; fRects was created by
    // GrRectanizer::Factory. Both may be null if never used.
    sk_free(fData);
    delete fRects;
}

// Attempts to place a width x height image inside this plot. On success, the
// pixels are copied into the plot's CPU-side buffer, the dirty rect is grown to
// cover them, and *loc is converted from plot-local to page (atlas) coordinates.
// Returns false if the rectanizer has no room.
bool GrDrawOpAtlas::Plot::addSubImage(int width, int height, const void* image,
                                      SkIPoint16* loc) {
    SkASSERT(width <= fWidth && height <= fHeight);

    // Lazily create the rectangle packer for this plot.
    if (!fRects) {
        fRects = GrRectanizer::Factory(fWidth, fHeight);
    }

    if (!fRects->addRect(width, height, loc)) {
        return false;
    }

    // Lazily allocate the zero-initialized CPU backing store for the whole plot.
    if (!fData) {
        fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
                                                                 fHeight));
    }
    size_t rowBytes = width * fBytesPerPixel;
    const unsigned char* imagePtr = (const unsigned char*)image;
    // point ourselves at the right starting spot
    unsigned char* dataPtr = fData;
    dataPtr += fBytesPerPixel * fWidth * loc->fY;
    dataPtr += fBytesPerPixel * loc->fX;
    // copy into the data buffer, swizzling as we go if this is ARGB data
    // (the second comparison is a compile-time check of whether Skia's native
    // 8888 config is BGRA on this platform)
    if (4 == fBytesPerPixel && kSkia8888_GrPixelConfig == kBGRA_8888_GrPixelConfig) {
        for (int i = 0; i < height; ++i) {
            SkOpts::RGBA_to_BGRA(reinterpret_cast<uint32_t*>(dataPtr), imagePtr, width);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }
    } else {
        for (int i = 0; i < height; ++i) {
            memcpy(dataPtr, imagePtr, rowBytes);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }
    }

    fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height);

    // Translate the location from plot-local coordinates to page coordinates.
    loc->fX += fOffset.fX;
    loc->fY += fOffset.fY;
    SkDEBUGCODE(fDirty = true;)

    return true;
}

// Uploads this plot's dirty region from the CPU backing store to the page's
// texture via the provided writePixels callback, then clears the dirty state.
void GrDrawOpAtlas::Plot::uploadToTexture(GrDeferredTextureUploadWritePixelsFn& writePixels,
                                          GrTextureProxy* proxy) {
    // We should only be issuing uploads if we are in fact dirty
    SkASSERT(fDirty && fData && proxy && proxy->priv().peekTexture());
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    size_t rowBytes = fBytesPerPixel * fWidth;
    // Advance to the top-left of the dirty rect within the CPU buffer.
    const unsigned char* dataPtr = fData;
    dataPtr += rowBytes * fDirtyRect.fTop;
    dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
    writePixels(proxy, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
                fDirtyRect.width(), fDirtyRect.height(), fConfig, dataPtr, rowBytes);
    fDirtyRect.setEmpty();
    SkDEBUGCODE(fDirty = false;)
}

// Evicts all content from the plot: resets the rectanizer, bumps the generation
// (which changes the plot's ID so stale AtlasIDs no longer match), resets the
// upload/use tokens, and zeroes the CPU backing store.
void GrDrawOpAtlas::Plot::resetRects() {
    if (fRects) {
        fRects->reset();
    }

    fGenID++;
    fID = CreateId(fPageIndex, fPlotIndex, fGenID);
    fLastUpload = GrDeferredUploadToken::AlreadyFlushedToken();
    fLastUse = GrDeferredUploadToken::AlreadyFlushedToken();

    // zero out the plot
    if (fData) {
        sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
    }

    fDirtyRect.setEmpty();
    SkDEBUGCODE(fDirty = false;)
}

///////////////////////////////////////////////////////////////////////////////

// The atlas is created with one page; additional pages (up to maxPages()) are
// added on demand in addToAtlas(). Plot dimensions must evenly divide the
// texture dimensions (asserted below).
GrDrawOpAtlas::GrDrawOpAtlas(GrContext* context, GrPixelConfig config, int width, int height,
                             int numPlotsX, int numPlotsY, AllowMultitexturing allowMultitexturing)
        : fContext(context)
        , fPixelConfig(config)
        , fTextureWidth(width)
        , fTextureHeight(height)
        , fAtlasGeneration(kInvalidAtlasGeneration + 1)
        , fPrevFlushToken(GrDeferredUploadToken::AlreadyFlushedToken())
        , fAllowMultitexturing(allowMultitexturing)
        , fNumPages(0) {
    fPlotWidth = fTextureWidth / numPlotsX;
    fPlotHeight = fTextureHeight / numPlotsY;
    SkASSERT(numPlotsX * numPlotsY <= BulkUseTokenUpdater::kMaxPlots);
    SkASSERT(fPlotWidth * numPlotsX == fTextureWidth);
    SkASSERT(fPlotHeight * numPlotsY == fTextureHeight);

    SkDEBUGCODE(fNumPlots = numPlotsX * numPlotsY;)

    this->createNewPage();
}

// Notifies all registered eviction callbacks that the plot with the given id is
// being evicted, and bumps the atlas generation so cached references invalidate.
inline void GrDrawOpAtlas::processEviction(AtlasID id) {
    for (int i = 0; i < fEvictionCallbacks.count(); i++) {
        (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData);
    }
    ++fAtlasGeneration;
}

// Marks the plot most-recently-used and ensures an upload of its data is
// scheduled. Returns false only if the page's proxy fails to instantiate.
inline bool GrDrawOpAtlas::updatePlot(GrDeferredUploadTarget* target, AtlasID* id, Plot* plot) {
    int pageIdx = GetPageIndexFromID(plot->id());
    this->makeMRU(plot, pageIdx);

    // If our most recent upload has already occurred then we have to insert a new
    // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
    // This new update will piggy back on that previously scheduled update.
    if (plot->lastUploadToken() < target->tokenTracker()->nextTokenToFlush()) {
        // With C++14 we could move sk_sp into lambda to only ref once.
        sk_sp<Plot> plotsp(SkRef(plot));

        // MDB TODO: this is currently fine since the atlas' proxy is always pre-instantiated.
        // Once it is deferred more care must be taken upon instantiation failure.
        if (!fProxies[pageIdx]->instantiate(fContext->contextPriv().resourceProvider())) {
            return false;
        }

        GrTextureProxy* proxy = fProxies[pageIdx].get();

        GrDeferredUploadToken lastUploadToken = target->addASAPUpload(
                [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
                    plotsp->uploadToTexture(writePixels, proxy);
                });
        plot->setLastUploadToken(lastUploadToken);
    }
    *id = plot->id();
    return true;
}

// Number of atlas-related flushes beyond which we consider a plot to no longer be in use.
//
// This value is somewhat arbitrary -- the idea is to keep it low enough that
// a page with unused plots will get removed reasonably quickly, but allow it
// to hang around for a bit in case it's needed. The assumption is that flushes
// are rare; i.e., we are not continually refreshing the frame.
static constexpr auto kRecentlyUsedCount = 256;

// Tries, in order: (1) placing the image in any existing plot without a flush;
// (2) recycling each page's least-recently-used plot if it has aged out or been
// flushed; (3) creating a new page; (4) replacing a plot via an inline upload.
// Returns false if none of these succeed (caller should enqueue its draw and
// retry), writing the chosen plot's AtlasID to *id on success.
bool GrDrawOpAtlas::addToAtlas(AtlasID* id, GrDeferredUploadTarget* target, int width, int height,
                               const void* image, SkIPoint16* loc) {
    // An image larger than a plot can never fit.
    if (width > fPlotWidth || height > fPlotHeight) {
        return false;
    }

    // Look through each page to see if we can upload without having to flush
    // We prioritize this upload to the first pages, not the most recently used, to make it easier
    // to remove unused pages in reverse page order.
    for (unsigned int pageIdx = 0; pageIdx < fNumPages; ++pageIdx) {
        SkASSERT(fProxies[pageIdx]);
        // look through all allocated plots for one we can share, in most-recently-used order
        PlotList::Iter plotIter;
        plotIter.init(fPages[pageIdx].fPlotList, PlotList::Iter::kHead_IterStart);
        Plot* plot;
        while ((plot = plotIter.get())) {
            SkASSERT(GrBytesPerPixel(fProxies[pageIdx]->config()) == plot->bpp());
            if (plot->addSubImage(width, height, image, loc)) {
                return this->updatePlot(target, id, plot);
            }
            plotIter.next();
        }
    }

    // If the above fails, then see if the least recently used plot per page has already been
    // flushed to the gpu if we're at max page allocation, or if the plot has aged out otherwise.
    // We wait until we've grown to the full number of pages to begin evicting already flushed
    // plots so that we can maximize the opportunity for reuse.
    // As before we prioritize this upload to the first pages, not the most recently used.
    for (unsigned int pageIdx = 0; pageIdx < fNumPages; ++pageIdx) {
        Plot* plot = fPages[pageIdx].fPlotList.tail();
        SkASSERT(plot);
        if ((fNumPages == this->maxPages() &&
             plot->lastUseToken() < target->tokenTracker()->nextTokenToFlush()) ||
            plot->flushesSinceLastUsed() >= kRecentlyUsedCount) {
            this->processEvictionAndResetRects(plot);
            SkASSERT(GrBytesPerPixel(fProxies[pageIdx]->config()) == plot->bpp());
            // A freshly reset plot must be able to hold an image no larger than a plot.
            SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc);
            SkASSERT(verify);
            if (!this->updatePlot(target, id, plot)) {
                return false;
            }
            return true;
        }
    }

    // If the simple cases fail, try to create a new page and add to it
    if (this->createNewPage()) {
        unsigned int pageIdx = fNumPages-1;
        SkASSERT(fProxies[pageIdx]);
        Plot* plot = fPages[pageIdx].fPlotList.head();
        SkASSERT(GrBytesPerPixel(fProxies[pageIdx]->config()) == plot->bpp());
        if (plot->addSubImage(width, height, image, loc)) {
            return this->updatePlot(target, id, plot);
        }

        // we shouldn't get here -- if so, something has gone terribly wrong
        SkASSERT(false);
        return false;
    }

    // Try to find a plot that we can perform an inline upload to.
    // We prioritize this upload in reverse order of pages to counterbalance the order above.
    Plot* plot = nullptr;
    for (int pageIdx = (int)(fNumPages-1); pageIdx >= 0; --pageIdx) {
        Plot* currentPlot = fPages[pageIdx].fPlotList.tail();
        if (currentPlot->lastUseToken() != target->tokenTracker()->nextDrawToken()) {
            plot = currentPlot;
            break;
        }
    }

    // If we can't find a plot that is not used in a draw currently being prepared by an op, then
    // we have to fail. This gives the op a chance to enqueue the draw, and call back into this
    // function. When that draw is enqueued, the draw token advances, and the subsequent call will
    // continue past this branch and prepare an inline upload that will occur after the enqueued
    // draw which references the plot's pre-upload content.
    if (!plot) {
        return false;
    }

    // Evict the plot and replace it with a clone so that in-flight draws keep
    // referencing the old plot's content while the clone takes new sub-images.
    this->processEviction(plot->id());
    int pageIdx = GetPageIndexFromID(plot->id());
    fPages[pageIdx].fPlotList.remove(plot);
    sk_sp<Plot>& newPlot = fPages[pageIdx].fPlotArray[plot->index()];
    newPlot.reset(plot->clone());

    fPages[pageIdx].fPlotList.addToHead(newPlot.get());
    SkASSERT(GrBytesPerPixel(fProxies[pageIdx]->config()) == newPlot->bpp());
    SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc);
    SkASSERT(verify);

    // Note that this plot will be uploaded inline with the draws whereas the
    // one it displaced most likely was uploaded ASAP.
    // With C++14 we could move sk_sp into lambda to only ref once.
    sk_sp<Plot> plotsp(SkRef(newPlot.get()));
    // MDB TODO: this is currently fine since the atlas' proxy is always pre-instantiated.
    // Once it is deferred more care must be taken upon instantiation failure.
    if (!fProxies[pageIdx]->instantiate(fContext->contextPriv().resourceProvider())) {
        return false;
    }
    GrTextureProxy* proxy = fProxies[pageIdx].get();

    GrDeferredUploadToken lastUploadToken = target->addInlineUpload(
            [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
                plotsp->uploadToTexture(writePixels, proxy);
            });
    newPlot->setLastUploadToken(lastUploadToken);

    *id = newPlot->id();

    return true;
}

// Called after a flush: updates per-plot usage accounting and, when the atlas
// has more than one page, tries to migrate usage off the last page so it can
// eventually be deleted.
void GrDrawOpAtlas::compact(GrDeferredUploadToken startTokenForNextFlush) {
    // Nothing to compact with a single page; just record the flush boundary.
    if (fNumPages <= 1) {
        fPrevFlushToken = startTokenForNextFlush;
        return;
    }

    // For all plots, reset number of flushes since used if used this frame.
    PlotList::Iter plotIter;
    bool atlasUsedThisFlush = false;
    for (uint32_t pageIndex = 0; pageIndex < fNumPages; ++pageIndex) {
        plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
        while (Plot* plot = plotIter.get()) {
            // Reset number of flushes since used
            if (plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                plot->resetFlushesSinceLastUsed();
                atlasUsedThisFlush = true;
            }

            plotIter.next();
        }
    }

    // We only try to compact if the atlas was used in the recently completed flush.
    // This is to handle the case where a lot of text or path rendering has occurred but then just
    // a blinking cursor is drawn.
    // TODO: consider if we should also do this if it's been a long time since the last atlas use
    if (atlasUsedThisFlush) {
        int availablePlots = 0;
        uint32_t lastPageIndex = fNumPages - 1;

        // For all plots but the last one, update number of flushes since used, and check to see
        // if there are any in the first pages that the last page can safely upload to.
        for (uint32_t pageIndex = 0; pageIndex < lastPageIndex; ++pageIndex) {
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("page %d: ", pageIndex);
            }
#endif
            plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
            while (Plot* plot = plotIter.get()) {
                // Update number of flushes since plot was last used
                // We only increment the 'sinceLastUsed' count for flushes where the atlas was used
                // to avoid deleting everything when we return to text drawing in the blinking
                // cursor case
                if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                    plot->incFlushesSinceLastUsed();
                }

#ifdef DUMP_ATLAS_DATA
                if (gDumpAtlasData) {
                    SkDebugf("%d ", plot->flushesSinceLastUsed());
                }
#endif
                // Count plots we can potentially upload to in all pages except the last one
                // (the potential compactee).
                if (plot->flushesSinceLastUsed() > kRecentlyUsedCount) {
                    ++availablePlots;
                }

                plotIter.next();
            }
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("\n");
            }
#endif
        }

        // Count recently used plots in the last page and evict them if there's available space
        // in earlier pages. Since we prioritize uploading to the first pages, this will eventually
        // clear out usage of this page unless we have a large need.
        plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
        int usedPlots = 0;
#ifdef DUMP_ATLAS_DATA
        if (gDumpAtlasData) {
            SkDebugf("page %d: ", lastPageIndex);
        }
#endif
        while (Plot* plot = plotIter.get()) {
            // Update number of flushes since plot was last used
            if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                plot->incFlushesSinceLastUsed();
            }

#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("%d ", plot->flushesSinceLastUsed());
            }
#endif
            // If this plot was used recently
            if (plot->flushesSinceLastUsed() <= kRecentlyUsedCount) {
                usedPlots++;
                // see if there's room in an earlier page and if so evict.
                // We need to be somewhat harsh here so that one plot that is consistently in use
                // doesn't end up locking the page in memory.
                if (availablePlots) {
                    this->processEvictionAndResetRects(plot);
                    --availablePlots;
                }
            } else if (plot->lastUseToken() != GrDeferredUploadToken::AlreadyFlushedToken()) {
                // otherwise if aged out just evict it.
                this->processEvictionAndResetRects(plot);
            }
            plotIter.next();
        }
#ifdef DUMP_ATLAS_DATA
        if (gDumpAtlasData) {
            SkDebugf("\n");
        }
#endif
        // If none of the plots in the last page have been used recently, delete it.
        if (!usedPlots) {
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("delete %d\n", fNumPages-1);
            }
#endif
            this->deleteLastPage();
        }
    }

    fPrevFlushToken = startTokenForNextFlush;
}

// Allocates a new atlas page: creates its texture proxy and populates its plot
// array and LRU list. Returns false if already at maxPages() or if proxy
// creation fails.
bool GrDrawOpAtlas::createNewPage() {
    if (fNumPages == this->maxPages()) {
        return false;
    }

    GrProxyProvider* proxyProvider = fContext->contextPriv().proxyProvider();

    GrSurfaceDesc desc;
    desc.fFlags = kNone_GrSurfaceFlags;
    desc.fOrigin = kTopLeft_GrSurfaceOrigin;
    desc.fWidth = fTextureWidth;
    desc.fHeight = fTextureHeight;
    desc.fConfig = fPixelConfig;

    SkASSERT(SkIsPow2(fTextureWidth) && SkIsPow2(fTextureHeight));
    fProxies[fNumPages] = proxyProvider->createProxy(desc, SkBackingFit::kExact, SkBudgeted::kYes,
                                                     GrResourceProvider::kNoPendingIO_Flag);
    if (!fProxies[fNumPages]) {
        return false;
    }

    int numPlotsX = fTextureWidth/fPlotWidth;
    int numPlotsY = fTextureHeight/fPlotHeight;

    // set up allocated plots
    fPages[fNumPages].fPlotArray.reset(new sk_sp<Plot>[ numPlotsX * numPlotsY ]);

    sk_sp<Plot>* currPlot = fPages[fNumPages].fPlotArray.get();
    // Plots are created in reverse grid order (bottom-right first) while the
    // array index (r, c) runs forward; each new plot is pushed onto the list head.
    for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
        for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
            uint32_t plotIndex = r * numPlotsX + c;
            currPlot->reset(new Plot(fNumPages, plotIndex, 1, x, y, fPlotWidth, fPlotHeight,
                                     fPixelConfig));

            // build LRU list
            fPages[fNumPages].fPlotList.addToHead(currPlot->get());
            ++currPlot;
        }
    }

#ifdef DUMP_ATLAS_DATA
    if (gDumpAtlasData) {
        SkDebugf("created %d\n", fNumPages);
    }
#endif
    fNumPages++;
    return true;
}

// Tears down the last page: drops its plots and releases its texture proxy.
inline void GrDrawOpAtlas::deleteLastPage() {
    uint32_t lastPageIndex = fNumPages - 1;
    // clean out the plots
    fPages[lastPageIndex].fPlotList.reset();
    fPages[lastPageIndex].fPlotArray.reset(nullptr);
    // remove ref to texture proxy
    fProxies[lastPageIndex].reset(nullptr);
    --fNumPages;
}