/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "OpenGLRenderer"
#define ATRACE_TAG ATRACE_TAG_VIEW

#include <SkCanvas.h>

#include <utils/Trace.h>
#include <ui/Rect.h>
#include <ui/Region.h>

#include "Caches.h"
#include "Debug.h"
#include "DeferredDisplayList.h"
#include "DisplayListOp.h"
#include "OpenGLRenderer.h"
#include "Properties.h"
#include "utils/MathUtils.h"

#if DEBUG_DEFER
    #define DEFER_LOGD(...) ALOGD(__VA_ARGS__)
#else
    #define DEFER_LOGD(...)
#endif

namespace android {
namespace uirenderer {

// Depth of the save stack at the beginning of batch playback at flush time
#define FLUSH_SAVE_STACK_DEPTH 2
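// (This is the base save plus the single save() issued in flush() before batch playback.)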

#define DEBUG_COLOR_BARRIER          0x1f000000
#define DEBUG_COLOR_MERGEDBATCH      0x5f7f7fff
#define DEBUG_COLOR_MERGEDBATCH_SOLO 0x5f7fff7f

/////////////////////////////////////////////////////////////////////////////////
// Operation Batches
/////////////////////////////////////////////////////////////////////////////////

class Batch {
public:
    virtual void replay(OpenGLRenderer& renderer, Rect& dirty, int index) = 0;
    virtual ~Batch() {}
    virtual bool purelyDrawBatch() { return false; }
    virtual bool coversBounds(const Rect& bounds) { return false; }
};

class DrawBatch : public Batch {
public:
    DrawBatch(const DeferInfo& deferInfo) : mAllOpsOpaque(true),
            mBatchId(deferInfo.batchId), mMergeId(deferInfo.mergeId) {
        mOps.clear();
    }

    virtual ~DrawBatch() { mOps.clear(); }

    virtual void add(DrawOp* op, const DeferredDisplayState* state, bool opaqueOverBounds) {
        // NOTE: ignore empty bounds special case, since we don't merge across those ops
        mBounds.unionWith(state->mBounds);
        mAllOpsOpaque &= opaqueOverBounds;
        mOps.add(OpStatePair(op, state));
    }

    bool intersects(const Rect& rect) {
        if (!rect.intersects(mBounds)) return false;

        for (unsigned int i = 0; i < mOps.size(); i++) {
            if (rect.intersects(mOps[i].state->mBounds)) {
#if DEBUG_DEFER
                DEFER_LOGD("op intersects with op %p with bounds %f %f %f %f:", mOps[i].op,
                        mOps[i].state->mBounds.left, mOps[i].state->mBounds.top,
                        mOps[i].state->mBounds.right, mOps[i].state->mBounds.bottom);
                mOps[i].op->output(2);
#endif
                return true;
            }
        }
        return false;
    }

    virtual void replay(OpenGLRenderer& renderer, Rect& dirty, int index) override {
        DEFER_LOGD("%d  replaying DrawBatch %p, with %d ops (batch id %x, merge id %p)",
                index, this, mOps.size(), getBatchId(), getMergeId());

        for (unsigned int i = 0; i < mOps.size(); i++) {
            DrawOp* op = mOps[i].op;
            const DeferredDisplayState* state = mOps[i].state;
            renderer.restoreDisplayState(*state);

#if DEBUG_DISPLAY_LIST_OPS_AS_EVENTS
            renderer.eventMark(op->name());
#endif
            op->applyDraw(renderer, dirty);

#if DEBUG_MERGE_BEHAVIOR
            const Rect& bounds = state->mBounds;
            int batchColor = 0x1f000000;
            if (getBatchId() & 0x1) batchColor |= 0x0000ff;
            if (getBatchId() & 0x2) batchColor |= 0x00ff00;
            if (getBatchId() & 0x4) batchColor |= 0xff0000;
            renderer.drawScreenSpaceColorRect(bounds.left, bounds.top, bounds.right, bounds.bottom,
                    batchColor);
#endif
        }
    }

    virtual bool purelyDrawBatch() override { return true; }

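    /*
     * Returns true only if the ops in this batch (each of which must be opaque over its own
     * bounds) collectively cover the given bounds with no gaps. Batches containing a single op
     * always return false.
     */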
    virtual bool coversBounds(const Rect& bounds) override {
        if (CC_LIKELY(!mAllOpsOpaque || !mBounds.contains(bounds) || count() == 1)) return false;

        Region uncovered(android::Rect(bounds.left, bounds.top, bounds.right, bounds.bottom));
        for (unsigned int i = 0; i < mOps.size(); i++) {
            const Rect &r = mOps[i].state->mBounds;
            uncovered.subtractSelf(android::Rect(r.left, r.top, r.right, r.bottom));
        }
        return uncovered.isEmpty();
    }

    inline int getBatchId() const { return mBatchId; }
    inline mergeid_t getMergeId() const { return mMergeId; }
    inline int count() const { return mOps.size(); }

protected:
    Vector<OpStatePair> mOps;
    Rect mBounds; // union of bounds of contained ops
private:
    bool mAllOpsOpaque;
    int mBatchId;
    mergeid_t mMergeId;
};

class MergingDrawBatch : public DrawBatch {
public:
    MergingDrawBatch(DeferInfo& deferInfo, int width, int height) :
            DrawBatch(deferInfo), mClipRect(width, height),
            mClipSideFlags(kClipSide_None) {}

    /*
     * Helper for determining if a new op can merge with a MergingDrawBatch based on their bounds
     * and clip side flags. A positive bounds delta on a side means the new op's bounds extend
     * beyond the batch's existing bounds on that side.
     */
    static inline bool checkSide(const int currentFlags, const int newFlags, const int side,
            float boundsDelta) {
        bool currentClipExists = currentFlags & side;
        bool newClipExists = newFlags & side;

        // if current is clipped, we must be able to fit new bounds in current
        if (boundsDelta > 0 && currentClipExists) return false;

        // if new is clipped, we must be able to fit current bounds in new
        if (boundsDelta < 0 && newClipExists) return false;

        return true;
    }
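
    // Illustrative example: if this batch is clipped on its left side (currentFlags contains
    // kClipSide_Left), mBounds.left coincides with the shared clip. An incoming op that extends
    // further left (opBounds.left < mBounds.left, i.e. boundsDelta > 0) would force that shared
    // clip to widen beyond what the existing ops were clipped to, so the merge is rejected.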

    /*
     * Checks if a (mergeable) op can be merged into this batch
     *
     * If this returns true, the op's multiDraw must be guaranteed to handle both ops
     * simultaneously, so it is important to consider all paint attributes used in the draw calls
     * when deciding both a) whether an op tries to merge at all, and b) whether the op can merge
     * with another set of ops
     *
     * False positives can lead to information from the paints of subsequent merged operations
     * being dropped, so we make simplifying qualifications on the ops that can merge, per op type.
     */
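    // Typical merge case (illustrative): several non-overlapping text draws that share the same
    // paint - and therefore the same batch id and merge id - can be combined into a single
    // multiDraw call, avoiding redundant renderer state changes between them.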
    bool canMergeWith(const DrawOp* op, const DeferredDisplayState* state) {
        bool isTextBatch = getBatchId() == DeferredDisplayList::kOpBatch_Text ||
                getBatchId() == DeferredDisplayList::kOpBatch_ColorText;

        // Overlapping other operations is only allowed for text without shadow. For other ops,
        // multiDraw isn't guaranteed to overdraw correctly
        if (!isTextBatch || op->hasTextShadow()) {
            if (intersects(state->mBounds)) return false;
        }
        const DeferredDisplayState* lhs = state;
        const DeferredDisplayState* rhs = mOps[0].state;

        if (!MathUtils::areEqual(lhs->mAlpha, rhs->mAlpha)) return false;

        // Identical round rect clip state means both ops will clip in the same way, or not at all.
        // As the state objects are const, we can compare their pointers to determine mergeability
        if (lhs->mRoundRectClipState != rhs->mRoundRectClipState) return false;
        if (lhs->mProjectionPathMask != rhs->mProjectionPathMask) return false;

        /* Clipping compatibility check
         *
         * Exploits the fact that if an op or batch is clipped on a side, its bounds will equal its
         * clip for that side.
         */
        const int currentFlags = mClipSideFlags;
        const int newFlags = state->mClipSideFlags;
        if (currentFlags != kClipSide_None || newFlags != kClipSide_None) {
            const Rect& opBounds = state->mBounds;
            float boundsDelta = mBounds.left - opBounds.left;
            if (!checkSide(currentFlags, newFlags, kClipSide_Left, boundsDelta)) return false;
            boundsDelta = mBounds.top - opBounds.top;
            if (!checkSide(currentFlags, newFlags, kClipSide_Top, boundsDelta)) return false;

            // right and bottom delta calculation reversed to account for direction
            boundsDelta = opBounds.right - mBounds.right;
            if (!checkSide(currentFlags, newFlags, kClipSide_Right, boundsDelta)) return false;
            boundsDelta = opBounds.bottom - mBounds.bottom;
            if (!checkSide(currentFlags, newFlags, kClipSide_Bottom, boundsDelta)) return false;
        }

        // if paints are equal, then modifiers + paint attribs don't need to be compared
        if (op->mPaint == mOps[0].op->mPaint) return true;

        if (op->getPaintAlpha() != mOps[0].op->getPaintAlpha()) return false;

        if (op->mPaint && mOps[0].op->mPaint &&
            op->mPaint->getColorFilter() != mOps[0].op->mPaint->getColorFilter()) {
            return false;
        }

        if (op->mPaint && mOps[0].op->mPaint &&
            op->mPaint->getShader() != mOps[0].op->mPaint->getShader()) {
            return false;
        }

        return true;
    }

    virtual void add(DrawOp* op, const DeferredDisplayState* state,
            bool opaqueOverBounds) override {
        DrawBatch::add(op, state, opaqueOverBounds);

        const int newClipSideFlags = state->mClipSideFlags;
        mClipSideFlags |= newClipSideFlags;
        if (newClipSideFlags & kClipSide_Left) mClipRect.left = state->mClip.left;
        if (newClipSideFlags & kClipSide_Top) mClipRect.top = state->mClip.top;
        if (newClipSideFlags & kClipSide_Right) mClipRect.right = state->mClip.right;
        if (newClipSideFlags & kClipSide_Bottom) mClipRect.bottom = state->mClip.bottom;
    }

    virtual void replay(OpenGLRenderer& renderer, Rect& dirty, int index) override {
        DEFER_LOGD("%d  replaying MergingDrawBatch %p, with %d ops,"
                " clip flags %x (batch id %x, merge id %p)",
                index, this, mOps.size(), mClipSideFlags, getBatchId(), getMergeId());
        if (mOps.size() == 1) {
            DrawBatch::replay(renderer, dirty, -1);
            return;
        }

        // clipping in the merged case is done ahead of time since all ops share the clip (if any)
        renderer.setupMergedMultiDraw(mClipSideFlags ? &mClipRect : nullptr);

        DrawOp* op = mOps[0].op;
#if DEBUG_DISPLAY_LIST_OPS_AS_EVENTS
        renderer.eventMark("multiDraw");
        renderer.eventMark(op->name());
#endif
        op->multiDraw(renderer, dirty, mOps, mBounds);

#if DEBUG_MERGE_BEHAVIOR
        renderer.drawScreenSpaceColorRect(mBounds.left, mBounds.top, mBounds.right, mBounds.bottom,
                DEBUG_COLOR_MERGEDBATCH);
#endif
    }

private:
    /*
     * Contains the effective clip rect shared by all merged ops. Initialized to the layer viewport,
     * it will shrink if an op must be clipped on a certain side. The clipped sides are reflected in
     * mClipSideFlags.
     */
    Rect mClipRect;
    int mClipSideFlags;
};

class StateOpBatch : public Batch {
public:
    // creates a single operation batch
    StateOpBatch(const StateOp* op, const DeferredDisplayState* state) : mOp(op), mState(state) {}

    virtual void replay(OpenGLRenderer& renderer, Rect& dirty, int index) override {
        DEFER_LOGD("replaying state op batch %p", this);
        renderer.restoreDisplayState(*mState);

        // use invalid save count because it won't be used at flush time - RestoreToCountOp is the
        // only one to use it, and we don't use that class at flush time, instead calling
        // renderer.restoreToCount directly
        int saveCount = -1;
        mOp->applyState(renderer, saveCount);
    }

private:
    const StateOp* mOp;
    const DeferredDisplayState* mState;
};

class RestoreToCountBatch : public Batch {
public:
    RestoreToCountBatch(const StateOp* op, const DeferredDisplayState* state, int restoreCount) :
            mState(state), mRestoreCount(restoreCount) {}

    virtual void replay(OpenGLRenderer& renderer, Rect& dirty, int index) override {
        DEFER_LOGD("batch %p restoring to count %d", this, mRestoreCount);

        renderer.restoreDisplayState(*mState);
        renderer.restoreToCount(mRestoreCount);
    }

private:
    // we use the state storage for the RestoreToCountOp, but don't replay the op itself
    const DeferredDisplayState* mState;

    /*
     * The count used here represents the flush() time saveCount. This is as opposed to the
     * DisplayList record time, or defer() time values (which are RestoreToCountOp's mCount, and
     * (saveCount + mCount) respectively). Since the count is different from the original
     * RestoreToCountOp, we don't store a pointer to the op, as elsewhere.
     */
    const int mRestoreCount;
};

#if DEBUG_MERGE_BEHAVIOR
class BarrierDebugBatch : public Batch {
    virtual void replay(OpenGLRenderer& renderer, Rect& dirty, int index) {
        renderer.drawScreenSpaceColorRect(0, 0, 10000, 10000, DEBUG_COLOR_BARRIER);
    }
};
#endif

/////////////////////////////////////////////////////////////////////////////////
// DeferredDisplayList
/////////////////////////////////////////////////////////////////////////////////

void DeferredDisplayList::resetBatchingState() {
    for (int i = 0; i < kOpBatch_Count; i++) {
        mBatchLookup[i] = nullptr;
        mMergingBatches[i].clear();
    }
#if DEBUG_MERGE_BEHAVIOR
    if (mBatches.size() != 0) {
        mBatches.add(new BarrierDebugBatch());
    }
#endif
    mEarliestBatchIndex = mBatches.size();
}

void DeferredDisplayList::clear() {
    resetBatchingState();
    mComplexClipStackStart = -1;

    for (unsigned int i = 0; i < mBatches.size(); i++) {
        delete mBatches[i];
    }
    mBatches.clear();
    mSaveStack.clear();
    mEarliestBatchIndex = 0;
    mEarliestUnclearedIndex = 0;
}

/////////////////////////////////////////////////////////////////////////////////
// Operation adding
/////////////////////////////////////////////////////////////////////////////////

int DeferredDisplayList::getStateOpDeferFlags() const {
    // For both clipOp and save(Layer)Op, we don't want to save drawing info, and only want to save
    // the clip if we aren't recording a complex clip (and can thus trust it to be a rect)
    return recordingComplexClip() ? 0 : kStateDeferFlag_Clip;
}

int DeferredDisplayList::getDrawOpDeferFlags() const {
    return kStateDeferFlag_Draw | getStateOpDeferFlags();
}

/**
 * When a clipping operation occurs that could cause a complex clip, record the operation and all
 * subsequent clipOps, and save/restores (if the clip flag is set). During a flush, instead of
 * loading the clip from deferred state, we play back all of the relevant state operations that
 * generated the complex clip.
 *
 * Note that we don't need to record the associated restore operation, since operations at defer
 * time record whether they should store the renderer's current clip.
 */
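// Illustrative example: a clipPath()/clipRegion() call, or a clipRect() issued while the canvas
// transform is not rect-to-rect (e.g. after a rotate()), can't be represented by the deferred clip
// rect alone, so the generating state ops are stored as barriers and replayed at flush time.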
void DeferredDisplayList::addClip(OpenGLRenderer& renderer, ClipOp* op) {
    if (recordingComplexClip() || op->canCauseComplexClip() || !renderer.hasRectToRectTransform()) {
        DEFER_LOGD("%p Received complex clip operation %p", this, op);

        // NOTE: defer clip op before setting mComplexClipStackStart so previous clip is recorded
        storeStateOpBarrier(renderer, op);

        if (!recordingComplexClip()) {
            mComplexClipStackStart = renderer.getSaveCount() - 1;
            DEFER_LOGD("    Starting complex clip region, start is %d", mComplexClipStackStart);
        }
    }
}

/**
 * For now, we record save layer operations as barriers in the batch list, preventing drawing
 * operations from reordering around the saveLayer and its associated restore().
 *
 * In the future, we should send saveLayer commands (if they can be played out of order) and their
 * contained drawing operations to a separate list of batches, so that they may draw at the
 * beginning of the frame. This would avoid targeting and removing an FBO in the middle of a frame.
 *
 * saveLayer operations should be pulled to the beginning of the frame if the canvas doesn't have a
 * complex clip, and if the flags (kClip_SaveFlag & kClipToLayer_SaveFlag) are set.
 */
void DeferredDisplayList::addSaveLayer(OpenGLRenderer& renderer,
        SaveLayerOp* op, int newSaveCount) {
    DEFER_LOGD("%p adding saveLayerOp %p, flags %x, new count %d",
            this, op, op->getFlags(), newSaveCount);

    storeStateOpBarrier(renderer, op);
    mSaveStack.push(newSaveCount);
}

/**
 * Takes a save op and its return value - the new save count - and stores it into the stream as a
 * barrier if it's needed to properly modify a complex clip.
 */
void DeferredDisplayList::addSave(OpenGLRenderer& renderer, SaveOp* op, int newSaveCount) {
    int saveFlags = op->getFlags();
    DEFER_LOGD("%p adding saveOp %p, flags %x, new count %d", this, op, saveFlags, newSaveCount);

    if (recordingComplexClip() && (saveFlags & SkCanvas::kClip_SaveFlag)) {
        // store and replay the save operation, as it may be needed to correctly play back the clip
        DEFER_LOGD("    adding save barrier with new save count %d", newSaveCount);
        storeStateOpBarrier(renderer, op);
        mSaveStack.push(newSaveCount);
    }
}

/**
 * saveLayer() commands must be associated with a restoreToCount batch that will clean up and draw
 * the layer in the deferred list.
 *
 * Other save() commands which occur as children of a snapshot with complex clip will be deferred,
 * and must be restored.
 *
 * Either will act as a barrier to draw operation reordering, as we want to play back layer
 * save/restore and complex canvas modifications (including save/restore) in order.
 */
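// Worked example (illustrative): with a single deferred saveLayer recorded on mSaveStack, a
// restore that pops past it empties the stack, so the barrier below stores a restore count of
// 0 + FLUSH_SAVE_STACK_DEPTH = 2. At flush time playback begins at stack depth 2 (see flush()),
// the replayed saveLayer barrier pushes the depth to 3, and the RestoreToCountBatch restores back
// to 2, closing the layer at the correct point in the stream.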
void DeferredDisplayList::addRestoreToCount(OpenGLRenderer& renderer, StateOp* op,
        int newSaveCount) {
    DEFER_LOGD("%p addRestoreToCount %d", this, newSaveCount);

    if (recordingComplexClip() && newSaveCount <= mComplexClipStackStart) {
        mComplexClipStackStart = -1;
        resetBatchingState();
    }

    if (mSaveStack.isEmpty() || newSaveCount > mSaveStack.top()) {
        return;
    }

    while (!mSaveStack.isEmpty() && mSaveStack.top() >= newSaveCount) mSaveStack.pop();

    storeRestoreToCountBarrier(renderer, op, mSaveStack.size() + FLUSH_SAVE_STACK_DEPTH);
}

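/**
 * Adds a drawing op to the deferred list. In broad strokes: the op computes its local bounds, the
 * renderer maps them to global space and may quick-reject, the op reports its DeferInfo (batch id,
 * merge id, mergeability), and the op is then either merged into a compatible MergingDrawBatch,
 * appended to the most recent batch with the same batch id, or placed in a new batch inserted just
 * after the latest similar batch, provided no intervening batch intersects its bounds.
 */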
void DeferredDisplayList::addDrawOp(OpenGLRenderer& renderer, DrawOp* op) {
    /* 1: op calculates local bounds */
    DeferredDisplayState* const state = createState();
    if (op->getLocalBounds(state->mBounds)) {
        if (state->mBounds.isEmpty()) {
            // valid empty bounds, don't bother deferring
            tryRecycleState(state);
            return;
        }
    } else {
        state->mBounds.setEmpty();
    }

    /* 2: renderer calculates global bounds + stores state */
    if (renderer.storeDisplayState(*state, getDrawOpDeferFlags())) {
        tryRecycleState(state);
        return; // quick rejected
    }

    /* 3: ask op for defer info, given renderer state */
    DeferInfo deferInfo;
    op->onDefer(renderer, deferInfo, *state);

    // complex clip has a complex set of expectations on the renderer state - for now, avoid taking
    // the merge path in those cases
    deferInfo.mergeable &= !recordingComplexClip();
    deferInfo.opaqueOverBounds &= !recordingComplexClip()
            && mSaveStack.isEmpty()
            && !state->mRoundRectClipState;

    if (CC_LIKELY(mAvoidOverdraw) && mBatches.size() &&
            state->mClipSideFlags != kClipSide_ConservativeFull &&
            deferInfo.opaqueOverBounds && state->mBounds.contains(mBounds)) {
        // avoid overdraw by resetting drawing state + discarding drawing ops
        discardDrawingBatches(mBatches.size() - 1);
        resetBatchingState();
    }

    if (CC_UNLIKELY(Properties::drawReorderDisabled)) {
        // TODO: elegant way to reuse batches?
        DrawBatch* b = new DrawBatch(deferInfo);
        b->add(op, state, deferInfo.opaqueOverBounds);
        mBatches.add(b);
        return;
    }

    // find the latest batch of the new op's type, and try to merge the new op into it
    DrawBatch* targetBatch = nullptr;

    // insertion point of a new batch, will hopefully be immediately after similar batch
    // (eventually, should be similar shader)
    int insertBatchIndex = mBatches.size();
    if (!mBatches.isEmpty()) {
        if (state->mBounds.isEmpty()) {
            // bounds for this op are unknown, so give it its own batch and restart batching state
            // on the next op
            DrawBatch* b = new DrawBatch(deferInfo);
            b->add(op, state, deferInfo.opaqueOverBounds);
            mBatches.add(b);
            resetBatchingState();
#if DEBUG_DEFER
            DEFER_LOGD("Warning: Encountered op with empty bounds, resetting batches");
            op->output(2);
#endif
            return;
        }

        if (deferInfo.mergeable) {
            // Try to merge with any existing batch with same mergeId.
            if (mMergingBatches[deferInfo.batchId].get(deferInfo.mergeId, targetBatch)) {
                if (!((MergingDrawBatch*) targetBatch)->canMergeWith(op, state)) {
                    targetBatch = nullptr;
                }
            }
        } else {
            // join with similar, non-merging batch
            targetBatch = (DrawBatch*)mBatchLookup[deferInfo.batchId];
        }

        if (targetBatch || deferInfo.mergeable) {
            // iterate back toward target to see if anything drawn since should overlap the new op
            // if no target, merging ops still iterate to find a similar batch to insert after
            for (int i = mBatches.size() - 1; i >= mEarliestBatchIndex; i--) {
                DrawBatch* overBatch = (DrawBatch*)mBatches[i];

                if (overBatch == targetBatch) break;

                // TODO: also consider shader shared between batch types
                if (deferInfo.batchId == overBatch->getBatchId()) {
                    insertBatchIndex = i + 1;
                    if (!targetBatch) break; // found insert position, quit
                }

                if (overBatch->intersects(state->mBounds)) {
                    // NOTE: it may be possible to optimize for special cases where two operations
                    // of the same batch/paint could swap order, such as with a non-mergeable
                    // (clipped) and a mergeable text operation
#if DEBUG_DEFER
                    // log before clearing targetBatch so the rejected batch is reported correctly
                    DEFER_LOGD("op couldn't join batch %p, was intersected by batch %d",
                            targetBatch, i);
                    op->output(2);
#endif
                    targetBatch = nullptr;
                    break;
                }
            }
        }
    }

    if (!targetBatch) {
        if (deferInfo.mergeable) {
            targetBatch = new MergingDrawBatch(deferInfo,
                    renderer.getViewportWidth(), renderer.getViewportHeight());
            mMergingBatches[deferInfo.batchId].put(deferInfo.mergeId, targetBatch);
        } else {
            targetBatch = new DrawBatch(deferInfo);
            mBatchLookup[deferInfo.batchId] = targetBatch;
        }

        DEFER_LOGD("creating %singBatch %p, bid %x, at %d",
                deferInfo.mergeable ? "Merg" : "Draw",
                targetBatch, deferInfo.batchId, insertBatchIndex);
        mBatches.insertAt(targetBatch, insertBatchIndex);
    }

    targetBatch->add(op, state, deferInfo.opaqueOverBounds);
}

void DeferredDisplayList::storeStateOpBarrier(OpenGLRenderer& renderer, StateOp* op) {
    DEFER_LOGD("%p adding state op barrier at pos %d", this, mBatches.size());

    DeferredDisplayState* state = createState();
    renderer.storeDisplayState(*state, getStateOpDeferFlags());
    mBatches.add(new StateOpBatch(op, state));
    resetBatchingState();
}

void DeferredDisplayList::storeRestoreToCountBarrier(OpenGLRenderer& renderer, StateOp* op,
        int newSaveCount) {
    DEFER_LOGD("%p adding restore to count %d barrier, pos %d",
            this, newSaveCount, mBatches.size());

    // store displayState for the restore operation, as it may be associated with a saveLayer that
    // doesn't have kClip_SaveFlag set
    DeferredDisplayState* state = createState();
    renderer.storeDisplayState(*state, getStateOpDeferFlags());
    mBatches.add(new RestoreToCountBatch(op, state, newSaveCount));
    resetBatchingState();
}

/////////////////////////////////////////////////////////////////////////////////
// Replay / flush
/////////////////////////////////////////////////////////////////////////////////

static void replayBatchList(const Vector<Batch*>& batchList,
        OpenGLRenderer& renderer, Rect& dirty) {

    for (unsigned int i = 0; i < batchList.size(); i++) {
        if (batchList[i]) {
            batchList[i]->replay(renderer, dirty, i);
        }
    }
    DEFER_LOGD("--flushed, drew %d batches", batchList.size());
}

void DeferredDisplayList::flush(OpenGLRenderer& renderer, Rect& dirty) {
    ATRACE_NAME("flush drawing commands");
    Caches::getInstance().fontRenderer->endPrecaching();

    if (isEmpty()) return; // nothing to flush
    renderer.restoreToCount(1);

    DEFER_LOGD("--flushing");
    renderer.eventMark("Flush");

    // save and restore so that reordering doesn't affect final state
    renderer.save(SkCanvas::kMatrix_SaveFlag | SkCanvas::kClip_SaveFlag);

    if (CC_LIKELY(mAvoidOverdraw)) {
        for (unsigned int i = 1; i < mBatches.size(); i++) {
            if (mBatches[i] && mBatches[i]->coversBounds(mBounds)) {
                discardDrawingBatches(i - 1);
            }
        }
    }
    // NOTE: depth of the save stack at this point, before playback, should be reflected in
    // FLUSH_SAVE_STACK_DEPTH, so that save/restores match up correctly
    replayBatchList(mBatches, renderer, dirty);

    renderer.restoreToCount(1);

    DEFER_LOGD("--flush complete");
    clear();
}

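/**
 * Deletes all purely-drawing batches from mEarliestUnclearedIndex up to and including maxIndex,
 * leaving null entries in their place. State op batches are left untouched so that deferred
 * save/restore and clip state still play back correctly.
 */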
void DeferredDisplayList::discardDrawingBatches(const unsigned int maxIndex) {
    for (unsigned int i = mEarliestUnclearedIndex; i <= maxIndex; i++) {
        // leave deferred state ops alone for simplicity (empty save restore pairs may now exist)
        if (mBatches[i] && mBatches[i]->purelyDrawBatch()) {
            delete mBatches[i];
            mBatches.replaceAt(nullptr, i);
        }
    }
    mEarliestUnclearedIndex = maxIndex + 1;
}

} // namespace uirenderer
} // namespace android