/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HWUI_DEFERRED_DISPLAY_LIST_H
#define ANDROID_HWUI_DEFERRED_DISPLAY_LIST_H

#include <unordered_map>
#include <vector>

#include <utils/Errors.h>
#include <utils/LinearAllocator.h>

#include "Matrix.h"
#include "OpenGLRenderer.h"
#include "Rect.h"

class SkBitmap;

namespace android {
namespace uirenderer {

class ClipOp;
class DrawOp;
class SaveOp;
class SaveLayerOp;
class StateOp;

class DeferredDisplayState;

class Batch;
class DrawBatch;
class MergingDrawBatch;

typedef const void* mergeid_t;

class DeferredDisplayState {
public:
    // Global op bounds, mapped by mMatrix into screen space coordinates and clipped.
    Rect mBounds;

    // The fields below are set and used by the OpenGLRenderer at record time and during
    // deferred playback.
    bool mClipValid;
    Rect mClip;
    int mClipSideFlags; // specifies which sides of the bounds are clipped, unclipped if cleared
    mat4 mMatrix;
    float mAlpha;
    const RoundRectClipState* mRoundRectClipState;
    const ProjectionPathMask* mProjectionPathMask;
};

class OpStatePair {
public:
    OpStatePair()
            : op(nullptr), state(nullptr) {}
    OpStatePair(DrawOp* newOp, const DeferredDisplayState* newState)
            : op(newOp), state(newState) {}
    OpStatePair(const OpStatePair& other)
            : op(other.op), state(other.state) {}
    DrawOp* op;
    const DeferredDisplayState* state;
};

class DeferredDisplayList {
    friend struct DeferStateStruct; // used to give access to allocator
public:
    DeferredDisplayList(const Rect& bounds)
            : mBounds(bounds) {
        clear();
    }
    ~DeferredDisplayList() { clear(); }

    enum OpBatchId {
        kOpBatch_None = 0, // Don't batch
        kOpBatch_Bitmap,
        kOpBatch_Patch,
        kOpBatch_AlphaVertices,
        kOpBatch_Vertices,
        kOpBatch_AlphaMaskTexture,
        kOpBatch_Text,
        kOpBatch_ColorText,

        kOpBatch_Count, // Add other batch ids before this
    };

    bool isEmpty() { return mBatches.empty(); }

    /**
     * Plays back all of the draw ops recorded into batches to the renderer.
     * Adjusts the state of the renderer as necessary, and restores it when complete.
     */
    void flush(OpenGLRenderer& renderer, Rect& dirty);
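
    // Usage sketch (illustrative only): 'renderer' is the active OpenGLRenderer, 'someDrawOp'
    // stands in for a DrawOp* produced elsewhere during display list replay, and
    // 'viewportBounds' is a placeholder Rect.
    //
    //     DeferredDisplayList deferredList(viewportBounds);
    //     deferredList.addDrawOp(renderer, someDrawOp); // ops are batched/merged as added
    //     Rect dirty;
    //     if (!deferredList.isEmpty()) {
    //         deferredList.flush(renderer, dirty);      // replays batches, restores renderer state
    //     }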

    void addClip(OpenGLRenderer& renderer, ClipOp* op);
    void addSaveLayer(OpenGLRenderer& renderer, SaveLayerOp* op, int newSaveCount);
    void addSave(OpenGLRenderer& renderer, SaveOp* op, int newSaveCount);
    void addRestoreToCount(OpenGLRenderer& renderer, StateOp* op, int newSaveCount);

    /**
     * Adds a draw op to the DeferredDisplayList, reordering it as needed (for performance)
     * when disallowReorder is false, while still respecting draw order where ops overlap.
     */
    void addDrawOp(OpenGLRenderer& renderer, DrawOp* op);
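
    // Reordering sketch (illustrative; the op names below are hypothetical examples of
    // DrawOp subclasses defined elsewhere in hwui):
    //
    //     defer.addDrawOp(renderer, bitmapOpA); // starts a bitmap batch
    //     defer.addDrawOp(renderer, rectOp);    // different batch id; can only be reordered
    //                                           // around the bitmaps if bounds don't intersect
    //     defer.addDrawOp(renderer, bitmapOpB); // merged into the earlier bitmap batch when
    //                                           // mergeable and non-overlapping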

private:
    DeferredDisplayList(const DeferredDisplayList& other); // disallow copy

    DeferredDisplayState* createState() {
        return mAllocator.create_trivial<DeferredDisplayState>();
    }

    void tryRecycleState(DeferredDisplayState* state) {
        mAllocator.rewindIfLastAlloc(state);
    }

    /**
     * Resets the batching back-pointers, creating a barrier in the operation stream so that no
     * op added later can be inserted into a batch that already exists.
     */
    void resetBatchingState();
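
    // Barrier sketch (hedged; the real implementation lives in DeferredDisplayList.cpp):
    // conceptually, a barrier forgets every batch that could still accept new ops.
    //
    //     for (int i = 0; i < kOpBatch_Count; i++) {
    //         mBatchLookup[i] = nullptr;   // drop non-merging batch back-pointers
    //         mMergingBatches[i].clear();  // drop merging batch back-pointers
    //     }
    //     mEarliestBatchIndex = mBatches.size(); // no earlier batch may be appended to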

    void clear();

    void storeStateOpBarrier(OpenGLRenderer& renderer, StateOp* op);
    void storeRestoreToCountBarrier(OpenGLRenderer& renderer, StateOp* op, int newSaveCount);

    bool recordingComplexClip() const { return mComplexClipStackStart >= 0; }

    int getStateOpDeferFlags() const;
    int getDrawOpDeferFlags() const;

    void discardDrawingBatches(const unsigned int maxIndex);

    // layer space bounds of rendering
    Rect mBounds;

    /**
     * At defer time, stores the *defer-time* save count of save/saveLayer ops that were deferred,
     * so that when an associated restoreToCount is deferred, it can be recorded as a
     * RestoreToCountBatch.
     */
    std::vector<int> mSaveStack;
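
    // Save-tracking sketch (hedged; the exact bookkeeping is in the .cpp): a deferred
    // save/saveLayer pushes its defer-time save count, and a later restoreToCount pops every
    // entry at or above its target count before being recorded as a RestoreToCountBatch.
    //
    //     mSaveStack.push_back(newSaveCount);        // on a deferred save/saveLayer
    //     while (!mSaveStack.empty() && mSaveStack.back() >= newSaveCount) {
    //         mSaveStack.pop_back();                 // on a deferred restoreToCount
    //     }
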
    int mComplexClipStackStart;

    std::vector<Batch*> mBatches;

    // Maps batch ids to the most recent *non-merging* batch of that id
    Batch* mBatchLookup[kOpBatch_Count];

    // Points to the index after the most recent barrier
    int mEarliestBatchIndex;

    // Points to the first index that may contain a pure drawing batch
    int mEarliestUnclearedIndex;

    /**
     * Maps the mergeid_t returned by an op's getMergeId() to the most recently seen
     * MergingDrawBatch of that id. These ids are unique per draw type and guaranteed not to
     * collide across types, so mergeid collisions never need to be resolved.
     */
    std::unordered_map<mergeid_t, DrawBatch*> mMergingBatches[kOpBatch_Count];
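
    // Merge-lookup sketch (hedged; the real merging decision is made in addDrawOp's
    // implementation): operator[] default-constructs a null slot for unseen merge ids.
    //
    //     DrawBatch*& batch = mMergingBatches[deferInfo.batchId][deferInfo.mergeId];
    //     if (batch && /* batch can absorb the op: no overlap or conflicting state */) {
    //         // append the op to the existing MergingDrawBatch
    //     } else {
    //         // start a new MergingDrawBatch and remember it in this slot
    //     }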

    LinearAllocator mAllocator;
};

/**
 * Struct containing information that instructs how a draw op should be deferred and batched.
 */
struct DeferInfo {
public:
    DeferInfo() :
            batchId(DeferredDisplayList::kOpBatch_None),
            mergeId((mergeid_t) -1),
            mergeable(false),
            opaqueOverBounds(false) {
    }

    int batchId;
    mergeid_t mergeId;
    bool mergeable;
    bool opaqueOverBounds; // opaque over the bounds in DeferredDisplayState - ops below can be skipped
};
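
/**
 * Population sketch (hedged): during deferral, a draw op describes its batching preferences by
 * filling in a DeferInfo. For example, a hypothetical bitmap-style op might set:
 *
 *     deferInfo.batchId = DeferredDisplayList::kOpBatch_Bitmap;
 *     deferInfo.mergeId = (mergeid_t) bitmapPointer;  // stable per bitmap, unique per draw type
 *     deferInfo.mergeable = true;                     // when paint/transform permit merged drawing
 *     deferInfo.opaqueOverBounds = bitmapIsOpaque;    // op fully covers its bounds opaquely
 */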

}; // namespace uirenderer
}; // namespace android

#endif // ANDROID_HWUI_DEFERRED_DISPLAY_LIST_H