/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HWUI_DEFERRED_DISPLAY_LIST_H
#define ANDROID_HWUI_DEFERRED_DISPLAY_LIST_H

#include <utils/Errors.h>
#include <utils/LinearAllocator.h>
#include <utils/Vector.h>
#include <utils/TinyHashMap.h>

#include "Matrix.h"
#include "OpenGLRenderer.h"
#include "Rect.h"

class SkBitmap;

namespace android {
namespace uirenderer {

class ClipOp;
class DrawOp;
class SaveOp;
class SaveLayerOp;
class StateOp;

class DeferredDisplayState;

class Batch;
class DrawBatch;
class MergingDrawBatch;

typedef const void* mergeid_t;
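// Note (illustrative, not part of the original header): a merge id is an opaque per-resource
// key. A bitmap draw op, for instance, may reuse its SkBitmap pointer as its merge id so that
// draws sourced from the same bitmap can be funneled into one MergingDrawBatch (see
// mMergingBatches below).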

class DeferredDisplayState {
public:
    /** static void* operator new(size_t size); PURPOSELY OMITTED **/
    static void* operator new(size_t size, LinearAllocator& allocator) {
        return allocator.alloc(size);
    }
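
    // Illustrative usage sketch: states are carved out of a LinearAllocator via the placement
    // form above rather than the global heap, e.g.
    //     DeferredDisplayState* state = new (allocator) DeferredDisplayState();
    // DeferredDisplayList::createState() below does exactly this with its own allocator.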

    // global op bounds, mapped by mMatrix into screen space coordinates, then clipped
    Rect mBounds;

    // the fields below are set and used by the OpenGLRenderer at record time and during
    // deferred playback
    bool mClipValid;
    Rect mClip;
    int mClipSideFlags; // specifies which sides of the bounds are clipped; unclipped if cleared
    bool mClipped;
    mat4 mMatrix;
    DrawModifiers mDrawModifiers;
    float mAlpha;
    const RoundRectClipState* mRoundRectClipState;
};

class OpStatePair {
public:
    OpStatePair()
            : op(NULL), state(NULL) {}
    OpStatePair(DrawOp* newOp, const DeferredDisplayState* newState)
            : op(newOp), state(newState) {}
    OpStatePair(const OpStatePair& other)
            : op(other.op), state(other.state) {}
    DrawOp* op;
    const DeferredDisplayState* state;
};

class DeferredDisplayList {
    friend class DeferStateStruct; // used to give access to allocator
public:
    DeferredDisplayList(const Rect& bounds, bool avoidOverdraw = true) :
            mBounds(bounds), mAvoidOverdraw(avoidOverdraw) {
        clear();
    }
    ~DeferredDisplayList() { clear(); }

    enum OpBatchId {
        kOpBatch_None = 0, // Don't batch
        kOpBatch_Bitmap,
        kOpBatch_Patch,
        kOpBatch_AlphaVertices,
        kOpBatch_Vertices,
        kOpBatch_AlphaMaskTexture,
        kOpBatch_Text,
        kOpBatch_ColorText,

        kOpBatch_Count, // Add other batch ids before this
    };

    bool isEmpty() { return mBatches.isEmpty(); }

    /**
     * Plays back all of the draw ops recorded into batches to the renderer.
     * Adjusts the state of the renderer as necessary, and restores it when complete.
     */
    status_t flush(OpenGLRenderer& renderer, Rect& dirty);

    void addClip(OpenGLRenderer& renderer, ClipOp* op);
    void addSaveLayer(OpenGLRenderer& renderer, SaveLayerOp* op, int newSaveCount);
    void addSave(OpenGLRenderer& renderer, SaveOp* op, int newSaveCount);
    void addRestoreToCount(OpenGLRenderer& renderer, StateOp* op, int newSaveCount);

    /**
     * Adds a draw op to the DeferredDisplayList, reordering it as needed for performance
     * (unless reordering is disallowed), while respecting draw order when ops overlap.
     */
    void addDrawOp(OpenGLRenderer& renderer, DrawOp* op);
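
    /**
     * Illustrative lifecycle sketch (hedged; viewportBounds, saveOp, drawOp, restoreOp and
     * newSaveCount are assumed caller-side names, not declared in this header):
     *
     *     DeferredDisplayList deferredList(viewportBounds);
     *     deferredList.addSave(renderer, saveOp, newSaveCount);
     *     deferredList.addDrawOp(renderer, drawOp);       // ops are batched/merged here
     *     deferredList.addRestoreToCount(renderer, restoreOp, newSaveCount);
     *
     *     Rect dirty;
     *     if (!deferredList.isEmpty()) {
     *         deferredList.flush(renderer, dirty);        // replays the batches in order
     *     }
     */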

private:
    DeferredDisplayList(const DeferredDisplayList& other); // disallow copy

    DeferredDisplayState* createState() {
        return new (mAllocator) DeferredDisplayState();
    }

    void tryRecycleState(DeferredDisplayState* state) {
        mAllocator.rewindIfLastAlloc(state, sizeof(DeferredDisplayState));
    }

    /**
     * Resets the batching back-pointers, creating a barrier in the operation stream so that no
     * future op will be inserted into a batch that already exists.
     */
    void resetBatchingState();

    void clear();

    void storeStateOpBarrier(OpenGLRenderer& renderer, StateOp* op);
    void storeRestoreToCountBarrier(OpenGLRenderer& renderer, StateOp* op, int newSaveCount);

    bool recordingComplexClip() const { return mComplexClipStackStart >= 0; }

    int getStateOpDeferFlags() const;
    int getDrawOpDeferFlags() const;

    void discardDrawingBatches(const unsigned int maxIndex);

    // layer space bounds of rendering
    Rect mBounds;
    const bool mAvoidOverdraw;

    /**
     * Stores the defer-time savecount of each deferred save/saveLayer op, so that when an
     * associated restoreToCount is deferred, it can be recorded as a RestoreToCountBatch.
     */
    Vector<int> mSaveStack;
    int mComplexClipStackStart;

    Vector<Batch*> mBatches;

    // Maps batch ids to the most recent *non-merging* batch of that id
    Batch* mBatchLookup[kOpBatch_Count];

    // Points to the index after the most recent barrier
    int mEarliestBatchIndex;

    // Points to the first index that may contain a pure drawing batch
    int mEarliestUnclearedIndex;

    /**
     * Maps the mergeid_t returned by an op's getMergeId() to the most recently seen
     * MergingDrawBatch of that id. These ids are unique per draw type and guaranteed not to
     * collide, which avoids the need to resolve mergeid collisions.
     */
    TinyHashMap<mergeid_t, DrawBatch*> mMergingBatches[kOpBatch_Count];

    LinearAllocator mAllocator;
};

/**
 * Struct containing the information that instructs how a draw op is deferred and batched.
 */
struct DeferInfo {
public:
    DeferInfo() :
            batchId(DeferredDisplayList::kOpBatch_None),
            mergeId((mergeid_t) -1),
            mergeable(false),
            opaqueOverBounds(false) {
    }

    int batchId;
    mergeid_t mergeId;
    bool mergeable;
    bool opaqueOverBounds; // opaque over its bounds in DeferredDisplayState; ops below can be skipped
};
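
/**
 * Hedged sketch of how a draw op might fill in a DeferInfo during deferral. The op, its fields
 * (mBitmap, isOpaque), and the onDefer() hook are assumptions drawn from the wider hwui code,
 * not declarations in this header:
 *
 *     void SomeBitmapOp::onDefer(OpenGLRenderer& renderer, DeferInfo& deferInfo,
 *             const DeferredDisplayState& state) {
 *         deferInfo.batchId = DeferredDisplayList::kOpBatch_Bitmap;
 *         deferInfo.mergeId = (mergeid_t) mBitmap;        // stable per-resource key
 *         deferInfo.mergeable = state.mMatrix.isSimple(); // only merge simple transforms
 *         deferInfo.opaqueOverBounds = isOpaque;          // lets ops beneath be discarded
 *     }
 */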

}; // namespace uirenderer
}; // namespace android

#endif // ANDROID_HWUI_DEFERRED_DISPLAY_LIST_H