/*
 * Copyright (C) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef HWC_MDP_COMP
#define HWC_MDP_COMP

#include <hwc_utils.h>
#include <idle_invalidator.h>
#include <cutils/properties.h>
#include <overlay.h>

#define DEFAULT_IDLE_TIME 70
#define MAX_PIPES_PER_MIXER 4

namespace overlay {
class Rotator;
};

namespace qhwc {
namespace ovutils = overlay::utils;

class MDPComp {
public:
    explicit MDPComp(int);
    virtual ~MDPComp(){};
    /* sets up mdp comp for the current frame */
    int prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* draw */
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list) = 0;
    /* dumpsys */
    void dump(android::String8& buf, hwc_context_t *ctx);
    bool isGLESOnlyComp() { return (mCurrentFrame.mdpCount == 0); }
    static MDPComp* getObject(hwc_context_t *ctx, const int& dpy);
    /* Handler to invoke frame redraw on Idle Timer expiry */
    static void timeout_handler(void *udata);
    /* Initialize MDP comp*/
    static bool init(hwc_context_t *ctx);
    static void resetIdleFallBack() { sIdleFallBack = false; }
    static void reset() { sHandleTimeout = false; };
    static bool isIdleFallback() { return sIdleFallBack; }
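
    /* Illustrative usage sketch (not part of this class): one plausible way
     * the HWC prepare/set path could drive this API each frame. The return
     * value semantics of prepare() are assumed here (negative meaning MDP
     * comp was not possible); only getObject(), prepare(), isGLESOnlyComp()
     * and draw() come from this header.
     *
     *   MDPComp *mdpComp = MDPComp::getObject(ctx, dpy);
     *   int fbZ = mdpComp->prepare(ctx, list); // assumed: < 0 => GPU-only
     *   if(fbZ >= 0 && !mdpComp->isGLESOnlyComp())
     *       mdpComp->draw(ctx, list);          // program overlay pipes at set
     */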

protected:
    enum { MAX_SEC_LAYERS = 1 }; //TODO add property support

    enum ePipeType {
        MDPCOMP_OV_RGB = ovutils::OV_MDP_PIPE_RGB,
        MDPCOMP_OV_VG = ovutils::OV_MDP_PIPE_VG,
        MDPCOMP_OV_DMA = ovutils::OV_MDP_PIPE_DMA,
        MDPCOMP_OV_ANY,
    };

    //Simulation flags
    enum {
        MDPCOMP_AVOID_FULL_MDP = 0x001,
        MDPCOMP_AVOID_CACHE_MDP = 0x002,
        MDPCOMP_AVOID_LOAD_MDP = 0x004,
        MDPCOMP_AVOID_VIDEO_ONLY = 0x008,
    };
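
    /* Example (assumption, for illustration only): since these are bit
     * flags, a debug build could suppress individual strategies by OR-ing
     * flags into sSimulationFlags and masking before each attempt:
     *
     *   sSimulationFlags = MDPCOMP_AVOID_FULL_MDP | MDPCOMP_AVOID_LOAD_MDP;
     *   if(!(sSimulationFlags & MDPCOMP_AVOID_FULL_MDP))
     *       tryFullMdp = true;   // hypothetical local flag
     *
     * How the flags are actually populated (e.g. from a debug property) is
     * decided in the .cpp, not here.
     */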

    /* mdp pipe data */
    struct MdpPipeInfo {
        int zOrder;
        virtual ~MdpPipeInfo(){};
    };

    struct MdpYUVPipeInfo : public MdpPipeInfo{
        ovutils::eDest lIndex;
        ovutils::eDest rIndex;
        virtual ~MdpYUVPipeInfo(){};
    };

    /* per layer data */
    struct PipeLayerPair {
        MdpPipeInfo *pipeInfo;
        overlay::Rotator* rot;
        int listIndex;
    };

    /* per frame data */
    struct FrameInfo {
        /* maps layer list to mdp list */
        int layerCount;
        int layerToMDP[MAX_NUM_APP_LAYERS];

        /* maps mdp list to layer list */
        int mdpCount;
        struct PipeLayerPair mdpToLayer[MAX_PIPES_PER_MIXER];

        /* layer composing on FB? */
        int fbCount;
        bool isFBComposed[MAX_NUM_APP_LAYERS];
        /* layers lying outside the ROI; these will
         * be dropped from composition */
        int dropCount;
        bool drop[MAX_NUM_APP_LAYERS];

        bool needsRedraw;
        int fbZ;

        /* c'tor */
        FrameInfo();
        /* clear old frame data */
        void reset(const int& numLayers);
        void map();
    };
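
    /* Worked example (illustrative; exact values depend on the strategy that
     * ran): for a 3-layer list where the middle layer stays on the FB,
     *
     *   layerCount   = 3
     *   isFBComposed = { false, true, false }    fbCount  = 1
     *   layerToMDP   = { 0, -1, 1 }              mdpCount = 2
     *   mdpToLayer[0].listIndex = 0, mdpToLayer[1].listIndex = 2
     *   fbZ          = 1   // FB target sits between the two MDP layers
     */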

    /* cached data */
    struct LayerCache {
        int layerCount;
        bool isFBComposed[MAX_NUM_APP_LAYERS];
        bool drop[MAX_NUM_APP_LAYERS];

        /* c'tor */
        LayerCache();
        /* clear caching info*/
        void reset();
        void updateCounts(const FrameInfo&);
        bool isSameFrame(const FrameInfo& curFrame,
                         hwc_display_contents_1_t* list);
    };
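
    /* Usage sketch (assumption): the cache is what lets a later frame skip a
     * GPU pass when the layer-to-FB split has not changed, roughly:
     *
     *   mCachedFrame.updateCounts(mCurrentFrame);          // end of prepare
     *   ...
     *   if(mCachedFrame.isSameFrame(mCurrentFrame, list))  // next frame
     *       mCurrentFrame.needsRedraw = false;             // reuse FB output
     *
     * The exact policy lives in the .cpp; this only shows the struct's
     * intent.
     */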

    /* allocates pipe from pipe book */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
                                 hwc_display_contents_1_t* list) = 0;
    /* configures MDP pipes */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
                          PipeLayerPair& pipeLayerPair) = 0;
    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* configures 4kx2k yuv layer*/
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& PipeLayerPair) = 0;
    /* generates ROI based on the modified area of the frame */
    virtual void generateROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* Calculates the dirtyRegion for the given layer */
    hwc_rect_t calculateDirtyRect(const hwc_layer_1_t* layer,
                                hwc_rect_t& scissor);
    /* validates the ROI generated for fallback conditions */
    virtual bool validateAndApplyROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* Trims the fbRect against the generated ROI */
    virtual void trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) = 0;
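
    /* Worked example of the ROI path (numbers are assumptions): on a
     * 1080x1920 panel where only a 1080x75 status bar updated,
     * calculateDirtyRect() yields {0, 0, 1080, 75} for that layer,
     * generateROI() unions the dirty rects of all updating layers into the
     * frame ROI, validateAndApplyROI() marks layers lying fully outside it
     * as dropped (FrameInfo::drop[]), and trimAgainstROI() clips the FB
     * target rect to the same region so only the updated pixels are fetched.
     */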

    /* set/reset flags for MDPComp */
    void setMDPCompLayerFlags(hwc_context_t *ctx,
                              hwc_display_contents_1_t* list);
    void setRedraw(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* checks for conditions where mdpcomp is not possible */
    bool isFrameDoable(hwc_context_t *ctx);
    /* checks for conditions where RGB layers cannot be bypassed */
    bool tryFullFrame(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* checks if full MDP comp can be done */
    bool fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* check if we can use layer cache to do at least partial MDP comp */
    bool partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Partial MDP comp that uses caching to save power as primary goal */
    bool cacheBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Partial MDP comp that balances the load between MDP and GPU such that
     * MDP is loaded up to its capacity. The lower z-order layers are fed to
     * MDP, while the upper ones go to the GPU, since the upper layers
     * typically cover fewer pixels and therefore cost the GPU less time */
    bool loadBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
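
    /* Illustrative split (an assumption about the heuristic, for clarity):
     * with 6 app layers and 4 pipes on the mixer, one pipe is reserved for
     * the FB target, so the bottom three layers could take MDP pipes while
     * the top three are batched onto the FB by the GPU:
     *
     *   layer z-order:  0    1    2    3    4    5
     *   composed by:   MDP  MDP  MDP  GPU  GPU  GPU  -> FB target at fbZ = 3
     */
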
    /* Checks if it's worth doing load-based partial comp */
    bool isLoadBasedCompDoable(hwc_context_t *ctx);
    /* checks for conditions where only video can be bypassed */
    bool tryVideoOnly(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    bool videoOnlyComp(hwc_context_t *ctx, hwc_display_contents_1_t* list,
            bool secureOnly);
    /* checks for conditions where YUV layers cannot be bypassed */
    bool isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
    /* checks if MDP/MDSS can process the current list w.r.t. HW limitations.
     * All peculiar HW limitations should go here */
    bool hwLimitationsCheck(hwc_context_t* ctx, hwc_display_contents_1_t* list);
    /* Is debug enabled */
    static bool isDebug() { return sDebugLogs ? true : false; };
    /* Is feature enabled */
    static bool isEnabled() { return sEnabled; };
    /* checks for mdp comp dimension limitation */
    bool isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer);
    /* tracks non updating layers*/
    void updateLayerCache(hwc_context_t* ctx, hwc_display_contents_1_t* list);
    /* optimize layers for mdp comp*/
    bool markLayersForCaching(hwc_context_t* ctx,
            hwc_display_contents_1_t* list);
    int getBatch(hwc_display_contents_1_t* list,
            int& maxBatchStart, int& maxBatchEnd,
            int& maxBatchCount);
    bool canPushBatchToTop(const hwc_display_contents_1_t* list,
            int fromIndex, int toIndex);
    bool intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
            int fromIndex, int toIndex, int targetLayerIndex);
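
    /* Worked example (illustrative): with 5 layers where only layers 1 and 3
     * are updating, the non-updating layers {0, 2, 4} cannot all be cached on
     * the FB, because the FB target is a single surface at one z-order. A
     * plausible outcome is to keep the largest contiguous batch, e.g. layers
     * 2-4, on the FB (fbZ = 2) and give layers 0 and 1 MDP pipes.
     * canPushBatchToTop() and intersectingUpdatingLayers() decide whether a
     * batch may be reordered past an updating layer without overlapping it,
     * since overlap would change the blending result.
     */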

    /* updates cache map with YUV info */
    void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
            bool secureOnly);
    /* Validates if the GPU/MDP layer split chosen by a strategy is supported
     * by MDP.
     * Sets up MDP comp data structures to reflect conversion from layers to
     * overlay pipes.
     * Configures overlay.
     * Configures if GPU should redraw.
     */
    bool postHeuristicsHandling(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
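
    /* Rough shape of that sequence (an assumption condensed from the comment
     * above; the real flow lives in the .cpp):
     *
     *   if(!resourceCheck())            return false;  // enough pipes left?
     *   if(!allocLayerPipes(ctx, list)) return false;  // reserve pipes
     *   for each MDP-composed layer:
     *       if(configure(ctx, layer, pair) != 0) return false;
     *   setRedraw(ctx, list);           // decide if GPU must redraw the FB
     *   return true;
     */
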
    void reset(hwc_context_t *ctx);
    bool isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer);
    bool resourceCheck();
    hwc_rect_t getUpdatingFBRect(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* checks for conditions to enable partial update */
    bool canPartialUpdate(hwc_context_t *ctx, hwc_display_contents_1_t* list);

    int mDpy;
    static bool sEnabled;
    static bool sEnableMixedMode;
    static int sSimulationFlags;
    static bool sDebugLogs;
    static bool sIdleFallBack;
    /* Handles the timeout event from kernel, if the value is set to true */
    static bool sHandleTimeout;
    static int sMaxPipesPerMixer;
    static bool sSrcSplitEnabled;
    static IdleInvalidator *idleInvalidator;
    struct FrameInfo mCurrentFrame;
    struct LayerCache mCachedFrame;
    //Enable 4kx2k yuv layer split
    static bool sEnable4k2kYUVSplit;
    bool allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index);
};

class MDPCompNonSplit : public MDPComp {
public:
    explicit MDPCompNonSplit(int dpy):MDPComp(dpy){};
    virtual ~MDPCompNonSplit(){};
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list);

private:
    struct MdpPipeInfoNonSplit : public MdpPipeInfo {
        ovutils::eDest index;
        virtual ~MdpPipeInfoNonSplit() {};
    };

    /* configures overlay pipes for the frame */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
                          PipeLayerPair& pipeLayerPair);

    /* allocates pipes to selected candidates */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
                                 hwc_display_contents_1_t* list);

    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* configures 4kx2k yuv layer to 2 VG pipes*/
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& PipeLayerPair);
    /* generates ROI based on the modified area of the frame */
    virtual void generateROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* validates the ROI generated for fallback conditions */
    virtual bool validateAndApplyROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* Trims the fbRect against the generated ROI */
    virtual void trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect);
};

class MDPCompSplit : public MDPComp {
public:
    explicit MDPCompSplit(int dpy):MDPComp(dpy){};
    virtual ~MDPCompSplit(){};
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list);

protected:
    struct MdpPipeInfoSplit : public MdpPipeInfo {
        ovutils::eDest lIndex;
        ovutils::eDest rIndex;
        virtual ~MdpPipeInfoSplit() {};
    };

    virtual bool acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
                         MdpPipeInfoSplit& pipe_info);

    /* configures overlay pipes for the frame */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
                          PipeLayerPair& pipeLayerPair);

    /* allocates pipes to selected candidates */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
                                 hwc_display_contents_1_t* list);
private:
    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* configures 4kx2k yuv layer*/
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& PipeLayerPair);
    /* generates ROI based on the modified area of the frame */
    virtual void generateROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* validates the ROI generated for fallback conditions */
    virtual bool validateAndApplyROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* Trims the fbRect against the generated ROI */
    virtual void trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect);
};

class MDPCompSrcSplit : public MDPCompSplit {
public:
    explicit MDPCompSrcSplit(int dpy) : MDPCompSplit(dpy){};
    virtual ~MDPCompSrcSplit(){};
private:
    virtual bool acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
            MdpPipeInfoSplit& pipe_info);

    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair);
};
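
/* Which concrete type getObject() returns is decided in the .cpp. A plausible
 * selection, shown only to relate the three variants (isDisplaySplit() is an
 * assumed helper, not declared in this header):
 *
 *   if(sSrcSplitEnabled)               // one layer may be split across pipes
 *       return new MDPCompSrcSplit(dpy);
 *   else if(isDisplaySplit(ctx, dpy))  // dual-mixer panel: left/right halves
 *       return new MDPCompSplit(dpy);
 *   else
 *       return new MDPCompNonSplit(dpy);
 */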

}; //namespace qhwc
#endif