/*
 * Copyright (C) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef HWC_MDP_COMP
#define HWC_MDP_COMP

#include <hwc_utils.h>
#include <idle_invalidator.h>
#include <cutils/properties.h>
#include <overlay.h>

/* Idle timeout (ms units assumed from IdleInvalidator usage -- TODO confirm)
 * after which composition falls back to GPU to allow MDP power collapse */
#define DEFAULT_IDLE_TIME 70
/* Max overlay pipes that can feed a single mixer */
#define MAX_PIPES_PER_MIXER 4

namespace overlay {
    class Rotator;
};

namespace qhwc {
namespace ovutils = overlay::utils;

/* Abstract base for MDP (Mobile Display Processor) composition strategy.
 * Decides, per frame, which layers are composed by MDP overlay pipes and
 * which fall back to GPU (GLES/framebuffer) composition. Concrete
 * subclasses implement single-mixer, dual-mixer (split) and source-split
 * pipe configuration. */
class MDPComp {
public:
    explicit MDPComp(int);
    virtual ~MDPComp(){};
    /* sets up mdp comp for the current frame */
    int prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* draw */
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list) = 0;
    /* dumpsys */
    void dump(android::String8& buf, hwc_context_t *ctx);
    /* true when no layer was assigned to an MDP pipe this frame */
    bool isGLESOnlyComp() { return (mCurrentFrame.mdpCount == 0); }
    /* factory: returns the MDPComp subclass suited to the display */
    static MDPComp* getObject(hwc_context_t *ctx, const int& dpy);
    /* Handler to invoke frame redraw on Idle Timer expiry */
    static void timeout_handler(void *udata);
    /* Initialize MDP comp */
    static bool init(hwc_context_t *ctx);
    static void resetIdleFallBack() { sIdleFallBack = false; }
    static void reset() { sHandleTimeout = false; };
    static bool isIdleFallback() { return sIdleFallBack; }

protected:
    enum { MAX_SEC_LAYERS = 1 }; //TODO add property support

    /* MDP pipe classes a layer may be assigned to */
    enum ePipeType {
        MDPCOMP_OV_RGB = ovutils::OV_MDP_PIPE_RGB,
        MDPCOMP_OV_VG = ovutils::OV_MDP_PIPE_VG,
        MDPCOMP_OV_DMA = ovutils::OV_MDP_PIPE_DMA,
        MDPCOMP_OV_ANY,
    };

    //Simulation flags
    enum {
        MDPCOMP_AVOID_FULL_MDP = 0x001,
        MDPCOMP_AVOID_CACHE_MDP = 0x002,
        MDPCOMP_AVOID_LOAD_MDP = 0x004,
        MDPCOMP_AVOID_VIDEO_ONLY = 0x008,
    };

    /* mdp pipe data */
    struct MdpPipeInfo {
        int zOrder;
        virtual ~MdpPipeInfo(){};
    };

    /* pipe pair used when a YUV layer is split across two VG pipes */
    struct MdpYUVPipeInfo : public MdpPipeInfo{
        ovutils::eDest lIndex;
        ovutils::eDest rIndex;
        virtual ~MdpYUVPipeInfo(){};
    };

    /* per layer data */
    struct PipeLayerPair {
        MdpPipeInfo *pipeInfo;
        overlay::Rotator* rot;
        int listIndex;
    };

    /* per frame data */
    struct FrameInfo {
        /* maps layer list to mdp list */
        int layerCount;
        int layerToMDP[MAX_NUM_APP_LAYERS];

        /* maps mdp list to layer list */
        int mdpCount;
        struct PipeLayerPair mdpToLayer[MAX_PIPES_PER_MIXER];

        /* layer composing on FB? */
        int fbCount;
        bool isFBComposed[MAX_NUM_APP_LAYERS];
        /* layers lying outside ROI. Will
         * be dropped off from the composition */
        int dropCount;
        bool drop[MAX_NUM_APP_LAYERS];

        bool needsRedraw;
        /* z order at which the framebuffer target is staged; <0 when unused
         * -- TODO confirm sentinel convention against the .cpp */
        int fbZ;

        /* c'tor */
        FrameInfo();
        /* clear old frame data */
        void reset(const int& numLayers);
        /* builds the layerToMDP mapping from isFBComposed/drop state */
        void map();
    };

    /* cached data */
    struct LayerCache {
        int layerCount;
        buffer_handle_t hnd[MAX_NUM_APP_LAYERS];
        bool isFBComposed[MAX_NUM_APP_LAYERS];
        bool drop[MAX_NUM_APP_LAYERS];

        /* c'tor */
        LayerCache();
        /* clear caching info */
        void reset();
        void cacheAll(hwc_display_contents_1_t* list);
        void updateCounts(const FrameInfo&);
        /* compares the cached state with the current frame to detect
         * an unchanged composition */
        bool isSameFrame(const FrameInfo& curFrame,
                hwc_display_contents_1_t* list);
    };

    /* allocates pipe from pipe book */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* configures MDP pipes */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair) = 0;
    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* configures 4kx2k yuv layer */
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& PipeLayerPair) = 0;
    /* generates ROI based on the modified area of the frame */
    virtual void generateROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* validates the ROI generated for fallback conditions */
    virtual bool validateAndApplyROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* Trims fbRect calculated against ROI generated */
    virtual void trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) = 0;

    /* set/reset flags for MDPComp */
    void setMDPCompLayerFlags(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    void setRedraw(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* checks for conditions where mdpcomp is not possible */
    bool isFrameDoable(hwc_context_t *ctx);
    /* checks for conditions where RGB layers cannot be bypassed */
    bool tryFullFrame(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* checks if full MDP comp can be done */
    bool fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* check if we can use layer cache to do at least partial MDP comp */
    bool partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Partial MDP comp that uses caching to save power as primary goal */
    bool cacheBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Partial MDP comp that balances the load between MDP and GPU such that
     * MDP is loaded to the max of its capacity. The lower z order layers are
     * fed to MDP, whereas the upper ones to GPU, because the upper ones have
     * lower number of pixels and can reduce GPU processing time */
    bool loadBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Checks if its worth doing load based partial comp */
    bool isLoadBasedCompDoable(hwc_context_t *ctx);
    /* checks for conditions where only video can be bypassed */
    bool tryVideoOnly(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    bool videoOnlyComp(hwc_context_t *ctx, hwc_display_contents_1_t* list,
            bool secureOnly);
    /* checks for conditions where YUV layers cannot be bypassed */
    bool isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
    /* checks if MDP/MDSS can process current list w.r.to HW limitations
     * All peculiar HW limitations should go here */
    bool hwLimitationsCheck(hwc_context_t* ctx, hwc_display_contents_1_t* list);
    /* Is debug enabled */
    static bool isDebug() { return sDebugLogs ? true : false; };
    /* Is feature enabled */
    static bool isEnabled() { return sEnabled; };
    /* checks for mdp comp dimension limitation */
    bool isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer);
    /* tracks non updating layers */
    void updateLayerCache(hwc_context_t* ctx, hwc_display_contents_1_t* list);
    /* optimize layers for mdp comp */
    bool markLayersForCaching(hwc_context_t* ctx,
            hwc_display_contents_1_t* list);
    /* finds the largest contiguous batch of non-updating layers that can be
     * composed together on the framebuffer */
    int getBatch(hwc_display_contents_1_t* list,
            int& maxBatchStart, int& maxBatchEnd,
            int& maxBatchCount);
    bool canPushBatchToTop(const hwc_display_contents_1_t* list,
            int fromIndex, int toIndex);
    bool intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
            int fromIndex, int toIndex, int targetLayerIndex);

    /* updates cache map with YUV info */
    void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
            bool secureOnly);
    /* Validates if the GPU/MDP layer split chosen by a strategy is supported
     * by MDP.
     * Sets up MDP comp data structures to reflect conversion from layers to
     * overlay pipes.
     * Configures overlay.
     * Configures if GPU should redraw.
     */
    bool postHeuristicsHandling(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* clears per-frame MDP state; distinct from static reset() above */
    void reset(hwc_context_t *ctx);
    bool isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer);
    /* checks overlay pipe availability for the current frame */
    bool resourceCheck();
    hwc_rect_t getUpdatingFBRect(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* checks for conditions to enable partial update */
    bool canPartialUpdate(hwc_context_t *ctx, hwc_display_contents_1_t* list);

    /* display id this object composes for */
    int mDpy;
    /* MDP composition feature enabled (persist property driven -- TODO
     * confirm against init() in the .cpp) */
    static bool sEnabled;
    /* allow mixed MDP+GPU frames, not just full-MDP/full-GPU */
    static bool sEnableMixedMode;
    /* bitmask of MDPCOMP_AVOID_* flags used to force strategy paths in test */
    static int sSimulationFlags;
    static bool sDebugLogs;
    /* set by timeout_handler to force GPU comp after idle timeout */
    static bool sIdleFallBack;
    /* Handles the timeout event from kernel, if the value is set to true */
    static bool sHandleTimeout;
    static int sMaxPipesPerMixer;
    static bool sSrcSplitEnabled;
    static IdleInvalidator *idleInvalidator;
    struct FrameInfo mCurrentFrame;
    struct LayerCache mCachedFrame;
    //Enable 4kx2k yuv layer split
    static bool sEnable4k2kYUVSplit;
    bool allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index);
};

/* MDP composition for displays driven by a single mixer (no dual-pipe
 * left/right split) */
class MDPCompNonSplit : public MDPComp {
public:
    explicit MDPCompNonSplit(int dpy):MDPComp(dpy){};
    virtual ~MDPCompNonSplit(){};
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list);

private:
    /* one overlay pipe per layer */
    struct MdpPipeInfoNonSplit : public MdpPipeInfo {
        ovutils::eDest index;
        virtual ~MdpPipeInfoNonSplit() {};
    };

    /* configure's overlay pipes for the frame */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair);

    /* allocates pipes to selected candidates */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* configures 4kx2k yuv layer to 2 VG pipes */
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& PipeLayerPair);
    /* generates ROI based on the modified area of the frame */
    virtual void generateROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* validates the ROI generated for fallback conditions */
    virtual bool validateAndApplyROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* Trims fbRect calculated against ROI generated */
    virtual void trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect);
};

/* MDP composition for displays driven by two mixers (left/right split);
 * each layer may need a pipe per mixer half */
class MDPCompSplit : public MDPComp {
public:
    explicit MDPCompSplit(int dpy):MDPComp(dpy){};
    virtual ~MDPCompSplit(){};
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list);

protected:
    /* left/right mixer pipe pair for one layer */
    struct MdpPipeInfoSplit : public MdpPipeInfo {
        ovutils::eDest lIndex;
        ovutils::eDest rIndex;
        virtual ~MdpPipeInfoSplit() {};
    };

    virtual bool acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
            MdpPipeInfoSplit& pipe_info);

    /* configure's overlay pipes for the frame */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair);

    /* allocates pipes to selected candidates */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
private:
    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* configures 4kx2k yuv layer */
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& PipeLayerPair);
    /* generates ROI based on the modified area of the frame */
    virtual void generateROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* validates the ROI generated for fallback conditions */
    virtual bool validateAndApplyROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* Trims fbRect calculated against ROI generated */
    virtual void trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect);
};

/* Split-mixer variant where a single source layer may be split across
 * pipes (source split); overrides pipe acquisition and configuration only */
class MDPCompSrcSplit : public MDPCompSplit {
public:
    explicit MDPCompSrcSplit(int dpy) : MDPCompSplit(dpy){};
    virtual ~MDPCompSrcSplit(){};
private:
    virtual bool acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
            MdpPipeInfoSplit& pipe_info);

    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair);
};

}; //namespace
#endif