/*
 * Copyright (C) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef HWC_MDP_COMP
#define HWC_MDP_COMP

#include <hwc_utils.h>
#include <idle_invalidator.h>
#include <cutils/properties.h>
#include <overlay.h>

#define DEFAULT_IDLE_TIME 70
#define MAX_PIPES_PER_MIXER 4

namespace overlay {
    class Rotator;
};

namespace qhwc {
namespace ovutils = overlay::utils;

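/* Illustrative usage sketch (not part of this interface): a HWC backend is
 * expected to drive MDPComp roughly as below, once per display. The field
 * name ctx->mMDPComp and the hook names are assumptions used only for this
 * example.
 *
 *   // at HWC init time
 *   MDPComp::init(ctx);
 *   ctx->mMDPComp[dpy] = MDPComp::getObject(ctx, dpy);
 *
 *   // in the prepare() hook, every frame
 *   ctx->mMDPComp[dpy]->prepare(ctx, list);
 *
 *   // in the set() hook, every frame
 *   ctx->mMDPComp[dpy]->draw(ctx, list);
 *
 * timeout_handler() is meant to be registered with the IdleInvalidator so an
 * idle display can trigger a redraw and fall back to GPU composition.
 */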
class MDPComp {
public:
    explicit MDPComp(int);
    virtual ~MDPComp(){};
    /* sets up mdp comp for the current frame */
    int prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* draw */
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list) = 0;
    //Reset values
    void reset();
    /* dumpsys */
    void dump(android::String8& buf, hwc_context_t *ctx);
    bool isGLESOnlyComp() { return (mCurrentFrame.mdpCount == 0); }
    int drawOverlap(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    static MDPComp* getObject(hwc_context_t *ctx, const int& dpy);
    /* Handler to invoke frame redraw on Idle Timer expiry */
    static void timeout_handler(void *udata);
    /* Initialize MDP comp */
    static bool init(hwc_context_t *ctx);
    static void resetIdleFallBack() { sIdleFallBack = false; }
    static bool isIdleFallback() { return sIdleFallBack; }
    static void dynamicDebug(bool enable) { sDebugLogs = enable; }

protected:
    enum { MAX_SEC_LAYERS = 1 }; //TODO add property support

    enum ePipeType {
        MDPCOMP_OV_RGB = ovutils::OV_MDP_PIPE_RGB,
        MDPCOMP_OV_VG = ovutils::OV_MDP_PIPE_VG,
        MDPCOMP_OV_DMA = ovutils::OV_MDP_PIPE_DMA,
        MDPCOMP_OV_ANY,
    };

    //Simulation flags
    enum {
        MDPCOMP_AVOID_FULL_MDP   = 0x001,
        MDPCOMP_AVOID_CACHE_MDP  = 0x002,
        MDPCOMP_AVOID_LOAD_MDP   = 0x004,
        MDPCOMP_AVOID_VIDEO_ONLY = 0x008,
    };

    /* mdp pipe data */
    struct MdpPipeInfo {
        int zOrder;
        virtual ~MdpPipeInfo(){};
    };

    struct MdpYUVPipeInfo : public MdpPipeInfo {
        ovutils::eDest lIndex;
        ovutils::eDest rIndex;
        virtual ~MdpYUVPipeInfo(){};
    };

    /* per layer data */
    struct PipeLayerPair {
        MdpPipeInfo *pipeInfo;
        overlay::Rotator* rot;
        int listIndex;
    };

    /* per frame data */
    struct FrameInfo {
        /* maps layer list to mdp list */
        int layerCount;
        int layerToMDP[MAX_NUM_APP_LAYERS];

        /* maps mdp list to layer list */
        int mdpCount;
        struct PipeLayerPair mdpToLayer[MAX_PIPES_PER_MIXER];

        /* layer composing on FB? */
        int fbCount;
        bool isFBComposed[MAX_NUM_APP_LAYERS];
        /* layers lying outside ROI. Will
         * be dropped from the composition */
        int dropCount;
        bool drop[MAX_NUM_APP_LAYERS];

        bool needsRedraw;
        int fbZ;

        /* c'tor */
        FrameInfo();
        /* clear old frame data */
        void reset(const int& numLayers);
        void map();
    };

    /* cached data */
    struct LayerCache {
        int layerCount;
        buffer_handle_t hnd[MAX_NUM_APP_LAYERS];
        bool isFBComposed[MAX_NUM_APP_LAYERS];
        bool drop[MAX_NUM_APP_LAYERS];

        /* c'tor */
        LayerCache();
        /* clear caching info */
        void reset();
        void cacheAll(hwc_display_contents_1_t* list);
        void updateCounts(const FrameInfo&);
        bool isSameFrame(const FrameInfo& curFrame,
                hwc_display_contents_1_t* list);
    };

    /* allocates pipe from pipe book */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* configures MDP pipes */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair) = 0;
    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* configures 4kx2k yuv layer */
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair) = 0;
    /* generates ROI based on the modified area of the frame */
    virtual void generateROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* validates the ROI generated for fallback conditions */
    virtual bool validateAndApplyROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* Trims fbRect calculated against ROI generated */
    virtual void trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) = 0;

    /* set/reset flags for MDPComp */
    void setMDPCompLayerFlags(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    void setRedraw(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* checks for conditions where mdpcomp is not possible */
    bool isFrameDoable(hwc_context_t *ctx);
    /* checks for conditions where RGB layers cannot be bypassed */
    bool tryFullFrame(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* checks if full MDP comp can be done */
    bool fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Full MDP Composition with Peripheral Tiny Overlap Removal */
    bool fullMDPCompWithPTOR(hwc_context_t *ctx,hwc_display_contents_1_t* list);
    /* check if we can use layer cache to do at least partial MDP comp */
    bool partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Partial MDP comp that uses caching to save power as the primary goal */
    bool cacheBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Partial MDP comp that balances the load between MDP and GPU such that
     * MDP is loaded up to its capacity. The lower z order layers are fed to
     * MDP, whereas the upper ones go to the GPU, because the upper ones cover
     * fewer pixels and hence reduce GPU processing time */
    bool loadBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
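
    /* Illustrative sketch of the load-based split described above. This is
     * not the actual heuristic (which lives in the implementation file);
     * 'availablePipes' is a hypothetical count of free MDP pipes.
     *
     *   // layers [0, cut) -> MDP pipes, layers [cut, layerCount) -> GPU/FB
     *   int cut = (frame.layerCount < availablePipes) ?
     *             frame.layerCount : availablePipes;
     *   for (int i = 0; i < frame.layerCount; i++)
     *       frame.isFBComposed[i] = (i >= cut);
     *   frame.mdpCount = cut;
     *   frame.fbCount = frame.layerCount - cut;
     *   frame.fbZ = cut;   // FB target stacks above the MDP-composed layers
     */
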
    /* Checks if it's worth doing load based partial comp */
    bool isLoadBasedCompDoable(hwc_context_t *ctx);
    /* checks for conditions where only video can be bypassed */
    bool tryVideoOnly(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    bool videoOnlyComp(hwc_context_t *ctx, hwc_display_contents_1_t* list,
            bool secureOnly);
    /* checks for conditions where YUV layers cannot be bypassed */
    bool isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
    /* checks if MDP/MDSS can process the current list w.r.t. HW limitations.
     * All peculiar HW limitations should go here */
    bool hwLimitationsCheck(hwc_context_t* ctx, hwc_display_contents_1_t* list);
    /* Is debug enabled */
    static bool isDebug() { return sDebugLogs ? true : false; };
    /* Is feature enabled */
    static bool isEnabled() { return sEnabled; };
    /* checks for mdp comp dimension limitation */
    bool isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer);
    /* tracks non updating layers */
    void updateLayerCache(hwc_context_t* ctx, hwc_display_contents_1_t* list);
    /* optimize layers for mdp comp */
    bool markLayersForCaching(hwc_context_t* ctx,
            hwc_display_contents_1_t* list);
    int getBatch(hwc_display_contents_1_t* list,
            int& maxBatchStart, int& maxBatchEnd,
            int& maxBatchCount);
    bool canPushBatchToTop(const hwc_display_contents_1_t* list,
            int fromIndex, int toIndex);
    bool intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
            int fromIndex, int toIndex, int targetLayerIndex);

    /* updates cache map with YUV info */
    void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
            bool secureOnly);
    /* Validates if the GPU/MDP layer split chosen by a strategy is supported
     * by MDP.
     * Sets up MDP comp data structures to reflect conversion from layers to
     * overlay pipes.
     * Configures overlay.
     * Configures if GPU should redraw.
     */
    bool postHeuristicsHandling(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
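
    /* The steps listed above roughly correspond to the following sequence.
     * This is a simplified paraphrase, not the exact implementation; the real
     * order and error handling live in the implementation file.
     *
     *   if (!resourceCheck(ctx, list) || !hwLimitationsCheck(ctx, list))
     *       return false;                    // split not supported by MDP
     *   mCurrentFrame.map();                 // build layer<->mdp index maps
     *   if (!allocLayerPipes(ctx, list))
     *       return false;                    // could not reserve pipes
     *   for each layer i with isFBComposed[i] == false:
     *       configure(ctx, &list->hwLayers[i],
     *                 mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[i]]);
     *   setRedraw(ctx, list);                // decide if GPU must redraw FB
     */
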
    void reset(hwc_context_t *ctx);
    bool isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer);
    bool resourceCheck(hwc_context_t* ctx, hwc_display_contents_1_t* list);
    hwc_rect_t getUpdatingFBRect(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* checks for conditions to enable partial update */
    bool canPartialUpdate(hwc_context_t *ctx, hwc_display_contents_1_t* list);

    int mDpy;
    static bool sEnabled;
    static bool sEnableMixedMode;
    static int sSimulationFlags;
    static bool sDebugLogs;
    static bool sIdleFallBack;
    /* Handles the timeout event from the kernel, if set to true */
    static bool sHandleTimeout;
    static int sMaxPipesPerMixer;
    static bool sSrcSplitEnabled;
    static IdleInvalidator *idleInvalidator;
    struct FrameInfo mCurrentFrame;
    struct LayerCache mCachedFrame;
    //Enable 4kx2k yuv layer split
    static bool sEnable4k2kYUVSplit;
    bool mModeOn; // if prepare happened
    bool allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index);
};

class MDPCompNonSplit : public MDPComp {
public:
    explicit MDPCompNonSplit(int dpy):MDPComp(dpy){};
    virtual ~MDPCompNonSplit(){};
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list);

private:
    struct MdpPipeInfoNonSplit : public MdpPipeInfo {
        ovutils::eDest index;
        virtual ~MdpPipeInfoNonSplit() {};
    };

    /* configures overlay pipes for the frame */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair);

    /* allocates pipes to selected candidates */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* configures 4kx2k yuv layer to 2 VG pipes */
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair);
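
    /* Conceptual note (an assumption about intent, not a statement of the
     * exact implementation): a 4kx2k YUV surface is wider than a single VG
     * pipe can fetch, so allocSplitVGPipesfor4k2k() reserves two VG pipes
     * (MdpYUVPipeInfo::lIndex/rIndex) and this method feeds each pipe one
     * half of the source crop and destination at the same z order, e.g.:
     *
     *   hwc_rect_t lCrop = crop, rCrop = crop;
     *   lCrop.right = (crop.left + crop.right) / 2;  // left half  -> lIndex
     *   rCrop.left  = lCrop.right;                   // right half -> rIndex
     */
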
    /* generates ROI based on the modified area of the frame */
    virtual void generateROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* validates the ROI generated for fallback conditions */
    virtual bool validateAndApplyROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* Trims fbRect calculated against ROI generated */
    virtual void trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect);
};

class MDPCompSplit : public MDPComp {
public:
    explicit MDPCompSplit(int dpy):MDPComp(dpy){};
    virtual ~MDPCompSplit(){};
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list);

protected:
    struct MdpPipeInfoSplit : public MdpPipeInfo {
        ovutils::eDest lIndex;
        ovutils::eDest rIndex;
        virtual ~MdpPipeInfoSplit() {};
    };

    virtual bool acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
            MdpPipeInfoSplit& pipe_info);

    /* configures overlay pipes for the frame */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair);

    /* allocates pipes to selected candidates */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
private:
    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* configures 4kx2k yuv layer */
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair);
    /* generates ROI based on the modified area of the frame */
    virtual void generateROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* validates the ROI generated for fallback conditions */
    virtual bool validateAndApplyROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* Trims fbRect calculated against ROI generated */
    virtual void trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect);
};

class MDPCompSrcSplit : public MDPCompSplit {
public:
    explicit MDPCompSrcSplit(int dpy) : MDPCompSplit(dpy){};
    virtual ~MDPCompSrcSplit(){};
private:
    virtual bool acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
            MdpPipeInfoSplit& pipe_info);

    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair);
};

}; //namespace qhwc
#endif