/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "LayerBuilder.h"

#include "BakedOpState.h"
#include "RenderNode.h"
#include "utils/PaintUtils.h"
#include "utils/TraceUtils.h"

#include <utils/TypeHelpers.h>

namespace android {
namespace uirenderer {

// Base class for a deferred batch of ops. A batch groups BakedOpStates that share a
// batch id, tracking both the union of their clipped bounds (cheap early rejection)
// and the individual ops themselves (precise intersection testing and replay).
class BatchBase {
public:
    BatchBase(batchid_t batchId, BakedOpState* op, bool merging)
            : mBatchId(batchId)
            , mMerging(merging) {
        mBounds = op->computedState.clippedBounds;
        mOps.push_back(op);
    }

    // Returns true if rect intersects the clipped bounds of any op in the batch.
    // The mBounds check is a conservative early-out before the per-op tests.
    bool intersects(const Rect& rect) const {
        if (!rect.intersects(mBounds)) return false;

        for (const BakedOpState* op : mOps) {
            if (rect.intersects(op->computedState.clippedBounds)) {
                return true;
            }
        }
        return false;
    }

    batchid_t getBatchId() const { return mBatchId; }
    bool isMerging() const { return mMerging; }

    const std::vector<BakedOpState*>& getOps() const { return mOps; }

    // Logs the batch's id, merging state, op count and bounds for debugging.
    void dump() const {
        ALOGD("    Batch %p, id %d, merging %d, count %d, bounds " RECT_STRING,
                this, mBatchId, mMerging, (int) mOps.size(), RECT_ARGS(mBounds));
    }
protected:
    batchid_t mBatchId; // selects the receiver used at replay time
    Rect mBounds; // union of clipped bounds of all ops in the batch
    std::vector<BakedOpState*> mOps;
    bool mMerging; // true for MergingOpBatch, false for OpBatch
};

// A non-merging batch: ops share a batch id for ordering purposes only, and are
// replayed individually (see LayerBuilder::replayBakedOpsImpl).
class OpBatch : public BatchBase {
public:
    OpBatch(batchid_t batchId, BakedOpState* op)
            : BatchBase(batchId, op, false) {
    }

    // Appends an op, growing the batch bounds to cover its clipped bounds.
    void batchOp(BakedOpState* op) {
        mBounds.unionWith(op->computedState.clippedBounds);
        mOps.push_back(op);
    }
};

// A merging batch: all ops that pass canMergeWith() can be issued together through a
// single MergedOpReceiver at replay time, sharing clip side flags and a clip rect.
class MergingOpBatch : public BatchBase {
public:
    MergingOpBatch(batchid_t batchId, BakedOpState* op)
            : BatchBase(batchId, op, true)
            , mClipSideFlags(op->computedState.clipSideFlags) {
    }

    /*
     * Helper for determining if a new op can merge with a MergingDrawBatch based on their bounds
     * and clip side flags. Positive bounds delta means new bounds fit in old.
     */
    static inline bool checkSide(const int currentFlags, const int newFlags, const int side,
            float boundsDelta) {
        bool currentClipExists = currentFlags & side;
        bool newClipExists = newFlags & side;

        // if current is clipped, we must be able to fit new bounds in current
        if (boundsDelta > 0 && currentClipExists) return false;

        // if new is clipped, we must be able to fit current bounds in new
        if (boundsDelta < 0 && newClipExists) return false;

        return true;
    }

    // True if the paint carries no alpha/color-filter/shader state that a merged
    // multiDraw would need to honor.
    static bool paintIsDefault(const SkPaint& paint) {
        return paint.getAlpha() == 255
                && paint.getColorFilter() == nullptr
                && paint.getShader() == nullptr;
    }

    // True if both paints agree on the attributes that affect merged drawing.
    static bool paintsAreEquivalent(const SkPaint& a, const SkPaint& b) {
        // Note: don't check color, since all currently mergeable ops can merge across colors
        return a.getAlpha() == b.getAlpha()
                && a.getColorFilter() == b.getColorFilter()
                && a.getShader() == b.getShader();
    }

    /*
     * Checks if a (mergeable) op can be merged into this batch
     *
     * If true, the op's multiDraw must be guaranteed to handle both ops simultaneously, so it is
     * important to consider all paint attributes used in the draw calls in deciding both a) if an
     * op tries to merge at all, and b) if the op can merge with another set of ops
     *
     * False positives can lead to information from the paints of subsequent merged operations being
     * dropped, so we make simplifying qualifications on the ops that can merge, per op type.
     */
    bool canMergeWith(BakedOpState* op) const {
        bool isTextBatch = getBatchId() == OpBatchType::Text
                || getBatchId() == OpBatchType::ColorText;

        // Overlapping other operations is only allowed for text without shadow. For other ops,
        // multiDraw isn't guaranteed to overdraw correctly
        if (!isTextBatch || PaintUtils::hasTextShadow(op->op->paint)) {
            if (intersects(op->computedState.clippedBounds)) return false;
        }

        const BakedOpState* lhs = op;
        const BakedOpState* rhs = mOps[0];

        if (!MathUtils::areEqual(lhs->alpha, rhs->alpha)) return false;

        // Identical round rect clip state means both ops will clip in the same way, or not at all.
        // As the state objects are const, we can compare their pointers to determine mergeability
        if (lhs->roundRectClipState != rhs->roundRectClipState) return false;

        // Local masks prevent merge, since they're potentially in different coordinate spaces
        if (lhs->computedState.localProjectionPathMask
                || rhs->computedState.localProjectionPathMask) return false;

        /* Clipping compatibility check
         *
         * Exploits the fact that if a op or batch is clipped on a side, its bounds will equal its
         * clip for that side.
         */
        const int currentFlags = mClipSideFlags;
        const int newFlags = op->computedState.clipSideFlags;
        if (currentFlags != OpClipSideFlags::None || newFlags != OpClipSideFlags::None) {
            const Rect& opBounds = op->computedState.clippedBounds;
            float boundsDelta = mBounds.left - opBounds.left;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Left, boundsDelta)) return false;
            boundsDelta = mBounds.top - opBounds.top;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Top, boundsDelta)) return false;

            // right and bottom delta calculation reversed to account for direction
            boundsDelta = opBounds.right - mBounds.right;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Right, boundsDelta)) return false;
            boundsDelta = opBounds.bottom - mBounds.bottom;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Bottom, boundsDelta)) return false;
        }

        const SkPaint* newPaint = op->op->paint;
        const SkPaint* oldPaint = mOps[0]->op->paint;

        if (newPaint == oldPaint) {
            // if paints are equal, then modifiers + paint attribs don't need to be compared
            return true;
        } else if (newPaint && !oldPaint) {
            return paintIsDefault(*newPaint);
        } else if (!newPaint && oldPaint) {
            return paintIsDefault(*oldPaint);
        }
        return paintsAreEquivalent(*newPaint, *oldPaint);
    }

    // Adds an op that has already passed canMergeWith(): grows bounds and ORs in
    // the op's clip side flags.
    void mergeOp(BakedOpState* op) {
        mBounds.unionWith(op->computedState.clippedBounds);
        mOps.push_back(op);

        // Because a new op must have passed canMergeWith(), we know it's passed the clipping compat
        // check, and doesn't extend past a side of the clip that's in use by the merged batch.
        // Therefore it's safe to simply always merge flags, and use the bounds as the clip rect.
        mClipSideFlags |= op->computedState.clipSideFlags;
    }

    int getClipSideFlags() const { return mClipSideFlags; }
    const Rect& getClipRect() const { return mBounds; }

private:
    int mClipSideFlags; // accumulated OpClipSideFlags of every merged op
};

// Note: repaintClip is initialized from repaintRect so that ops constructed internally
// (e.g. the deferred layer clears in flushLayerClears) can reference it as their clip.
LayerBuilder::LayerBuilder(uint32_t width, uint32_t height,
        const Rect& repaintRect, const BeginLayerOp* beginLayerOp, RenderNode* renderNode)
        : width(width)
        , height(height)
        , repaintRect(repaintRect)
        , repaintClip(repaintRect)
        , offscreenBuffer(renderNode ? renderNode->getLayer() : nullptr)
        , beginLayerOp(beginLayerOp)
        , renderNode(renderNode) {}

// iterate back toward target to see if anything drawn since should overlap the new op
// if no target, merging ops still iterate to find similar batch to insert after
//
// On return, *insertBatchIndex is positioned just after the last batch with a matching
// batchId, and *targetBatch is nulled if an intervening batch overlaps clippedBounds
// (meaning the op cannot be moved back into the target batch without reordering draws).
void LayerBuilder::locateInsertIndex(int batchId, const Rect& clippedBounds,
        BatchBase** targetBatch, size_t* insertBatchIndex) const {
    for (int i = mBatches.size() - 1; i >= 0; i--) {
        BatchBase* overBatch = mBatches[i];

        if (overBatch == *targetBatch) break;

        // TODO: also consider shader shared between batch types
        if (batchId == overBatch->getBatchId()) {
            *insertBatchIndex = i + 1;
            if (!*targetBatch) break; // found insert position, quit
        }

        if (overBatch->intersects(clippedBounds)) {
            // NOTE: it may be possible to optimize for special cases where two operations
            // of the same batch/paint could swap order, such as with a non-mergeable
            // (clipped) and a mergeable text operation
            *targetBatch = nullptr;
            break;
        }
    }
}

// Queues a rect to be cleared in the layer; all queued rects are issued together by
// flushLayerClears() before the next non-CopyToLayer op is deferred.
void LayerBuilder::deferLayerClear(const Rect& rect) {
    mClearRects.push_back(rect);
}

// Hook run before every deferred op. For the first non-CopyToLayer op, flushes any
// stashed layer clears; additionally, if the op opaquely covers the entire repaint
// rect (with no unclipped save layers active and overdraw debugging off), all
// previously deferred batches are discarded since the op would occlude them anyway.
void LayerBuilder::onDeferOp(LinearAllocator& allocator, const BakedOpState* bakedState) {
    if (bakedState->op->opId != RecordedOpId::CopyToLayerOp) {
        // First non-CopyToLayer, so stop stashing up layer clears for unclipped save layers,
        // and issue them together in one draw.
        flushLayerClears(allocator);

        if (CC_UNLIKELY(activeUnclippedSaveLayers.empty()
                && bakedState->computedState.opaqueOverClippedBounds
                && bakedState->computedState.clippedBounds.contains(repaintRect)
                && !Properties::debugOverdraw)) {
            // discard all deferred drawing ops, since new one will occlude them
            clear();
        }
    }
}

// Converts all queued clear rects into a single SimpleRectsOp drawn with the clear
// xfermode, and defers it like any other unmergeable op. No-op if nothing is queued.
void LayerBuilder::flushLayerClears(LinearAllocator& allocator) {
    if (CC_UNLIKELY(!mClearRects.empty())) {
        const int vertCount = mClearRects.size() * 4;
        // put the verts in the frame allocator, since
        //     1) SimpleRectsOps needs verts, not rects
        //     2) even if mClearRects stored verts, std::vectors will move their contents
        Vertex* const verts = (Vertex*) allocator.create_trivial_array<Vertex>(vertCount);

        Vertex* currentVert = verts;
        Rect bounds = mClearRects[0];
        for (auto&& rect : mClearRects) {
            bounds.unionWith(rect);
            Vertex::set(currentVert++, rect.left, rect.top);
            Vertex::set(currentVert++, rect.right, rect.top);
            Vertex::set(currentVert++, rect.left, rect.bottom);
            Vertex::set(currentVert++, rect.right, rect.bottom);
        }
        mClearRects.clear(); // discard rects before drawing so this method isn't reentrant

        // One or more unclipped saveLayers have been enqueued, with deferred clears.
        // Flush all of these clears with a single draw
        SkPaint* paint = allocator.create<SkPaint>();
        paint->setXfermodeMode(SkXfermode::kClear_Mode);
        SimpleRectsOp* op = allocator.create_trivial<SimpleRectsOp>(bounds,
                Matrix4::identity(), nullptr, paint,
                verts, vertCount);
        BakedOpState* bakedState = BakedOpState::directConstruct(allocator,
                &repaintClip, bounds, *op);
        deferUnmergeableOp(allocator, bakedState, OpBatchType::Vertices);
    }
}

// Defers an op that can never be merged. Reuses the most recent batch with the same
// batchId when no batch drawn since would overlap the op; otherwise starts a new
// OpBatch at the insert position computed by locateInsertIndex().
void LayerBuilder::deferUnmergeableOp(LinearAllocator& allocator,
        BakedOpState* op, batchid_t batchId) {
    onDeferOp(allocator, op);
    OpBatch* targetBatch = mBatchLookup[batchId];

    size_t insertBatchIndex = mBatches.size();
    if (targetBatch) {
        locateInsertIndex(batchId, op->computedState.clippedBounds,
                (BatchBase**)(&targetBatch), &insertBatchIndex);
    }

    if (targetBatch) {
        targetBatch->batchOp(op);
    } else {
        // new non-merging batch
        targetBatch = allocator.create<OpBatch>(batchId, op);
        mBatchLookup[batchId] = targetBatch;
        mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
    }
}

// Defers a potentially mergeable op. Looks up an existing MergingOpBatch with the
// same (batchId, mergeId), rejects it if canMergeWith() fails, and re-verifies via
// locateInsertIndex() that nothing drawn since overlaps the op; otherwise a new
// merging batch is created and registered under mergeId.
void LayerBuilder::deferMergeableOp(LinearAllocator& allocator,
        BakedOpState* op, batchid_t batchId, mergeid_t mergeId) {
    onDeferOp(allocator, op);
    MergingOpBatch* targetBatch = nullptr;

    // Try to merge with any existing batch with same mergeId
    auto getResult = mMergingBatchLookup[batchId].find(mergeId);
    if (getResult != mMergingBatchLookup[batchId].end()) {
        targetBatch = getResult->second;
        if (!targetBatch->canMergeWith(op)) {
            targetBatch = nullptr;
        }
    }

    size_t insertBatchIndex = mBatches.size();
    locateInsertIndex(batchId, op->computedState.clippedBounds,
            (BatchBase**)(&targetBatch), &insertBatchIndex);

    if (targetBatch) {
        targetBatch->mergeOp(op);
    } else {
        // new merging batch
        targetBatch = allocator.create<MergingOpBatch>(batchId, op);
        mMergingBatchLookup[batchId].insert(std::make_pair(mergeId, targetBatch));

        mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
    }
}

// Replays all deferred batches in order. Merging batches containing 2+ ops are issued
// in one call through mergedReceivers (keyed by the first op's opId, passing merged
// clip flags/rect); everything else goes op-by-op through unmergedReceivers.
void LayerBuilder::replayBakedOpsImpl(void* arg,
        BakedOpReceiver* unmergedReceivers, MergedOpReceiver* mergedReceivers) const {
    if (renderNode) {
        ATRACE_FORMAT_BEGIN("Issue HW Layer DisplayList %s %ux%u",
                renderNode->getName(), width, height);
    } else {
        ATRACE_BEGIN("flush drawing commands");
    }

    for (const BatchBase* batch : mBatches) {
        size_t size = batch->getOps().size();
        if (size > 1 && batch->isMerging()) {
            int opId = batch->getOps()[0]->op->opId;
            const MergingOpBatch* mergingBatch = static_cast<const MergingOpBatch*>(batch);
            MergedBakedOpList data = {
                    batch->getOps().data(),
                    size,
                    mergingBatch->getClipSideFlags(),
                    mergingBatch->getClipRect()
            };
            mergedReceivers[opId](arg, data);
        } else {
            for (const BakedOpState* op : batch->getOps()) {
                unmergedReceivers[op->op->opId](arg, *op);
            }
        }
    }
    ATRACE_END();
}

// Discards all deferred batches and resets both lookup tables. Note: batches were
// created in a LinearAllocator, so no per-batch deallocation happens here.
void LayerBuilder::clear() {
    mBatches.clear();
    for (int i = 0; i < OpBatchType::Count; i++) {
        mBatchLookup[i] = nullptr;
        mMergingBatchLookup[i].clear();
    }
}

// Logs the layer's dimensions/ownership and dumps every deferred batch.
void LayerBuilder::dump() const {
    ALOGD("LayerBuilder %p, %ux%u buffer %p, blo %p, rn %p (%s)",
            this, width, height, offscreenBuffer, beginLayerOp,
            renderNode, renderNode ? renderNode->getName() : "-");
    for (const BatchBase* batch : mBatches) {
        batch->dump();
    }
}

} // namespace uirenderer
} // namespace android