/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrSoftwarePathRenderer.h"
#include "GrAuditTrail.h"
#include "GrClip.h"
#include "GrContextPriv.h"
#include "GrDeferredProxyUploader.h"
#include "GrGpuResourcePriv.h"
#include "GrOpFlushState.h"
#include "GrOpList.h"
#include "GrProxyProvider.h"
#include "GrSWMaskHelper.h"
#include "SkMakeUnique.h"
#include "SkSemaphore.h"
#include "SkTaskGroup.h"
#include "SkTraceEvent.h"
#include "ops/GrDrawOp.h"
#include "ops/GrRectOpFactory.h"

////////////////////////////////////////////////////////////////////////////////
GrPathRenderer::CanDrawPath
GrSoftwarePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // Pass on any style that applies. The caller will apply the style if a suitable renderer is
    // not found and try again with the new GrShape.
    if (!args.fShape->style().applies() && SkToBool(fProxyProvider) &&
        (args.fAAType == GrAAType::kCoverage || args.fAAType == GrAAType::kNone)) {
        // This is the fallback renderer for when a path is too complicated for the GPU ones.
        return CanDrawPath::kAsBackup;
    }
    return CanDrawPath::kNo;
}

////////////////////////////////////////////////////////////////////////////////
static bool get_unclipped_shape_dev_bounds(const GrShape& shape, const SkMatrix& matrix,
                                           SkIRect* devBounds) {
    SkRect shapeBounds = shape.styledBounds();
    if (shapeBounds.isEmpty()) {
        return false;
    }
    SkRect shapeDevBounds;
    matrix.mapRect(&shapeDevBounds, shapeBounds);
    // Even though these are "unclipped" bounds we still clip to the int32_t range.
    // This is the largest int32_t that is representable exactly as a float. The next 63 larger
    // ints would round down to this value when cast to a float, but who really cares.
    // INT32_MIN is exactly representable.
    static constexpr int32_t kMaxInt = 2147483520;
    if (!shapeDevBounds.intersect(SkRect::MakeLTRB(INT32_MIN, INT32_MIN, kMaxInt, kMaxInt))) {
        return false;
    }
    // Make sure that the resulting SkIRect can have representable width and height
    if (SkScalarRoundToInt(shapeDevBounds.width()) > kMaxInt ||
        SkScalarRoundToInt(shapeDevBounds.height()) > kMaxInt) {
        return false;
    }
    shapeDevBounds.roundOut(devBounds);
    return true;
}
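
// Editor's sketch (not part of the original Skia source; guarded out of the build): a worked
// check of the kMaxInt value used above. 2147483520 == 2^31 - 128, and because a float carries a
// 24-bit significand, consecutive floats near 2^31 are spaced 128 apart, so this is the largest
// int32_t that survives an int -> float -> int round trip.
#if 0
static_assert(2147483520 == (int64_t(1) << 31) - 128, "kMaxInt is 2^31 - 128");
static_assert(int32_t(float(2147483520)) == 2147483520, "exactly representable as a float");
static_assert(float(2147483583) == float(2147483520), "the next 63 ints round down to kMaxInt");
#endif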

// Gets the shape bounds, the clip bounds, and the intersection (if any). Returns false if there
// is no intersection.
static bool get_shape_and_clip_bounds(int width, int height,
                                      const GrClip& clip,
                                      const GrShape& shape,
                                      const SkMatrix& matrix,
                                      SkIRect* unclippedDevShapeBounds,
                                      SkIRect* clippedDevShapeBounds,
                                      SkIRect* devClipBounds) {
    // compute bounds as intersection of rt size, clip, and path
    clip.getConservativeBounds(width, height, devClipBounds);

    if (!get_unclipped_shape_dev_bounds(shape, matrix, unclippedDevShapeBounds)) {
        *unclippedDevShapeBounds = SkIRect::EmptyIRect();
        *clippedDevShapeBounds = SkIRect::EmptyIRect();
        return false;
    }
    if (!clippedDevShapeBounds->intersect(*devClipBounds, *unclippedDevShapeBounds)) {
        *clippedDevShapeBounds = SkIRect::EmptyIRect();
        return false;
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

void GrSoftwarePathRenderer::DrawNonAARect(GrRenderTargetContext* renderTargetContext,
                                           GrPaint&& paint,
                                           const GrUserStencilSettings& userStencilSettings,
                                           const GrClip& clip,
                                           const SkMatrix& viewMatrix,
                                           const SkRect& rect,
                                           const SkMatrix& localMatrix) {
    renderTargetContext->addDrawOp(clip,
                                   GrRectOpFactory::MakeNonAAFillWithLocalMatrix(
                                           std::move(paint), viewMatrix, localMatrix, rect,
                                           GrAAType::kNone, &userStencilSettings));
}

void GrSoftwarePathRenderer::DrawAroundInvPath(GrRenderTargetContext* renderTargetContext,
                                               GrPaint&& paint,
                                               const GrUserStencilSettings& userStencilSettings,
                                               const GrClip& clip,
                                               const SkMatrix& viewMatrix,
                                               const SkIRect& devClipBounds,
                                               const SkIRect& devPathBounds) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

    SkRect rect;
    if (devClipBounds.fTop < devPathBounds.fTop) {
        rect.iset(devClipBounds.fLeft, devClipBounds.fTop,
                  devClipBounds.fRight, devPathBounds.fTop);
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fLeft < devPathBounds.fLeft) {
        rect.iset(devClipBounds.fLeft, devPathBounds.fTop,
                  devPathBounds.fLeft, devPathBounds.fBottom);
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fRight > devPathBounds.fRight) {
        rect.iset(devPathBounds.fRight, devPathBounds.fTop,
                  devClipBounds.fRight, devPathBounds.fBottom);
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fBottom > devPathBounds.fBottom) {
        rect.iset(devClipBounds.fLeft, devPathBounds.fBottom,
                  devClipBounds.fRight, devClipBounds.fBottom);
        DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
}
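
// Editor's note (not part of the original source): DrawAroundInvPath() above handles an inverse
// fill by emitting up to four non-overlapping rects that tile devClipBounds minus devPathBounds;
// the interior is covered later by the masked draw in DrawToTargetWithShapeMask().
//
//     +---------------------------+
//     |            top            |
//     +------+-------------+------+
//     | left |  devPath    | right|
//     |      |   Bounds    |      |
//     +------+-------------+------+
//     |           bottom          |
//     +---------------------------+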

void GrSoftwarePathRenderer::DrawToTargetWithShapeMask(
        sk_sp<GrTextureProxy> proxy,
        GrRenderTargetContext* renderTargetContext,
        GrPaint&& paint,
        const GrUserStencilSettings& userStencilSettings,
        const GrClip& clip,
        const SkMatrix& viewMatrix,
        const SkIPoint& textureOriginInDeviceSpace,
        const SkIRect& deviceSpaceRectToDraw) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

    SkRect dstRect = SkRect::Make(deviceSpaceRectToDraw);

    // We use device coords to compute the texture coordinates. We take the device coords and
    // apply a translation so that the top-left of the device bounds maps to 0,0, and then a
    // scaling matrix to normalized coords.
    SkMatrix maskMatrix = SkMatrix::MakeTrans(SkIntToScalar(-textureOriginInDeviceSpace.fX),
                                              SkIntToScalar(-textureOriginInDeviceSpace.fY));
    maskMatrix.preConcat(viewMatrix);
    paint.addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(
            std::move(proxy), maskMatrix, GrSamplerState::Filter::kNearest));
    DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip, SkMatrix::I(),
                  dstRect, invert);
}

static sk_sp<GrTextureProxy> make_deferred_mask_texture_proxy(GrContext* context, SkBackingFit fit,
                                                              int width, int height) {
    GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();

    GrSurfaceDesc desc;
    desc.fOrigin = kTopLeft_GrSurfaceOrigin;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = kAlpha_8_GrPixelConfig;

    // MDB TODO: We're going to fill this proxy with an ASAP upload (which is out of order wrt
    // ops), so it can't have any pending IO.
    return proxyProvider->createProxy(desc, fit, SkBudgeted::kYes,
                                      GrResourceProvider::kNoPendingIO_Flag);
}

namespace {

/**
 * Payload class for use with GrTDeferredProxyUploader. The software path renderer only draws
 * a single path into the mask texture. This stores all of the information needed by the worker
 * thread's call to drawShape (see below, in onDrawPath).
 */
class SoftwarePathData {
public:
    SoftwarePathData(const SkIRect& maskBounds, const SkMatrix& viewMatrix, const GrShape& shape,
                     GrAA aa)
            : fMaskBounds(maskBounds)
            , fViewMatrix(viewMatrix)
            , fShape(shape)
            , fAA(aa) {}

    const SkIRect& getMaskBounds() const { return fMaskBounds; }
    const SkMatrix* getViewMatrix() const { return &fViewMatrix; }
    const GrShape& getShape() const { return fShape; }
    GrAA getAA() const { return fAA; }

private:
    SkIRect fMaskBounds;
    SkMatrix fViewMatrix;
    GrShape fShape;
    GrAA fAA;
};

// When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
class PathInvalidator : public SkPathRef::GenIDChangeListener {
public:
    explicit PathInvalidator(const GrUniqueKey& key) : fMsg(key) {}
private:
    GrUniqueKeyInvalidatedMessage fMsg;

    void onChange() override {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(fMsg);
    }
};

}
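
// Editor's sketch of the caching/invalidation flow (not part of the original source; the consumer
// of the message bus lives outside this file and is only assumed here):
//   1. onDrawPath() below caches the rasterized mask proxy under a GrUniqueKey built from the
//      view matrix and the shape's unstyled key.
//   2. A PathInvalidator is registered as a GenIDChangeListener on the path backing the shape.
//   3. If the path's contents change, onChange() posts a GrUniqueKeyInvalidatedMessage, and the
//      listener on that message bus purges the proxy keyed by maskKey.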

////////////////////////////////////////////////////////////////////////////////
// return true on success; false on failure
bool GrSoftwarePathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
                              "GrSoftwarePathRenderer::onDrawPath");
    if (!fProxyProvider) {
        return false;
    }

    // We really need to know if the shape will be inverse filled or not
    bool inverseFilled = false;
    SkTLazy<GrShape> tmpShape;
    SkASSERT(!args.fShape->style().applies());
    // If the path is hairline, ignore inverse fill.
    inverseFilled = args.fShape->inverseFilled() &&
                    !IsStrokeHairlineOrEquivalent(args.fShape->style(), *args.fViewMatrix, nullptr);

    SkIRect unclippedDevShapeBounds, clippedDevShapeBounds, devClipBounds;
    // To prevent overloading the cache with entries during animations we limit the cache of masks
    // to cases where the matrix preserves axis alignment.
    bool useCache = fAllowCaching && !inverseFilled && args.fViewMatrix->preservesAxisAlignment() &&
                    args.fShape->hasUnstyledKey() && GrAAType::kCoverage == args.fAAType;

    if (!get_shape_and_clip_bounds(args.fRenderTargetContext->width(),
                                   args.fRenderTargetContext->height(),
                                   *args.fClip, *args.fShape,
                                   *args.fViewMatrix, &unclippedDevShapeBounds,
                                   &clippedDevShapeBounds,
                                   &devClipBounds)) {
        if (inverseFilled) {
            DrawAroundInvPath(args.fRenderTargetContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix,
                              devClipBounds, unclippedDevShapeBounds);
        }
        return true;
    }

    const SkIRect* boundsForMask = &clippedDevShapeBounds;
    if (useCache) {
        // Use the cache only if >50% of the path is visible.
        int unclippedWidth = unclippedDevShapeBounds.width();
        int unclippedHeight = unclippedDevShapeBounds.height();
        int64_t unclippedArea = sk_64_mul(unclippedWidth, unclippedHeight);
        int64_t clippedArea = sk_64_mul(clippedDevShapeBounds.width(),
                                        clippedDevShapeBounds.height());
        int maxTextureSize = args.fRenderTargetContext->caps()->maxTextureSize();
        if (unclippedArea > 2 * clippedArea || unclippedWidth > maxTextureSize ||
            unclippedHeight > maxTextureSize) {
            useCache = false;
        } else {
            boundsForMask = &unclippedDevShapeBounds;
        }
    }

    GrUniqueKey maskKey;
    if (useCache) {
        // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
        SkScalar sx = args.fViewMatrix->get(SkMatrix::kMScaleX);
        SkScalar sy = args.fViewMatrix->get(SkMatrix::kMScaleY);
        SkScalar kx = args.fViewMatrix->get(SkMatrix::kMSkewX);
        SkScalar ky = args.fViewMatrix->get(SkMatrix::kMSkewY);
        static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
        // Fractional translate does not affect caching on Android. This is done for better cache
        // hit ratio and speed, but it is matching HWUI behavior, which doesn't consider the matrix
        // at all when caching paths.
        GrUniqueKey::Builder builder(&maskKey, kDomain, 4 + args.fShape->unstyledKeySize());
#else
        SkScalar tx = args.fViewMatrix->get(SkMatrix::kMTransX);
        SkScalar ty = args.fViewMatrix->get(SkMatrix::kMTransY);
        // Allow 8 bits each in x and y of subpixel positioning.
        SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
        SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
        GrUniqueKey::Builder builder(&maskKey, kDomain, 5 + args.fShape->unstyledKeySize());
#endif
        builder[0] = SkFloat2Bits(sx);
        builder[1] = SkFloat2Bits(sy);
        builder[2] = SkFloat2Bits(kx);
        builder[3] = SkFloat2Bits(ky);
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
        args.fShape->writeUnstyledKey(&builder[4]);
#else
        builder[4] = fracX | (fracY >> 8);
        args.fShape->writeUnstyledKey(&builder[5]);
#endif
    }
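
    // Editor's note (not part of the original source): the resulting key layout, in 32-bit words:
    //   default:           [sx][sy][kx][ky][(top 8 frac bits of tx << 8) | top 8 frac bits of ty]
    //                      [shape unstyled key ...]
    //   Android framework: [sx][sy][kx][ky][shape unstyled key ...]
    //                      (fractional translate ignored, matching HWUI, as noted above)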

    sk_sp<GrTextureProxy> proxy;
    if (useCache) {
        proxy = fProxyProvider->findOrCreateProxyByUniqueKey(maskKey, kTopLeft_GrSurfaceOrigin);
    }
    if (!proxy) {
        SkBackingFit fit = useCache ? SkBackingFit::kExact : SkBackingFit::kApprox;
        GrAA aa = GrAAType::kCoverage == args.fAAType ? GrAA::kYes : GrAA::kNo;

        SkTaskGroup* taskGroup = args.fContext->contextPriv().getTaskGroup();
        if (taskGroup) {
            proxy = make_deferred_mask_texture_proxy(args.fContext, fit,
                                                     boundsForMask->width(),
                                                     boundsForMask->height());
            if (!proxy) {
                return false;
            }

            auto uploader = skstd::make_unique<GrTDeferredProxyUploader<SoftwarePathData>>(
                    *boundsForMask, *args.fViewMatrix, *args.fShape, aa);
            GrTDeferredProxyUploader<SoftwarePathData>* uploaderRaw = uploader.get();

            auto drawAndUploadMask = [uploaderRaw] {
                TRACE_EVENT0("skia", "Threaded SW Mask Render");
                GrSWMaskHelper helper(uploaderRaw->getPixels());
                if (helper.init(uploaderRaw->data().getMaskBounds())) {
                    helper.drawShape(uploaderRaw->data().getShape(),
                                     *uploaderRaw->data().getViewMatrix(),
                                     SkRegion::kReplace_Op, uploaderRaw->data().getAA(), 0xFF);
                } else {
                    SkDEBUGFAIL("Unable to allocate SW mask.");
                }
                uploaderRaw->signalAndFreeData();
            };
            taskGroup->add(std::move(drawAndUploadMask));
            proxy->texPriv().setDeferredUploader(std::move(uploader));
        } else {
            GrSWMaskHelper helper;
            if (!helper.init(*boundsForMask)) {
                return false;
            }
            helper.drawShape(*args.fShape, *args.fViewMatrix, SkRegion::kReplace_Op, aa, 0xFF);
            proxy = helper.toTextureProxy(args.fContext, fit);
        }

        if (!proxy) {
            return false;
        }
        if (useCache) {
            SkASSERT(proxy->origin() == kTopLeft_GrSurfaceOrigin);
            fProxyProvider->assignUniqueKeyToProxy(maskKey, proxy.get());
            args.fShape->addGenIDChangeListener(new PathInvalidator(maskKey));
        }
    }
    if (inverseFilled) {
        DrawAroundInvPath(args.fRenderTargetContext, GrPaint::Clone(args.fPaint),
                          *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix, devClipBounds,
                          unclippedDevShapeBounds);
    }
    DrawToTargetWithShapeMask(
            std::move(proxy), args.fRenderTargetContext, std::move(args.fPaint),
            *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix,
            SkIPoint{boundsForMask->fLeft, boundsForMask->fTop}, *boundsForMask);

    return true;
}
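
// Editor's summary of the mask path in onDrawPath() above (not part of the original source):
// when a SkTaskGroup is available, a deferred proxy is created up front and rasterization
// (GrSWMaskHelper::drawShape) runs on a worker thread; the GrTDeferredProxyUploader then supplies
// the pixels later via the ASAP upload noted in make_deferred_mask_texture_proxy(). Without a
// task group, the mask is rasterized immediately on the recording thread and wrapped into a
// texture proxy via GrSWMaskHelper::toTextureProxy().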