/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBlurMaskFilter.h"
#include "SkBlurMask.h"
#include "SkGpuBlurUtils.h"
#include "SkMaskFilterBase.h"
#include "SkReadBuffer.h"
#include "SkRRectPriv.h"
#include "SkWriteBuffer.h"
#include "SkMaskFilter.h"
#include "SkRRect.h"
#include "SkStringUtils.h"
#include "SkStrokeRec.h"
#include "SkVertices.h"

#if SK_SUPPORT_GPU
#include "GrCircleBlurFragmentProcessor.h"
#include "GrClip.h"
#include "GrContext.h"
#include "GrFragmentProcessor.h"
#include "GrRenderTargetContext.h"
#include "GrResourceProvider.h"
#include "GrShaderCaps.h"
#include "GrStyle.h"
#include "GrTextureProxy.h"
#include "effects/GrRectBlurEffect.h"
#include "effects/GrRRectBlurEffect.h"
#include "effects/GrSimpleTextureEffect.h"
#include "effects/GrTextureDomain.h"
#include "glsl/GrGLSLFragmentProcessor.h"
#include "glsl/GrGLSLFragmentShaderBuilder.h"
#include "glsl/GrGLSLProgramDataManager.h"
#include "glsl/GrGLSLUniformHandler.h"
#endif

// Public helper: forwards to the shared radius->sigma conversion in SkBlurMask.
SkScalar SkBlurMaskFilter::ConvertRadiusToSigma(SkScalar radius) {
    return SkBlurMask::ConvertRadiusToSigma(radius);
}

// Concrete implementation behind SkBlurMaskFilter::Make(). Holds the blur
// sigma/style/flags plus an optional occluder rect that the nine-patch and
// GPU paths use to skip fully-covered interior geometry.
class SkBlurMaskFilterImpl : public SkMaskFilterBase {
public:
    SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle, const SkRect& occluder, uint32_t flags);

    // overrides from SkMaskFilter
    SkMask::Format getFormat() const override;
    bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
                    SkIPoint* margin) const override;

#if SK_SUPPORT_GPU
    bool canFilterMaskGPU(const SkRRect& devRRect,
                          const SkIRect& clipBounds,
                          const SkMatrix& ctm,
                          SkRect* maskRect) const override;
    bool directFilterMaskGPU(GrContext*,
                             GrRenderTargetContext* renderTargetContext,
                             GrPaint&&,
                             const GrClip&,
                             const SkMatrix& viewMatrix,
                             const SkStrokeRec& strokeRec,
                             const SkPath& path) const override;
    bool directFilterRRectMaskGPU(GrContext*,
                                  GrRenderTargetContext* renderTargetContext,
                                  GrPaint&&,
                                  const GrClip&,
                                  const SkMatrix& viewMatrix,
                                  const SkStrokeRec& strokeRec,
                                  const SkRRect& rrect,
                                  const SkRRect& devRRect) const override;
    sk_sp<GrTextureProxy> filterMaskGPU(GrContext*,
                                        sk_sp<GrTextureProxy> srcProxy,
                                        const SkMatrix& ctm,
                                        const SkIRect& maskRect) const override;
#endif

    void computeFastBounds(const SkRect&, SkRect*) const override;
    bool asABlur(BlurRec*) const override;

    SK_TO_STRING_OVERRIDE()
    SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkBlurMaskFilterImpl)

protected:
    FilterReturn filterRectsToNine(const SkRect[], int count, const SkMatrix&,
                                   const SkIRect& clipBounds,
                                   NinePatch*) const override;

    FilterReturn filterRRectToNine(const SkRRect&, const SkMatrix&,
                                   const SkIRect& clipBounds,
                                   NinePatch*) const override;

    bool filterRectMask(SkMask* dstM, const SkRect& r, const SkMatrix& matrix,
                        SkIPoint* margin, SkMask::CreateMode createMode) const;
    bool filterRRectMask(SkMask* dstM, const SkRRect& r, const SkMatrix& matrix,
                         SkIPoint* margin, SkMask::CreateMode createMode) const;

    // True when kIgnoreTransform was requested: sigma is used as-is instead of
    // being mapped through the CTM.
    bool ignoreXform() const {
        return SkToBool(fBlurFlags & SkBlurMaskFilter::kIgnoreTransform_BlurFlag);
    }

private:
    // To avoid unseemly allocation requests (esp. for finite platforms like
    // handset) we limit the radius so something manageable. (as opposed to
    // a request like 10,000)
    static const SkScalar kMAX_BLUR_SIGMA;

    SkScalar    fSigma;      // pre-transform gaussian sigma (asserted > 0)
    SkBlurStyle fBlurStyle;  // normal / solid / outer / inner
    SkRect      fOccluder;   // optional occluding rect; empty means "none"
    uint32_t    fBlurFlags;  // SkBlurMaskFilter blur-flag bitmask

    SkBlurQuality getQuality() const {
        return (fBlurFlags & SkBlurMaskFilter::kHighQuality_BlurFlag) ?
            kHigh_SkBlurQuality : kLow_SkBlurQuality;
    }

    SkBlurMaskFilterImpl(SkReadBuffer&);
    void flatten(SkWriteBuffer&) const override;

    // Map fSigma through the CTM (unless ignoreXform()), clamped to
    // kMAX_BLUR_SIGMA to bound allocation sizes.
    SkScalar computeXformedSigma(const SkMatrix& ctm) const {
        SkScalar xformedSigma = this->ignoreXform() ? fSigma : ctm.mapRadius(fSigma);
        return SkMinScalar(xformedSigma, kMAX_BLUR_SIGMA);
    }

    friend class SkBlurMaskFilter;

    typedef SkMaskFilter INHERITED;
};

const SkScalar SkBlurMaskFilterImpl::kMAX_BLUR_SIGMA = SkIntToScalar(128);

// Factory. Returns nullptr for non-finite or non-positive sigma; asserts (in
// debug) that style and flags are within their valid ranges.
sk_sp<SkMaskFilter> SkBlurMaskFilter::Make(SkBlurStyle style, SkScalar sigma,
                                           const SkRect& occluder, uint32_t flags) {
    SkASSERT(!(flags & ~SkBlurMaskFilter::kAll_BlurFlag));
    SkASSERT(style <= kLastEnum_SkBlurStyle);

    if (!SkScalarIsFinite(sigma) || sigma <= 0) {
        return nullptr;
    }

    return sk_sp<SkMaskFilter>(new SkBlurMaskFilterImpl(sigma, style, occluder, flags));
}

// linearly interpolate between y1 & y3 to match x2's position between x1 & x3
// NOTE(review): divides by (x3 - x1); callers must guarantee x1 < x3 — the
// assert below only enforces x1 <= x2 <= x3.
static SkScalar interp(SkScalar x1, SkScalar x2, SkScalar x3, SkScalar y1, SkScalar y3) {
    SkASSERT(x1 <= x2 && x2 <= x3);
    SkASSERT(y1 <= y3);

    SkScalar t = (x2 - x1) / (x3 - x1);
    return y1 + t * (y3 - y1);
}

// Insert 'lower' and 'higher' into 'array1' and insert a new value at each matching insertion
// point in 'array2' that linearly interpolates between the existing values.
// Return a bit mask which contains a copy of 'inputMask' for all the cells between the two
// insertion points.
// In-place sorted insertion into the parallel arrays 'array1'/'array2'.
// 'array1' must be ascending and bracket [lower, higher]; '*num' is updated
// when new entries are inserted. The returned mask has 'inputMask' replicated
// (shifted by 'maskSize' bits per cell) for every cell spanned by the range.
// NOTE(review): the accumulator is declared int32_t but returned as uint32_t;
// benign today but worth unifying.
static uint32_t insert_into_arrays(SkScalar* array1, SkScalar* array2,
                                   SkScalar lower, SkScalar higher,
                                   int* num, uint32_t inputMask, int maskSize) {
    SkASSERT(lower < higher);
    SkASSERT(lower >= array1[0] && higher <= array1[*num-1]);

    int32_t skipMask = 0x0;
    int i;
    for (i = 0; i < *num; ++i) {
        if (lower >= array1[i] && lower < array1[i+1]) {
            if (!SkScalarNearlyEqual(lower, array1[i])) {
                // Shift the tail up one slot and splice in 'lower' (and its
                // interpolated partner in array2).
                memmove(&array1[i+2], &array1[i+1], (*num-i-1)*sizeof(SkScalar));
                array1[i+1] = lower;
                memmove(&array2[i+2], &array2[i+1], (*num-i-1)*sizeof(SkScalar));
                array2[i+1] = interp(array1[i], lower, array1[i+2], array2[i], array2[i+2]);
                i++;
                (*num)++;
            }
            break;
        }
    }
    for ( ; i < *num; ++i) {
        // Every cell from the 'lower' insertion point up to 'higher' gets a
        // copy of the input mask.
        skipMask |= inputMask << (i*maskSize);
        if (higher > array1[i] && higher <= array1[i+1]) {
            if (!SkScalarNearlyEqual(higher, array1[i+1])) {
                memmove(&array1[i+2], &array1[i+1], (*num-i-1)*sizeof(SkScalar));
                array1[i+1] = higher;
                memmove(&array2[i+2], &array2[i+1], (*num-i-1)*sizeof(SkScalar));
                array2[i+1] = interp(array1[i], higher, array1[i+2], array2[i], array2[i+2]);
                (*num)++;
            }
            break;
        }
    }

    return skipMask;
}

// Compute the geometry for drawing a blurred rrect as a stretched nine-patch
// (plus occluder-based cell skipping). Returns false when the rrect is too
// small, relative to the blur, to be nine-patched. On success fills in:
//   rrectToDraw/widthHeight - the small rrect to blur and its texture size
//   rectXs/rectYs, texXs/texYs - dst/tex cell boundaries (up to kMaxDivisions)
//   numXs/numYs, skipMask     - cell counts and occluded-cell bitmask
bool SkBlurMaskFilter::ComputeBlurredRRectParams(const SkRRect& srcRRect, const SkRRect& devRRect,
                                                 const SkRect& occluder,
                                                 SkScalar sigma, SkScalar xformedSigma,
                                                 SkRRect* rrectToDraw,
                                                 SkISize* widthHeight,
                                                 SkScalar rectXs[kMaxDivisions],
                                                 SkScalar rectYs[kMaxDivisions],
                                                 SkScalar texXs[kMaxDivisions],
                                                 SkScalar texYs[kMaxDivisions],
                                                 int* numXs, int* numYs, uint32_t* skipMask) {
    // NOTE(review): stored unsigned; assumes 3*ceil(xformedSigma - 1/6) is
    // never negative (ceil of a small negative value is 0).
    unsigned int devBlurRadius = 3*SkScalarCeilToInt(xformedSigma-1/6.0f);
    SkScalar srcBlurRadius = 3.0f * sigma;

    const SkRect& devOrig = devRRect.getBounds();
    const SkVector& devRadiiUL = devRRect.radii(SkRRect::kUpperLeft_Corner);
    const SkVector& devRadiiUR = devRRect.radii(SkRRect::kUpperRight_Corner);
    const SkVector& devRadiiLR = devRRect.radii(SkRRect::kLowerRight_Corner);
    const SkVector& devRadiiLL = devRRect.radii(SkRRect::kLowerLeft_Corner);

    const int devLeft  = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiUL.fX, devRadiiLL.fX));
    const int devTop   = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiUL.fY, devRadiiUR.fY));
    const int devRight = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiUR.fX, devRadiiLR.fX));
    const int devBot   = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiLL.fY, devRadiiLR.fY));

    // This is a conservative check for nine-patchability
    if (devOrig.fLeft + devLeft + devBlurRadius >= devOrig.fRight - devRight - devBlurRadius ||
        devOrig.fTop + devTop + devBlurRadius >= devOrig.fBottom - devBot - devBlurRadius) {
        return false;
    }

    const SkVector& srcRadiiUL = srcRRect.radii(SkRRect::kUpperLeft_Corner);
    const SkVector& srcRadiiUR = srcRRect.radii(SkRRect::kUpperRight_Corner);
    const SkVector& srcRadiiLR = srcRRect.radii(SkRRect::kLowerRight_Corner);
    const SkVector& srcRadiiLL = srcRRect.radii(SkRRect::kLowerLeft_Corner);

    const SkScalar srcLeft  = SkTMax<SkScalar>(srcRadiiUL.fX, srcRadiiLL.fX);
    const SkScalar srcTop   = SkTMax<SkScalar>(srcRadiiUL.fY, srcRadiiUR.fY);
    const SkScalar srcRight = SkTMax<SkScalar>(srcRadiiUR.fX, srcRadiiLR.fX);
    const SkScalar srcBot   = SkTMax<SkScalar>(srcRadiiLL.fY, srcRadiiLR.fY);

    // Smallest rrect that blurs identically at the edges: corner extents plus
    // blur padding plus 1 stretchable center pixel per axis.
    int newRRWidth = 2*devBlurRadius + devLeft + devRight + 1;
    int newRRHeight = 2*devBlurRadius + devTop + devBot + 1;
    widthHeight->fWidth = newRRWidth + 2 * devBlurRadius;
    widthHeight->fHeight = newRRHeight + 2 * devBlurRadius;

    const SkRect srcProxyRect = srcRRect.getBounds().makeOutset(srcBlurRadius, srcBlurRadius);

    rectXs[0] = srcProxyRect.fLeft;
    rectXs[1] = srcProxyRect.fLeft + 2*srcBlurRadius + srcLeft;
    rectXs[2] = srcProxyRect.fRight - 2*srcBlurRadius - srcRight;
    rectXs[3] = srcProxyRect.fRight;

    rectYs[0] = srcProxyRect.fTop;
    rectYs[1] = srcProxyRect.fTop + 2*srcBlurRadius + srcTop;
    rectYs[2] = srcProxyRect.fBottom - 2*srcBlurRadius - srcBot;
    rectYs[3] = srcProxyRect.fBottom;

    texXs[0] = 0.0f;
    texXs[1] = 2.0f*devBlurRadius + devLeft;
    texXs[2] = 2.0f*devBlurRadius + devLeft + 1;
    texXs[3] = SkIntToScalar(widthHeight->fWidth);

    texYs[0] = 0.0f;
    texYs[1] = 2.0f*devBlurRadius + devTop;
    texYs[2] = 2.0f*devBlurRadius + devTop + 1;
    texYs[3] = SkIntToScalar(widthHeight->fHeight);

    SkRect temp = occluder;

    *numXs = 4;
    *numYs = 4;
    *skipMask = 0;
    // If a (non-empty) occluder overlaps the proxy rect, splice its edges into
    // the cell boundaries and mark the covered cells in the skip mask.
    if (!temp.isEmpty() && (srcProxyRect.contains(temp) || temp.intersect(srcProxyRect))) {
        *skipMask = insert_into_arrays(rectXs, texXs, temp.fLeft, temp.fRight, numXs, 0x1, 1);
        *skipMask = insert_into_arrays(rectYs, texYs, temp.fTop, temp.fBottom,
                                       numYs, *skipMask, *numXs-1);
    }

    const SkRect newRect = SkRect::MakeXYWH(SkIntToScalar(devBlurRadius),
                                            SkIntToScalar(devBlurRadius),
                                            SkIntToScalar(newRRWidth),
                                            SkIntToScalar(newRRHeight));
    SkVector newRadii[4];
    newRadii[0] = { SkScalarCeilToScalar(devRadiiUL.fX), SkScalarCeilToScalar(devRadiiUL.fY) };
    newRadii[1] = { SkScalarCeilToScalar(devRadiiUR.fX), SkScalarCeilToScalar(devRadiiUR.fY) };
    newRadii[2] = { SkScalarCeilToScalar(devRadiiLR.fX), SkScalarCeilToScalar(devRadiiLR.fY) };
    newRadii[3] = { SkScalarCeilToScalar(devRadiiLL.fX), SkScalarCeilToScalar(devRadiiLL.fY) };

    rrectToDraw->setRectRadii(newRect, newRadii);
    return true;
}

///////////////////////////////////////////////////////////////////////////////

SkBlurMaskFilterImpl::SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle style,
                                           const SkRect& occluder, uint32_t flags)
    : fSigma(sigma)
    , fBlurStyle(style)
    , fOccluder(occluder)
    , fBlurFlags(flags) {
    SkASSERT(fSigma > 0);
    SkASSERT((unsigned)style <= kLastEnum_SkBlurStyle);
    SkASSERT(flags <= SkBlurMaskFilter::kAll_BlurFlag);
}

// Blur masks are always 8-bit alpha.
SkMask::Format SkBlurMaskFilterImpl::getFormat() const {
    return SkMask::kA8_Format;
}

// Report the blur parameters, but only when they are CTM-independent
// (kIgnoreTransform blurs cannot be described by a plain BlurRec).
bool SkBlurMaskFilterImpl::asABlur(BlurRec* rec) const {
    if (this->ignoreXform()) {
        return false;
    }

    if (rec) {
        rec->fSigma = fSigma;
        rec->fStyle = fBlurStyle;
        rec->fQuality = this->getQuality();
    }
    return true;
}

// General (CPU) mask blur: box-blur approximation via SkBlurMask.
bool SkBlurMaskFilterImpl::filterMask(SkMask* dst, const SkMask& src,
                                      const SkMatrix& matrix,
                                      SkIPoint* margin) const {
    SkScalar sigma = this->computeXformedSigma(matrix);
    return SkBlurMask::BoxBlur(dst, src, sigma, fBlurStyle, this->getQuality(), margin);
}

// Analytic blur of an axis-aligned rect (no rasterized source mask needed).
bool SkBlurMaskFilterImpl::filterRectMask(SkMask* dst, const SkRect& r,
                                          const SkMatrix& matrix,
                                          SkIPoint* margin, SkMask::CreateMode createMode) const {
    SkScalar sigma = computeXformedSigma(matrix);

    return SkBlurMask::BlurRect(sigma, dst, r, fBlurStyle, margin, createMode);
}

// Analytic blur of a round rect.
bool SkBlurMaskFilterImpl::filterRRectMask(SkMask* dst, const SkRRect& r,
                                           const SkMatrix& matrix,
                                           SkIPoint* margin, SkMask::CreateMode createMode) const {
    SkScalar sigma = computeXformedSigma(matrix);

    return SkBlurMask::BlurRRect(sigma, dst, r, fBlurStyle, margin, createMode);
}

#include "SkCanvas.h"

// Allocate a zeroed A8 mask sized to 'bounds'. Returns false on allocation
// failure; on success the caller owns mask->fImage.
static bool prepare_to_draw_into_mask(const SkRect& bounds, SkMask* mask) {
    SkASSERT(mask != nullptr);

    mask->fBounds = bounds.roundOut();
    mask->fRowBytes = SkAlign4(mask->fBounds.width());
    mask->fFormat = SkMask::kA8_Format;
    const size_t size = mask->computeImageSize();
    mask->fImage = SkMask::AllocImage(size, SkMask::kZeroInit_Alloc);
    if (nullptr == mask->fImage) {
        return false;
    }
    return true;
}

// Rasterize an anti-aliased rrect into a freshly-allocated A8 mask.
static bool draw_rrect_into_mask(const SkRRect rrect, SkMask* mask) {
    if (!prepare_to_draw_into_mask(rrect.rect(), mask)) {
        return false;
    }

    // FIXME: This code duplicates code in draw_rects_into_mask, below. Is there a
    // clean way to share more code?
    SkBitmap bitmap;
    bitmap.installMaskPixels(*mask);

    SkCanvas canvas(bitmap);
    canvas.translate(-SkIntToScalar(mask->fBounds.left()),
                     -SkIntToScalar(mask->fBounds.top()));

    SkPaint paint;
    paint.setAntiAlias(true);
    canvas.drawRRect(rrect, paint);
    return true;
}

// Rasterize one rect (count==1) or a rect-with-hole (count==2, even-odd fill)
// into a freshly-allocated A8 mask. rects[0] must bound rects[1].
static bool draw_rects_into_mask(const SkRect rects[], int count, SkMask* mask) {
    if (!prepare_to_draw_into_mask(rects[0], mask)) {
        return false;
    }

    SkBitmap bitmap;
    bitmap.installPixels(SkImageInfo::Make(mask->fBounds.width(),
                                           mask->fBounds.height(),
                                           kAlpha_8_SkColorType,
                                           kPremul_SkAlphaType),
                         mask->fImage, mask->fRowBytes);

    SkCanvas canvas(bitmap);
    canvas.translate(-SkIntToScalar(mask->fBounds.left()),
                     -SkIntToScalar(mask->fBounds.top()));

    SkPaint paint;
    paint.setAntiAlias(true);

    if (1 == count) {
        canvas.drawRect(rects[0], paint);
    } else {
        // todo: do I need a fast way to do this?
        SkPath path;
        path.addRect(rects[0]);
        path.addRect(rects[1]);
        path.setFillType(SkPath::kEvenOdd_FillType);
        canvas.drawPath(path, paint);
    }
    return true;
}

// True when any edge or dimension of 'r' exceeds +/-v (used to reject rects
// too large for the fixed-point nine-patch path).
static bool rect_exceeds(const SkRect& r, SkScalar v) {
    return r.fLeft < -v || r.fTop < -v || r.fRight > v || r.fBottom > v ||
           r.width() > v || r.height() > v;
}

#include "SkMaskCache.h"

// Copy the mask pixels into a ref-counted SkCachedData block and repoint the
// mask at the cached copy (the original image allocation is freed).
static SkCachedData* copy_mask_to_cacheddata(SkMask* mask) {
    const size_t size = mask->computeTotalImageSize();
    SkCachedData* data = SkResourceCache::NewCachedData(size);
    if (data) {
        memcpy(data->writable_data(), mask->fImage, size);
        SkMask::FreeImage(mask->fImage);
        mask->fImage = (uint8_t*)data->data();
    }
    return data;
}

static SkCachedData* find_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
                                       SkBlurQuality quality, const SkRRect& rrect) {
    return SkMaskCache::FindAndRef(sigma, style, quality, rrect, mask);
}

static SkCachedData* add_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
                                      SkBlurQuality quality, const SkRRect& rrect) {
    SkCachedData* cache = copy_mask_to_cacheddata(mask);
    if (cache) {
        SkMaskCache::Add(sigma, style, quality, rrect, *mask, cache);
    }
    return cache;
}

static SkCachedData* find_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
                                       SkBlurQuality quality, const SkRect rects[], int count) {
    return SkMaskCache::FindAndRef(sigma, style, quality, rects, count, mask);
}

static SkCachedData* add_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
                                      SkBlurQuality quality, const SkRect rects[], int count) {
    SkCachedData* cache = copy_mask_to_cacheddata(mask);
    if (cache) {
        SkMaskCache::Add(sigma, style, quality, rects, count, *mask, cache);
    }
    return cache;
}

#ifdef SK_IGNORE_FAST_RRECT_BLUR
// Use the faster analytic blur approach for ninepatch round rects
static const bool c_analyticBlurRRect{false};
#else
static const bool c_analyticBlurRRect{true};
#endif

// Attempt to express a blurred rrect as a nine-patch: blur a minimal rrect
// (cached by sigma/style/quality) and report stretch geometry to the caller.
// Returns kUnimplemented_FilterReturn to fall back to the general path.
SkMaskFilterBase::FilterReturn
SkBlurMaskFilterImpl::filterRRectToNine(const SkRRect& rrect, const SkMatrix& matrix,
                                        const SkIRect& clipBounds,
                                        NinePatch* patch) const {
    SkASSERT(patch != nullptr);
    switch (rrect.getType()) {
        case SkRRect::kEmpty_Type:
            // Nothing to draw.
            return kFalse_FilterReturn;

        case SkRRect::kRect_Type:
            // We should have caught this earlier.
            SkASSERT(false);
            // Fall through.
        case SkRRect::kOval_Type:
            // The nine patch special case does not handle ovals, and we
            // already have code for rectangles.
            return kUnimplemented_FilterReturn;

        // These three can take advantage of this fast path.
        case SkRRect::kSimple_Type:
        case SkRRect::kNinePatch_Type:
        case SkRRect::kComplex_Type:
            break;
    }

    // TODO: report correct metrics for innerstyle, where we do not grow the
    // total bounds, but we do need an inset the size of our blur-radius
    if (kInner_SkBlurStyle == fBlurStyle) {
        return kUnimplemented_FilterReturn;
    }

    // TODO: take clipBounds into account to limit our coordinates up front
    // for now, just skip too-large src rects (to take the old code path).
    if (rect_exceeds(rrect.rect(), SkIntToScalar(32767))) {
        return kUnimplemented_FilterReturn;
    }

    SkIPoint margin;
    SkMask srcM, dstM;
    srcM.fBounds = rrect.rect().roundOut();
    srcM.fFormat = SkMask::kA8_Format;
    srcM.fRowBytes = 0;

    bool filterResult = false;
    if (c_analyticBlurRRect) {
        // special case for fast round rect blur
        // don't actually do the blur the first time, just compute the correct size
        filterResult = this->filterRRectMask(&dstM, rrect, matrix, &margin,
                                             SkMask::kJustComputeBounds_CreateMode);
    }

    if (!filterResult) {
        filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
    }

    if (!filterResult) {
        return kFalse_FilterReturn;
    }

    // Now figure out the appropriate width and height of the smaller round rectangle
    // to stretch. It will take into account the larger radius per side as well as double
    // the margin, to account for inner and outer blur.
    const SkVector& UL = rrect.radii(SkRRect::kUpperLeft_Corner);
    const SkVector& UR = rrect.radii(SkRRect::kUpperRight_Corner);
    const SkVector& LR = rrect.radii(SkRRect::kLowerRight_Corner);
    const SkVector& LL = rrect.radii(SkRRect::kLowerLeft_Corner);

    const SkScalar leftUnstretched = SkTMax(UL.fX, LL.fX) + SkIntToScalar(2 * margin.fX);
    const SkScalar rightUnstretched = SkTMax(UR.fX, LR.fX) + SkIntToScalar(2 * margin.fX);

    // Extra space in the middle to ensure an unchanging piece for stretching. Use 3 to cover
    // any fractional space on either side plus 1 for the part to stretch.
    const SkScalar stretchSize = SkIntToScalar(3);

    const SkScalar totalSmallWidth = leftUnstretched + rightUnstretched + stretchSize;
    if (totalSmallWidth >= rrect.rect().width()) {
        // There is no valid piece to stretch.
        return kUnimplemented_FilterReturn;
    }

    const SkScalar topUnstretched = SkTMax(UL.fY, UR.fY) + SkIntToScalar(2 * margin.fY);
    const SkScalar bottomUnstretched = SkTMax(LL.fY, LR.fY) + SkIntToScalar(2 * margin.fY);

    const SkScalar totalSmallHeight = topUnstretched + bottomUnstretched + stretchSize;
    if (totalSmallHeight >= rrect.rect().height()) {
        // There is no valid piece to stretch.
        return kUnimplemented_FilterReturn;
    }

    SkRect smallR = SkRect::MakeWH(totalSmallWidth, totalSmallHeight);

    SkRRect smallRR;
    SkVector radii[4];
    radii[SkRRect::kUpperLeft_Corner] = UL;
    radii[SkRRect::kUpperRight_Corner] = UR;
    radii[SkRRect::kLowerRight_Corner] = LR;
    radii[SkRRect::kLowerLeft_Corner] = LL;
    smallRR.setRectRadii(smallR, radii);

    const SkScalar sigma = this->computeXformedSigma(matrix);
    SkCachedData* cache = find_cached_rrect(&patch->fMask, sigma, fBlurStyle,
                                            this->getQuality(), smallRR);
    if (!cache) {
        // Cache miss: render (analytically if possible, else rasterize+blur)
        // and then publish to the mask cache.
        bool analyticBlurWorked = false;
        if (c_analyticBlurRRect) {
            analyticBlurWorked =
                this->filterRRectMask(&patch->fMask, smallRR, matrix, &margin,
                                      SkMask::kComputeBoundsAndRenderImage_CreateMode);
        }

        if (!analyticBlurWorked) {
            if (!draw_rrect_into_mask(smallRR, &srcM)) {
                return kFalse_FilterReturn;
            }

            SkAutoMaskFreeImage amf(srcM.fImage);

            if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
                return kFalse_FilterReturn;
            }
        }
        cache = add_cached_rrect(&patch->fMask, sigma, fBlurStyle, this->getQuality(), smallRR);
    }

    patch->fMask.fBounds.offsetTo(0, 0);
    patch->fOuterRect = dstM.fBounds;
    patch->fCenter.fX = SkScalarCeilToInt(leftUnstretched) + 1;
    patch->fCenter.fY = SkScalarCeilToInt(topUnstretched) + 1;
    SkASSERT(nullptr == patch->fCache);
    patch->fCache = cache; // transfer ownership to patch
    return kTrue_FilterReturn;
}

// Use the faster analytic blur approach for ninepatch rects
static const bool c_analyticBlurNinepatch{true};

// Nine-patch fast path for one rect (or rect-with-hole when count==2).
// Blurs a minimal representative rect and reports stretch geometry; returns
// kUnimplemented_FilterReturn to fall back to the general path.
SkMaskFilterBase::FilterReturn
SkBlurMaskFilterImpl::filterRectsToNine(const SkRect rects[], int count,
                                        const SkMatrix& matrix,
                                        const SkIRect& clipBounds,
                                        NinePatch* patch) const {
    if (count < 1 || count > 2) {
        return kUnimplemented_FilterReturn;
    }

    // TODO: report correct metrics for innerstyle, where we do not grow the
    // total bounds, but we do need an inset the size of our blur-radius
    if (kInner_SkBlurStyle == fBlurStyle || kOuter_SkBlurStyle == fBlurStyle) {
        return kUnimplemented_FilterReturn;
    }

    // TODO: take clipBounds into account to limit our coordinates up front
    // for now, just skip too-large src rects (to take the old code path).
    if (rect_exceeds(rects[0], SkIntToScalar(32767))) {
        return kUnimplemented_FilterReturn;
    }

    SkIPoint margin;
    SkMask srcM, dstM;
    srcM.fBounds = rects[0].roundOut();
    srcM.fFormat = SkMask::kA8_Format;
    srcM.fRowBytes = 0;

    bool filterResult = false;
    if (count == 1 && c_analyticBlurNinepatch) {
        // special case for fast rect blur
        // don't actually do the blur the first time, just compute the correct size
        filterResult = this->filterRectMask(&dstM, rects[0], matrix, &margin,
                                            SkMask::kJustComputeBounds_CreateMode);
    } else {
        filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
    }

    if (!filterResult) {
        return kFalse_FilterReturn;
    }

    /*
     *  smallR is the smallest version of 'rect' that will still guarantee that
     *  we get the same blur results on all edges, plus 1 center row/col that is
     *  representative of the extendible/stretchable edges of the ninepatch.
     *  Since our actual edge may be fractional we inset 1 more to be sure we
     *  don't miss any interior blur.
     *  x is an added pixel of blur, and { and } are the (fractional) edge
     *  pixels from the original rect.
     *
     *   x x { x x .... x x } x x
     *
     *  Thus, in this case, we inset by a total of 5 (on each side) beginning
     *  with our outer-rect (dstM.fBounds)
     */
    SkRect smallR[2];
    SkIPoint center;

    // +2 is from +1 for each edge (to account for possible fractional edges
    int smallW = dstM.fBounds.width() - srcM.fBounds.width() + 2;
    int smallH = dstM.fBounds.height() - srcM.fBounds.height() + 2;
    SkIRect innerIR;

    if (1 == count) {
        innerIR = srcM.fBounds;
        center.set(smallW, smallH);
    } else {
        SkASSERT(2 == count);
        rects[1].roundIn(&innerIR);
        center.set(smallW + (innerIR.left() - srcM.fBounds.left()),
                   smallH + (innerIR.top() - srcM.fBounds.top()));
    }

    // +1 so we get a clean, stretchable, center row/col
    smallW += 1;
    smallH += 1;

    // we want the inset amounts to be integral, so we don't change any
    // fractional phase on the fRight or fBottom of our smallR.
    const SkScalar dx = SkIntToScalar(innerIR.width() - smallW);
    const SkScalar dy = SkIntToScalar(innerIR.height() - smallH);
    if (dx < 0 || dy < 0) {
        // we're too small, relative to our blur, to break into nine-patch,
        // so we ask to have our normal filterMask() be called.
        return kUnimplemented_FilterReturn;
    }

    smallR[0].set(rects[0].left(), rects[0].top(), rects[0].right() - dx, rects[0].bottom() - dy);
    if (smallR[0].width() < 2 || smallR[0].height() < 2) {
        return kUnimplemented_FilterReturn;
    }
    if (2 == count) {
        smallR[1].set(rects[1].left(), rects[1].top(),
                      rects[1].right() - dx, rects[1].bottom() - dy);
        SkASSERT(!smallR[1].isEmpty());
    }

    const SkScalar sigma = this->computeXformedSigma(matrix);
    SkCachedData* cache = find_cached_rects(&patch->fMask, sigma, fBlurStyle,
                                            this->getQuality(), smallR, count);
    if (!cache) {
        // Cache miss: analytic rect blur when possible, else rasterize+blur,
        // then publish to the mask cache.
        if (count > 1 || !c_analyticBlurNinepatch) {
            if (!draw_rects_into_mask(smallR, count, &srcM)) {
                return kFalse_FilterReturn;
            }

            SkAutoMaskFreeImage amf(srcM.fImage);

            if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
                return kFalse_FilterReturn;
            }
        } else {
            if (!this->filterRectMask(&patch->fMask, smallR[0], matrix, &margin,
                                      SkMask::kComputeBoundsAndRenderImage_CreateMode)) {
                return kFalse_FilterReturn;
            }
        }
        cache = add_cached_rects(&patch->fMask, sigma, fBlurStyle, this->getQuality(), smallR, count);
    }
    patch->fMask.fBounds.offsetTo(0, 0);
    patch->fOuterRect = dstM.fBounds;
    patch->fCenter = center;
    SkASSERT(nullptr == patch->fCache);
    patch->fCache = cache; // transfer ownership to patch
    return kTrue_FilterReturn;
}

// Conservative bounds: pad by 3*sigma (99.7% of a gaussian) on every side.
// NOTE(review): uses the untransformed fSigma even for non-ignoreXform blurs.
void SkBlurMaskFilterImpl::computeFastBounds(const SkRect& src,
                                             SkRect* dst) const {
    SkScalar pad = 3.0f * fSigma;

    dst->set(src.fLeft - pad, src.fTop - pad,
             src.fRight + pad, src.fBottom + pad);
}

// Deserialize: read32LE clamps style/flags to their max legal values.
sk_sp<SkFlattenable> SkBlurMaskFilterImpl::CreateProc(SkReadBuffer& buffer) {
    const SkScalar sigma = buffer.readScalar();
    SkBlurStyle style = buffer.read32LE(kLastEnum_SkBlurStyle);
    unsigned flags = buffer.read32LE(SkBlurMaskFilter::kAll_BlurFlag);

    SkRect occluder;
    buffer.readRect(&occluder);

    return SkBlurMaskFilter::Make((SkBlurStyle)style, sigma, occluder, flags);
}

// Serialize in the same order CreateProc reads: sigma, style, flags, occluder.
void SkBlurMaskFilterImpl::flatten(SkWriteBuffer& buffer) const {
    buffer.writeScalar(fSigma);
    buffer.writeUInt(fBlurStyle);
    buffer.writeUInt(fBlurFlags);
    buffer.writeRect(fOccluder);
}


#if SK_SUPPORT_GPU

// Direct GPU path for normal-style, fill-style blurs of rects and circles:
// draw coverage geometry with an analytic blur fragment processor instead of
// generating a mask. Returns false to fall back to the mask-based path.
bool SkBlurMaskFilterImpl::directFilterMaskGPU(GrContext* context,
                                               GrRenderTargetContext* renderTargetContext,
                                               GrPaint&& paint,
                                               const GrClip& clip,
                                               const SkMatrix& viewMatrix,
                                               const SkStrokeRec& strokeRec,
                                               const SkPath& path) const {
    SkASSERT(renderTargetContext);

    if (fBlurStyle != kNormal_SkBlurStyle) {
        return false;
    }

    // TODO: we could handle blurred stroked circles
    if (!strokeRec.isFillStyle()) {
        return false;
    }

    SkScalar xformedSigma = this->computeXformedSigma(viewMatrix);

    GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();
    std::unique_ptr<GrFragmentProcessor> fp;

    SkRect rect;
    if (path.isRect(&rect)) {
        // Pad the coverage geometry by 3*sigma so the blur tail is covered.
        SkScalar pad = 3.0f * xformedSigma;
        rect.outset(pad, pad);

        fp = GrRectBlurEffect::Make(proxyProvider, rect, xformedSigma);
    } else if (path.isOval(&rect) && SkScalarNearlyEqual(rect.width(), rect.height())) {
        fp = GrCircleBlurFragmentProcessor::Make(proxyProvider, rect, xformedSigma);

        // expand the rect for the coverage geometry
        int pad = SkScalarCeilToInt(6*xformedSigma)/2;
        rect.outset(SkIntToScalar(pad), SkIntToScalar(pad));
    } else {
        return false;
    }

    if (!fp) {
        return false;
    }

    SkMatrix inverse;
    if (!viewMatrix.invert(&inverse)) {
        return false;
    }

    // Draw in device space with the inverse CTM as local matrix so the effect
    // evaluates in the original (local) coordinate system.
    paint.addCoverageFragmentProcessor(std::move(fp));
    renderTargetContext->fillRectWithLocalMatrix(clip, std::move(paint), GrAA::kNo, SkMatrix::I(),
                                                 rect, inverse);
    return true;
}

// Direct GPU path for normal-style, fill-style blurred round rects. Handles
// rects/circles analytically; otherwise uses GrRRectBlurEffect, carving out
// the occluder from the coverage geometry when possible.
bool SkBlurMaskFilterImpl::directFilterRRectMaskGPU(GrContext* context,
                                                    GrRenderTargetContext* renderTargetContext,
                                                    GrPaint&& paint,
                                                    const GrClip& clip,
                                                    const SkMatrix& viewMatrix,
                                                    const SkStrokeRec& strokeRec,
                                                    const SkRRect& srcRRect,
                                                    const SkRRect& devRRect) const {
    SkASSERT(renderTargetContext);

    if (fBlurStyle != kNormal_SkBlurStyle) {
        return false;
    }

    if (!strokeRec.isFillStyle()) {
        return false;
    }

    GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();
    SkScalar xformedSigma = this->computeXformedSigma(viewMatrix);

    if (devRRect.isRect() || SkRRectPriv::IsCircle(devRRect)) {
        std::unique_ptr<GrFragmentProcessor> fp;
        if (devRRect.isRect()) {
            SkScalar pad = 3.0f * xformedSigma;
            const SkRect dstCoverageRect = devRRect.rect().makeOutset(pad, pad);

            fp = GrRectBlurEffect::Make(proxyProvider, dstCoverageRect, xformedSigma);
        } else {
            fp = GrCircleBlurFragmentProcessor::Make(proxyProvider,
                                                     devRRect.rect(), xformedSigma);
        }

        if (!fp) {
            return false;
        }
        paint.addCoverageFragmentProcessor(std::move(fp));

        SkRect srcProxyRect = srcRRect.rect();
        SkScalar outsetX = 3.0f*fSigma;
        SkScalar outsetY = 3.0f*fSigma;
        if (this->ignoreXform()) {
            // When we're ignoring the CTM the padding added to the source rect also needs to ignore
            // the CTM. The matrix passed in here is guaranteed to be just scale and translate so we
            // can just grab the X and Y scales off the matrix and pre-undo the scale.
            outsetX /= viewMatrix.getScaleX();
            outsetY /= viewMatrix.getScaleY();
        }
        srcProxyRect.outset(outsetX, outsetY);

        renderTargetContext->drawRect(clip, std::move(paint), GrAA::kNo, viewMatrix, srcProxyRect);
        return true;
    }

    auto fp = GrRRectBlurEffect::Make(context, fSigma, xformedSigma, srcRRect, devRRect);
    if (!fp) {
        return false;
    }

    if (!this->ignoreXform()) {
        SkRect srcProxyRect = srcRRect.rect();
        srcProxyRect.outset(3.0f*fSigma, 3.0f*fSigma);

        sk_sp<SkVertices> vertices = nullptr;
        SkRect temp = fOccluder;

        if (!temp.isEmpty() && (srcProxyRect.contains(temp) || temp.intersect(srcProxyRect))) {
            // Occluded case: draw only a ring of triangles between the outer
            // proxy rect and the (clipped) occluder, skipping its interior.
            SkVertices::Builder builder(SkVertices::kTriangles_VertexMode, 8, 24, 0);
            srcProxyRect.toQuad(builder.positions());
            temp.toQuad(builder.positions() + 4);

            static const uint16_t ringI[24] = { 0, 1, 5, 5, 4, 0,
                                                1, 2, 6, 6, 5, 1,
                                                2, 3, 7, 7, 6, 2,
                                                3, 0, 4, 4, 7, 3 };
            memcpy(builder.indices(), ringI, sizeof(ringI));
            vertices = builder.detach();
        } else {
            // full rect case
            SkVertices::Builder builder(SkVertices::kTriangles_VertexMode, 4, 6, 0);
            srcProxyRect.toQuad(builder.positions());

            static const uint16_t fullI[6] = { 0, 1, 2, 0, 2, 3 };
            memcpy(builder.indices(), fullI, sizeof(fullI));
            vertices = builder.detach();
        }

        paint.addCoverageFragmentProcessor(std::move(fp));
        renderTargetContext->drawVertices(clip, std::move(paint), viewMatrix, std::move(vertices));
    } else {
        SkMatrix inverse;
        if (!viewMatrix.invert(&inverse)) {
            return false;
        }

        float extra=3.f*SkScalarCeilToScalar(xformedSigma-1/6.0f);
        SkRect proxyRect = devRRect.rect();
        proxyRect.outset(extra, extra);

        paint.addCoverageFragmentProcessor(std::move(fp));
        renderTargetContext->fillRectWithLocalMatrix(clip, std::move(paint), GrAA::kNo,
                                                     SkMatrix::I(), proxyRect, inverse);
    }

    return true;
}

// Decides whether this blur can be evaluated on the GPU and, if the caller
// supplied 'maskRect', reports the device-space bounds the blurred mask will
// cover (source bounds outset by the blur's 3*sigma support, clipped to the
// similarly-outset clip bounds).
bool SkBlurMaskFilterImpl::canFilterMaskGPU(const SkRRect& devRRect,
                                            const SkIRect& clipBounds,
                                            const SkMatrix& ctm,
                                            SkRect* maskRect) const {
    SkScalar xformedSigma = this->computeXformedSigma(ctm);
    if (xformedSigma <= 0) {
        // A non-positive device-space sigma means there is nothing to blur.
        return false;
    }

    // We always do circles and simple circular rrects on the GPU
    if (!SkRRectPriv::IsCircle(devRRect) && !SkRRectPriv::IsSimpleCircular(devRRect)) {
        static const SkScalar kMIN_GPU_BLUR_SIZE = SkIntToScalar(64);
        static const SkScalar kMIN_GPU_BLUR_SIGMA = SkIntToScalar(32);

        if (devRRect.width() <= kMIN_GPU_BLUR_SIZE &&
            devRRect.height() <= kMIN_GPU_BLUR_SIZE &&
            xformedSigma <= kMIN_GPU_BLUR_SIGMA) {
            // We prefer to blur small rects with small radii on the CPU.
            return false;
        }
    }

    if (nullptr == maskRect) {
        // don't need to compute maskRect
        return true;
    }

    // A Gaussian kernel's effective support is treated as 3*sigma on each side.
    float sigma3 = 3 * SkScalarToFloat(xformedSigma);

    SkRect clipRect = SkRect::Make(clipBounds);
    SkRect srcRect(devRRect.rect());

    // Outset srcRect and clipRect by 3 * sigma, to compute affected blur area.
    srcRect.outset(sigma3, sigma3);
    clipRect.outset(sigma3, sigma3);
    if (!srcRect.intersect(clipRect)) {
        // Blur region is entirely clipped out; report an empty mask rect.
        srcRect.setEmpty();
    }
    *maskRect = srcRect;
    return true;
}

// Gaussian-blurs the mask in 'srcProxy' on the GPU and returns the result as
// a texture proxy. For non-normal blur styles the original (unblurred) mask is
// re-composited over the blurred result with a style-specific coverage set-op.
// Returns nullptr if the blur render target could not be created.
sk_sp<GrTextureProxy> SkBlurMaskFilterImpl::filterMaskGPU(GrContext* context,
                                                          sk_sp<GrTextureProxy> srcProxy,
                                                          const SkMatrix& ctm,
                                                          const SkIRect& maskRect) const {
    // 'maskRect' isn't snapped to the UL corner but the mask in 'src' is.
    const SkIRect clipRect = SkIRect::MakeWH(maskRect.width(), maskRect.height());

    SkScalar xformedSigma = this->computeXformedSigma(ctm);
    // canFilterMaskGPU() rejected non-positive sigmas before we got here.
    SkASSERT(xformedSigma > 0);

    // If we're doing a normal blur, we can clobber the pathTexture in the
    // gaussianBlur. Otherwise, we need to save it for later compositing.
    bool isNormalBlur = (kNormal_SkBlurStyle == fBlurStyle);
    sk_sp<GrRenderTargetContext> renderTargetContext(
            SkGpuBlurUtils::GaussianBlur(context,
                                         srcProxy,
                                         nullptr,
                                         clipRect,
                                         SkIRect::EmptyIRect(),
                                         xformedSigma,
                                         xformedSigma,
                                         GrTextureDomain::kIgnore_Mode));
    if (!renderTargetContext) {
        return nullptr;
    }

    if (!isNormalBlur) {
        GrPaint paint;
        // Blend pathTexture over blurTexture.
        // Note: 'srcProxy' is consumed here, so this must run after GaussianBlur
        // has read from it above.
        paint.addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(std::move(srcProxy),
                                                                       SkMatrix::I()));
        if (kInner_SkBlurStyle == fBlurStyle) {
            // inner:  dst = dst * src
            paint.setCoverageSetOpXPFactory(SkRegion::kIntersect_Op);
        } else if (kSolid_SkBlurStyle == fBlurStyle) {
            // solid:  dst = src + dst - src * dst
            //             = src + (1 - src) * dst
            paint.setCoverageSetOpXPFactory(SkRegion::kUnion_Op);
        } else if (kOuter_SkBlurStyle == fBlurStyle) {
            // outer:  dst = dst * (1 - src)
            //             = 0 * src + (1 - src) * dst
            paint.setCoverageSetOpXPFactory(SkRegion::kDifference_Op);
        } else {
            // Fallback for any style not handled above: straight replace.
            paint.setCoverageSetOpXPFactory(SkRegion::kReplace_Op);
        }

        renderTargetContext->drawRect(GrNoClip(), std::move(paint), GrAA::kNo, SkMatrix::I(),
                                      SkRect::Make(clipRect));
    }

    return renderTargetContext->asTextureProxyRef();
}

#endif // SK_SUPPORT_GPU


#ifndef SK_IGNORE_TO_STRING
// Appends a human-readable description of this filter (sigma, style name,
// and flag names) to 'str' for debugging/logging.
void SkBlurMaskFilterImpl::toString(SkString* str) const {
    str->append("SkBlurMaskFilterImpl: (");

    str->append("sigma: ");
    str->appendScalar(fSigma);
    str->append(" ");

    // Indexed by SkBlurStyle; must stay in sync with that enum's ordering.
    static const char* gStyleName[kLastEnum_SkBlurStyle + 1] = {
        "normal", "solid", "outer", "inner"
    };

    str->appendf("style: %s ", gStyleName[fBlurStyle]);
    str->append("flags: (");
    if (fBlurFlags) {
        bool needSeparator = false;
        SkAddFlagToString(str, this->ignoreXform(), "IgnoreXform", &needSeparator);
        SkAddFlagToString(str,
                          SkToBool(fBlurFlags & SkBlurMaskFilter::kHighQuality_BlurFlag),
                          "HighQuality", &needSeparator);
    } else {
        str->append("None");
    }
    str->append("))");
}
#endif

// Registers the impl class with Skia's flattenable (serialization) factory so
// SkBlurMaskFilterImpl instances can be deserialized by name.
SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkBlurMaskFilter)
    SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkBlurMaskFilterImpl)
SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END