/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "Sk4fGradientBase.h"

namespace {

const float kInv255Float = 1.0f / 255;

SkPMColor pack_color(SkColor c, bool premul) {
    return premul
        ? SkPreMultiplyColor(c)
        : SkPackARGB32NoCheck(SkColorGetA(c), SkColorGetR(c), SkColorGetG(c), SkColorGetB(c));
}

// true when x is in [k1,k2)
bool in_range(SkScalar x, SkScalar k1, SkScalar k2) {
    SkASSERT(k1 != k2);
    return (k1 < k2)
        ? (x >= k1 && x < k2)
        : (x >= k2 && x < k1);
}

} // anonymous namespace

SkGradientShaderBase::GradientShaderBase4fContext::
Interval::Interval(SkPMColor c0, SkScalar p0,
                   SkPMColor c1, SkScalar p1,
                   const Sk4f& componentScale)
    : fP0(p0)
    , fP1(p1)
    , fZeroRamp(c0 == c1) {
    SkASSERT(p0 != p1);

    const Sk4f c4f0 = SkNx_cast<float>(Sk4b::Load(&c0)) * componentScale;
    const Sk4f c4f1 = SkNx_cast<float>(Sk4b::Load(&c1)) * componentScale;
    const Sk4f dc4f = (c4f1 - c4f0) / (p1 - p0);

    c4f0.store(&fC0.fVec);
    dc4f.store(&fDc.fVec);
}

SkGradientShaderBase::GradientShaderBase4fContext::
Interval::Interval(const Sk4f& c0, const Sk4f& dc,
                   SkScalar p0, SkScalar p1)
    : fP0(p0)
    , fP1(p1)
    , fZeroRamp((dc == 0).allTrue()) {
    c0.store(fC0.fVec);
    dc.store(fDc.fVec);
}

bool SkGradientShaderBase::GradientShaderBase4fContext::
Interval::contains(SkScalar fx) const {
    return in_range(fx, fP0, fP1);
}

SkGradientShaderBase::
GradientShaderBase4fContext::GradientShaderBase4fContext(const SkGradientShaderBase& shader,
                                                          const ContextRec& rec)
    : INHERITED(shader, rec)
    , fFlags(this->INHERITED::getFlags())
#ifdef SK_SUPPORT_LEGACY_GRADIENT_DITHERING
    , fDither(true)
#else
    , fDither(rec.fPaint->isDither())
#endif
{
    // The main job here is to build an interval list. Intervals are a different
    // representation of the color stops data, optimized for efficient scan line
    // access during shading.
    //
    //   [{P0,C0} , {P1,C1}) [{P1,C2} , {P2,C3}) ... [{Pn,C2n} , {Pn+1,C2n+1})
    //
    // The list is sorted in increasing dst order, i.e. X(Pk) < X(Pk+1). This
    // allows us to always traverse left->right when iterating over a scan line.
    // It also means that the interval order matches the color stops when dx >= 0,
    // and is the inverse (pos, colors, order are flipped) when dx < 0.
    //
    // Note: the current representation duplicates pos data; we could refactor to
    //       avoid this if interval storage size becomes a concern.
    //
    // Aside from reordering, we also perform two more pre-processing steps at
    // this stage:
    //
    //   1) scale the color components depending on paint alpha and the requested
    //      interpolation space (note: the interval color storage is SkPM4f, but
    //      that doesn't necessarily mean the colors are premultiplied; that
    //      property is tracked in fColorsArePremul)
    //
    //   2) inject synthetic intervals to support tiling.
    //
    //      * for kRepeat, no extra intervals are needed - the iterator just
    //        wraps around at the end:
    //
    //          ->[P0,P1)->..[Pn-1,Pn)->
    //
    //      * for kClamp, we add two "infinite" intervals before/after:
    //
    //          [-/+inf , P0)->[P0 , P1)->..[Pn-1 , Pn)->[Pn , +/-inf)
    //
    //        (the iterator should never run off the end in this mode)
    //
    //      * for kMirror, we extend the range to [0..2] and add a flipped
    //        interval series - then the iterator operates just as in the
    //        kRepeat case:
    //
    //          ->[P0,P1)->..[Pn-1,Pn)->[2 - Pn,2 - Pn-1)->..[2 - P1,2 - P0)->
    //
    // TODO: investigate collapsing intervals << 1px.

    const SkMatrix& inverse = this->getTotalInverse();
    fDstToPos.setConcat(shader.fPtsToUnit, inverse);
    fDstToPosProc = fDstToPos.getMapXYProc();
    fDstToPosClass = static_cast<uint8_t>(INHERITED::ComputeMatrixClass(fDstToPos));

    if (shader.fColorsAreOpaque && this->getPaintAlpha() == SK_AlphaOPAQUE) {
        fFlags |= kOpaqueAlpha_Flag;
    }

    fColorsArePremul =
        (shader.fGradFlags & SkGradientShader::kInterpolateColorsInPremul_Flag)
        || shader.fColorsAreOpaque;

    const float paintAlpha = rec.fPaint->getAlpha() * kInv255Float;
    const Sk4f componentScale = fColorsArePremul
        ? Sk4f(paintAlpha * kInv255Float)
        : Sk4f(kInv255Float, kInv255Float, kInv255Float, paintAlpha * kInv255Float);

    SkASSERT(shader.fColorCount > 1);
    SkASSERT(shader.fOrigColors);

    int direction = 1;
    int first_index = 0;
    int last_index = shader.fColorCount - 1;
    SkScalar first_pos = 0;
    SkScalar last_pos = 1;
    const bool dx_is_pos = fDstToPos.getScaleX() >= 0;
    if (!dx_is_pos) {
        direction = -direction;
        SkTSwap(first_index, last_index);
        SkTSwap(first_pos, last_pos);
    }

    if (shader.fTileMode == SkShader::kClamp_TileMode) {
        // synthetic edge interval: [-/+inf .. P0)
        const SkPMColor clamp_color = pack_color(shader.fOrigColors[first_index],
                                                 fColorsArePremul);
        const SkScalar clamp_pos = dx_is_pos ? SK_ScalarMin : SK_ScalarMax;
        fIntervals.emplace_back(clamp_color, clamp_pos,
                                clamp_color, first_pos,
                                componentScale);
    }

    int prev = first_index;
    int curr = prev + direction;
    SkScalar prev_pos = first_pos;
    if (shader.fOrigPos) {
        // explicit positions
        do {
            // TODO: this sanitization should be done in SkGradientShaderBase
            const SkScalar curr_pos = (dx_is_pos)
                ? SkTPin(shader.fOrigPos[curr], prev_pos, last_pos)
                : SkTPin(shader.fOrigPos[curr], last_pos, prev_pos);
            if (curr_pos != prev_pos) {
                fIntervals.emplace_back(
                    pack_color(shader.fOrigColors[prev], fColorsArePremul),
                    prev_pos,
                    pack_color(shader.fOrigColors[curr], fColorsArePremul),
                    curr_pos,
                    componentScale);
            }
            prev = curr;
            prev_pos = curr_pos;
            curr += direction;
        } while (prev != last_index);
    } else {
        // implicit positions
        const SkScalar dt = direction * SK_Scalar1 / (shader.fColorCount - 1);
        do {
            const SkScalar curr_pos = prev_pos + dt;
            fIntervals.emplace_back(
                pack_color(shader.fOrigColors[prev], fColorsArePremul),
                prev_pos,
                pack_color(shader.fOrigColors[curr], fColorsArePremul),
                curr_pos,
                componentScale);

            prev = curr;
            prev_pos = curr_pos;
            curr += direction;
        } while (prev != last_index);
        // pin the last pos to maintain accurate [0,1] pos coverage.
        fIntervals.back().fP1 = last_pos;
    }

    if (shader.fTileMode == SkShader::kClamp_TileMode) {
        // synthetic edge interval: [Pn .. +/-inf)
        const SkPMColor clamp_color =
            pack_color(shader.fOrigColors[last_index], fColorsArePremul);
        const SkScalar clamp_pos = dx_is_pos ? SK_ScalarMax : SK_ScalarMin;
        fIntervals.emplace_back(clamp_color, last_pos,
                                clamp_color, clamp_pos,
                                componentScale);
    } else if (shader.fTileMode == SkShader::kMirror_TileMode) {
        const int count = fIntervals.count();
        // synthetic flipped intervals in [1 .. 2)
        // (see the worked example at the end of this file)
        for (int i = count - 1; i >= 0; --i) {
            const Interval& interval = fIntervals[i];
            const SkScalar p0 = interval.fP0;
            const SkScalar p1 = interval.fP1;
            Sk4f dc = Sk4f::Load(interval.fDc.fVec);
            Sk4f c = Sk4f::Load(interval.fC0.fVec) + dc * Sk4f(p1 - p0);
            fIntervals.emplace_back(c, dc * Sk4f(-1), 2 - p1, 2 - p0);
        }

        if (!dx_is_pos) {
            // When dx is negative, our initial intervals are in (1..0] order.
            // The loop above appends their flipped counterparts, pivoted at 2: (1..0](2..1]
            // To achieve the expected monotonic interval order, we need to
            // swap the two halves: (2..1](1..0]
            // TODO: we can probably avoid this late swap with some additional logic during
            //       the initial interval buildup.
            SkASSERT(fIntervals.count() == count * 2);
            for (int i = 0; i < count; ++i) {
                SkTSwap(fIntervals[i], fIntervals[count + i]);
            }
        }
    }

    SkASSERT(fIntervals.count() > 0);
    fCachedInterval = fIntervals.begin();
}

const SkGradientShaderBase::GradientShaderBase4fContext::Interval*
SkGradientShaderBase::
GradientShaderBase4fContext::findInterval(SkScalar fx) const {
    SkASSERT(in_range(fx, fIntervals.front().fP0, fIntervals.back().fP1));

    if (1) {
        // Linear search, using the last scanline interval as a starting point.
        SkASSERT(fCachedInterval >= fIntervals.begin());
        SkASSERT(fCachedInterval < fIntervals.end());
        const int search_dir = fDstToPos.getScaleX() >= 0 ? 1 : -1;
        while (!in_range(fx, fCachedInterval->fP0, fCachedInterval->fP1)) {
            fCachedInterval += search_dir;
            if (fCachedInterval >= fIntervals.end()) {
                fCachedInterval = fIntervals.begin();
            } else if (fCachedInterval < fIntervals.begin()) {
                fCachedInterval = fIntervals.end() - 1;
            }
        }
        return fCachedInterval;
    } else {
        // Binary search. Seems less effective than linear + caching.
        const Interval* i0 = fIntervals.begin();
        const Interval* i1 = fIntervals.end() - 1;

        while (i0 != i1) {
            SkASSERT(i0 < i1);
            SkASSERT(in_range(fx, i0->fP0, i1->fP1));

            const Interval* i = i0 + ((i1 - i0) >> 1);

            if (in_range(fx, i0->fP0, i->fP1)) {
                i1 = i;
            } else {
                SkASSERT(in_range(fx, i->fP1, i1->fP1));
                i0 = i + 1;
            }
        }

        SkASSERT(in_range(fx, i0->fP0, i0->fP1));
        return i0;
    }
}
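
// Worked example of the kMirror flip performed in the constructor above.
// This is an illustrative sketch only: plain scalars stand in for the Sk4f
// color lanes, and the single-channel ramp below is hypothetical, not taken
// from any test in this file.
//
// For an original interval [p0, p1) with start color c0 and color slope dc,
// the flipped counterpart spans [2 - p1, 2 - p0), starts at the original end
// color c0 + dc * (p1 - p0), and ramps with -dc. Sampling the flipped interval
// at x therefore reproduces the original color at the mirrored position 2 - x:
//
//   flipped(x) = (c0 + dc * (p1 - p0)) - dc * (x - (2 - p1))
//              = c0 + dc * ((2 - x) - p0)
//              = original(2 - x)
//
// E.g. a single-channel ramp c0 = 0, dc = 1 over [0, 1) flips to an interval
// over [1, 2) starting at 1 with slope -1, so x = 1.5 yields 0.5 -- the same
// value the original interval produces at the mirrored position 0.5.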