
Lines Matching refs: U32

64     using U32 = uint32_t;
76 SI U32 round (F v, F scale) { return (uint32_t)(v*scale + 0.5f); }
77 SI U16 pack(U32 v) { return (U16)v; }
83 SI T gather(const T* p, U32 ix) { return p[ix]; }
124 using U32 = V<uint32_t>;
134 SI U16 pack(U32 v) { return __builtin_convertvector(v, U16); }
137 SI F if_then_else(I32 c, F t, F e) { return vbslq_f32((U32)c,t,e); }
143 SI U32 round(F v, F scale) { return vcvtnq_u32_f32(v*scale); }
158 SI U32 round(F v, F scale) {
165 SI V<T> gather(const T* p, U32 ix) {
237 using U32 = V<uint32_t>;
256 SI U32 round (F v, F scale) { return _mm256_cvtps_epi32(v*scale); }
258 SI U16 pack(U32 v) {
270 SI V<T> gather(const T* p, U32 ix) {
275 SI F gather(const float* p, U32 ix) { return _mm256_i32gather_ps (p, ix, 4); }
276 SI U32 gather(const uint32_t* p, U32 ix) { return _mm256_i32gather_epi32(p, ix, 4); }
277 SI U64 gather(const uint64_t* p, U32 ix) {
451 using U32 = V<uint32_t>;
462 SI U32 round(F v, F scale) { return _mm_cvtps_epi32(v*scale); }
464 SI U16 pack(U32 v) {
494 SI V<T> gather(const T* p, U32 ix) {
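Lines 83, 165, 270, and 494 above are variants of the same manual gather: when there is no hardware gather instruction, each lane's index simply subscripts the pointer. A minimal sketch of that idea, assuming Clang vector extensions and a fixed 4-lane width (with AVX2 the float/uint32_t cases can use the _mm256_i32gather_* intrinsics on lines 275-276 instead):

// Sketch only: one scalar load per lane, 4 lanes assumed.
#include <cstdint>
#include <cstdio>

using F   = float    __attribute__((ext_vector_type(4)));
using U32 = uint32_t __attribute__((ext_vector_type(4)));

static F gather(const float* p, U32 ix) {
    return { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]] };   // one scalar load per lane
}

int main() {
    const float table[8] = {0, 10, 20, 30, 40, 50, 60, 70};
    U32 ix = {7, 0, 3, 3};                               // indices may repeat, in any order
    F   v  = gather(table, ix);
    printf("%g %g %g %g\n", v[0], v[1], v[2], v[3]);     // prints 70 0 30 30
}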
613 SI F cast (U32 v) { return (F)v; }
614 SI U32 trunc_(F v) { return (U32)v; }
615 SI U32 expand(U16 v) { return (U32)v; }
616 SI U32 expand(U8 v) { return (U32)v; }
618 SI F cast (U32 v) { return __builtin_convertvector((I32)v, F); }
619 SI U32 trunc_(F v) { return (U32)__builtin_convertvector( v, I32); }
620 SI U32 expand(U16 v) { return __builtin_convertvector( v, U32); }
621 SI U32 expand(U8 v) { return __builtin_convertvector( v, U32); }
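The two cast/trunc_/expand groups above are the portable scalar versions (lines 613-616) and the lanewise vector versions built on Clang's __builtin_convertvector (lines 618-621). A standalone sketch of the vector flavor, assuming a hypothetical fixed 4-lane width rather than whatever N the real build selects:

// Sketch only: the real code templates the lane count and wraps these
// vector types in the V<T>/F/U32/U16 aliases.
#include <cstdint>
#include <cstdio>

using F   = float    __attribute__((ext_vector_type(4)));
using I32 = int32_t  __attribute__((ext_vector_type(4)));
using U32 = uint32_t __attribute__((ext_vector_type(4)));
using U16 = uint16_t __attribute__((ext_vector_type(4)));

static F   cast  (U32 v) { return __builtin_convertvector((I32)v, F); }   // lanewise int -> float
static U32 trunc_(F   v) { return (U32)__builtin_convertvector(v, I32); } // lanewise truncation
static U32 expand(U16 v) { return __builtin_convertvector(v, U32); }      // widen 16 -> 32 bits

int main() {
    U16 h = {1, 2, 3, 4};
    F   f = cast(expand(h)) * 0.5f;          // each lane: (float)h * 0.5
    U32 t = trunc_(f);                       // each lane: truncate back toward zero
    for (int i = 0; i < 4; i++) {
        printf("%g %u\n", f[i], t[i]);
    }
}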
646 F e = cast(bit_cast<U32>(x)) * (1.0f / (1<<23));
649 F m = bit_cast<F>((bit_cast<U32>(x) & 0x007fffff) | 0x3f000000);
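Lines 646 and 649 look like the exponent/mantissa split behind a fast log2 approximation: reinterpreting the float's bits as an integer and scaling by 2^-23 recovers roughly (exponent + 127), and masking the mantissa while forcing the exponent field to 0x3f000000 yields a float in [0.5, 1) used to refine that estimate. A scalar sketch of the trick; the correction terms at the end are rough illustrative constants, not Skia's tuned ones:

// Scalar sketch of the bit trick on lines 646/649.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static float approx_log2(float x) {
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof bits);             // bit_cast<U32>(x)
    float e = (float)bits * (1.0f / (1 << 23));      // ~ (exponent + 127) + mantissa/2^23

    uint32_t mbits = (bits & 0x007fffff) | 0x3f000000;
    float m;
    std::memcpy(&m, &mbits, sizeof m);               // mantissa remapped into [0.5, 1)

    // Subtract the bias and refine the estimate with a simple function of m.
    return e - 124.2f - 1.5f*m - 1.7f/(0.35f + m);
}

int main() {
    const float xs[] = {0.25f, 1.0f, 3.5f, 100.0f};
    for (float x : xs) {
        printf("x = %6g   approx = %8.4f   log2 = %8.4f\n", x, approx_log2(x), std::log2(x));
    }
}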
677 U32 sem = expand(h),
697 U32 sem = bit_cast<U32>(f),
703 return pack(if_then_else(denorm, U32(0)
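Lines 677, 697, and 703 are the two halves of the software f16 path: from_half widens the 16-bit sign/exponent/mantissa ("sem") word into a U32 and rebiases the exponent, and to_half does the reverse, flushing would-be denormals to zero. A scalar sketch of the same rebiasing, assuming normal finite inputs (infinities and NaNs are not handled):

// Scalar sketch of the sem rebiasing used on the non-hardware f16 path.
// Denormals flush to zero; inf/NaN are ignored.
#include <cstdint>
#include <cstdio>
#include <cstring>

static float from_half(uint16_t h) {
    uint32_t sem = h,
             s   = sem & 0x8000,       // sign bit
             em  = sem ^ s;            // exponent + mantissa
    if (em < 0x0400) return 0;         // half denormal or zero -> 0
    uint32_t bits = (s << 16)          // sign moves to bit 31
                  + (em << 13)         // 5-bit exp / 10-bit mantissa -> 8 / 23
                  + ((127 - 15) << 23);// rebias the exponent
    float f;
    std::memcpy(&f, &bits, sizeof f);
    return f;
}

static uint16_t to_half(float f) {
    uint32_t sem;
    std::memcpy(&sem, &f, sizeof sem);
    uint32_t s  = sem & 0x80000000,
             em = sem ^ s;
    if (em < 0x38800000) return 0;     // would be a half denormal -> 0
    return (uint16_t)((s >> 16) + (em >> 13) - ((127 - 15) << 10));
}

int main() {
    const float xs[] = {-2.5f, 0.333333f, 1.0f, 65504.0f};
    for (float x : xs) {
        uint16_t h = to_half(x);
        printf("%9g -> 0x%04x -> %9g\n", x, (unsigned)h, from_half(h));
    }
}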
848 U32 wide = expand(_565);
854 U32 wide = expand(_4444);
860 SI void from_8888(U32 _8888, F* r, F* g, F* b, F* a) {
866 SI void from_1010102(U32 rgba, F* r, F* g, F* b, F* a) {
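The from_565 / from_4444 / from_8888 / from_1010102 helpers above (lines 848-866) all follow the same mask-shift-and-scale pattern: isolate each channel, convert to float, and multiply by 1/(2^bits - 1). A scalar sketch of the 8888 and 1010102 cases, assuming the usual layout with R in the low bits and A in the high bits:

// Scalar sketch of the unpack pattern; channel order (R low, A high) is assumed.
#include <cstdint>
#include <cstdio>

static void from_8888(uint32_t px, float* r, float* g, float* b, float* a) {
    *r = ((px >>  0) & 0xff) * (1 / 255.0f);
    *g = ((px >>  8) & 0xff) * (1 / 255.0f);
    *b = ((px >> 16) & 0xff) * (1 / 255.0f);
    *a = ((px >> 24) & 0xff) * (1 / 255.0f);
}

static void from_1010102(uint32_t px, float* r, float* g, float* b, float* a) {
    *r = ((px >>  0) & 0x3ff) * (1 / 1023.0f);
    *g = ((px >> 10) & 0x3ff) * (1 / 1023.0f);
    *b = ((px >> 20) & 0x3ff) * (1 / 1023.0f);
    *a = ((px >> 30) & 0x3  ) * (1 /    3.0f);
}

int main() {
    float r, g, b, a;
    from_8888(0xff8040c0, &r, &g, &b, &a);       // A=0xff, B=0x80, G=0x40, R=0xc0
    printf("8888:    %.3f %.3f %.3f %.3f\n", r, g, b, a);
    from_1010102(0xffffffffu, &r, &g, &b, &a);   // all channels at full scale
    printf("1010102: %.3f %.3f %.3f %.3f\n", r, g, b, a);
}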
881 F inclusive = bit_cast<F>( bit_cast<U32>(limit) - 1 ); // Exclusive -> inclusive.
887 SI U32 ix_and_ptr(T** ptr, const SkJumper_GatherCtx* ctx, F x, F y) {
902 SI U32 to_unorm(F v, F scale, F bias = 1.0f) {
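Line 881's bit trick turns an exclusive float limit into an inclusive one: for a positive float, subtracting 1 from its bit pattern yields the largest representable value below it, so clamping coordinates to that keeps gather indices strictly inside the bitmap. to_unorm (line 902) is the store-side counterpart: clamp to [0, bias] and round to an integer. A scalar sketch of both, with a hypothetical width/height/stride struct standing in for SkJumper_GatherCtx:

// Scalar sketch: clamp-to-inclusive-limit for gathers, clamp-and-round for stores.
// The Ctx struct here is a stand-in, not the real GatherCtx layout.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstring>

struct Ctx { int width, height, stride; };

static float inclusive(float exclusive_limit) {
    uint32_t bits;
    std::memcpy(&bits, &exclusive_limit, sizeof bits);
    bits -= 1;                                   // largest float below the limit
    float f;
    std::memcpy(&f, &bits, sizeof f);
    return f;
}

static uint32_t ix_and_ptr(const Ctx& c, float x, float y) {
    x = std::min(std::max(x, 0.0f), inclusive((float)c.width ));
    y = std::min(std::max(y, 0.0f), inclusive((float)c.height));
    return (uint32_t)y * c.stride + (uint32_t)x; // row-major index into the pixels
}

static uint32_t to_unorm(float v, float scale, float bias = 1.0f) {
    v = std::min(std::max(v, 0.0f), bias);       // clamp to [0, bias]
    return (uint32_t)(v*scale + 0.5f);           // round to nearest
}

int main() {
    Ctx c = {100, 50, 100};
    printf("ix = %u\n", ix_and_ptr(c, 99.999f, 49.999f));  // stays inside the 100x50 bitmap
    printf("u8 = %u\n", to_unorm(1.5f, 255));               // over-range input clamps to 255
}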
924 U32 X = dx + unaligned_load<U32>(iota),
935 U32 M = (Y & 1) << 5 | (X & 1) << 4
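Lines 924 and 935 are the ordered-dither setup: each lane gets its pixel's x (dx plus a small iota vector) and y, and the low three bits of each are interleaved into a 6-bit position in an 8x8 dither matrix. A scalar sketch for one pixel; the X/Y mixing step and the final mapping to a small signed offset are illustrative additions, not taken from the listed lines:

// Scalar sketch of the 8x8 ordered-dither index on line 935:
// if X = abc and Y = def (low bits), M becomes fcebda, a value in 0..63.
#include <cstdint>
#include <cstdio>

static float dither_offset(uint32_t x, uint32_t y) {
    y ^= x;   // illustrative mixing so the matrix tiles sensibly across the plane

    uint32_t M = (y & 1) << 5 | (x & 1) << 4
               | (y & 2) << 2 | (x & 2) << 1
               | (y & 4) >> 1 | (x & 4) >> 2;   // 6-bit position in the 8x8 matrix

    // Map 0..63 onto a value centered on zero, strictly inside (-0.5, +0.5).
    return (float)M * (2/128.0f) - (63/128.0f);
}

int main() {
    // Print the full 8x8 table of offsets.
    for (uint32_t y = 0; y < 8; y++) {
        for (uint32_t x = 0; x < 8; x++) {
            printf("%7.4f ", dither_offset(x, y));
        }
        printf("\n");
    }
}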
1178 U32 dst = load<U32>(ptr, tail);
1203 U32 dst = load<U32>(ptr, tail);
1464 auto px = load<U32>((const uint32_t*)c->src + dx, tail);
1577 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1602 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1621 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1644 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1658 from_8888(load<U32>(ptr, tail), &r,&g,&b,&a);
1662 from_8888(load<U32>(ptr, tail), &dr,&dg,&db,&da);
1666 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1672 U32 px = to_unorm(r, 255)
1681 from_8888(load<U32>(ptr, tail), &b,&g,&r,&a);
1685 from_8888(load<U32>(ptr, tail), &db,&dg,&dr,&da);
1689 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1695 U32 px = to_unorm(b, 255)
1704 from_1010102(load<U32>(ptr, tail), &r,&g,&b,&a);
1708 from_1010102(load<U32>(ptr, tail), &dr,&dg,&db,&da);
1712 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1718 U32 px = to_unorm(r, 1023)
1747 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1890 SI void gradient_lookup(const SkJumper_GradientCtx* c, U32 idx, F t,
1930 U32 idx = 0;
1934 idx += if_then_else(t >= c->ts[i], U32(1), U32(0));
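Lines 1930/1934 are the general gradient case: idx starts at zero and each comparison against a stop position ts[i] adds one per lane, which amounts to a branch-free linear search for the segment containing t; gradient_lookup (line 1890) then evaluates that segment's f*t + b per channel. A scalar sketch with a hypothetical three-stop context (the real SkJumper_GradientCtx layout is not shown in the listing):

// Scalar sketch of the stop search on line 1934 plus the per-segment evaluation.
// ts[] are stop positions; f[]/b[] are a single channel's per-segment scale/bias.
#include <cstdint>
#include <cstdio>

struct GradientCtx {
    int          stopCount;
    const float* ts;   // stop positions; ts[0] is unused by the search below
    const float* f;    // per-segment scale, indexed by idx
    const float* b;    // per-segment bias,  indexed by idx
};

static float gradient_channel(const GradientCtx* c, float t) {
    uint32_t idx = 0;
    for (int i = 1; i < c->stopCount; i++) {
        idx += (t >= c->ts[i]) ? 1u : 0u;      // count stops at or below t
    }
    return c->f[idx]*t + c->b[idx];            // evaluate that segment
}

int main() {
    // Hypothetical 3-stop gradient: 0 at t=0, 1 at t=0.5, back to 0 at t=1.
    const float ts[] = {0.0f, 0.5f, 1.0f};
    const float f [] = {2.0f, -2.0f, 0.0f};    // segment slopes
    const float b [] = {0.0f,  2.0f, 0.0f};    // segment intercepts
    GradientCtx c = {3, ts, f, b};
    for (float t = 0.0f; t <= 1.0f; t += 0.25f) {
        printf("t=%.2f -> %.2f\n", t, gradient_channel(&c, t));
    }
}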
2027 unaligned_store(&c->fMask, if_then_else(is_degenerate, U32(0), U32(0xffffffff)));
2034 unaligned_store(&c->fMask, if_then_else(is_degenerate, U32(0), U32(0xffffffff)));
2038 const U32 mask = unaligned_load<U32>(ctx);
2039 r = bit_cast<F>(bit_cast<U32>(r) & mask);
2040 g = bit_cast<F>(bit_cast<U32>(g) & mask);
2041 b = bit_cast<F>(bit_cast<U32>(b) & mask);
2042 a = bit_cast<F>(bit_cast<U32>(a) & mask);
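Lines 2027-2042 implement per-lane masking without branches: the mask stages store an all-ones or all-zero word per lane, and apply_vector_mask ANDs that word against the bit pattern of each color channel, zeroing only the masked-off lanes. A 4-lane sketch using Clang vector extensions; the degeneracy test here is an example, not the 2-pt conical one:

// Sketch of branch-free lane masking, assuming a fixed 4-lane width:
// build a 0 / 0xffffffff word per lane, then AND it against the float bits.
#include <cstdint>
#include <cstdio>
#include <cstring>

using F   = float    __attribute__((ext_vector_type(4)));
using I32 = int32_t  __attribute__((ext_vector_type(4)));
using U32 = uint32_t __attribute__((ext_vector_type(4)));

static F mask_lanes(F v, U32 mask) {
    U32 bits;
    std::memcpy(&bits, &v, sizeof bits);     // bit_cast<U32>(v)
    bits &= mask;                            // zero the masked-off lanes
    std::memcpy(&v, &bits, sizeof v);        // bit_cast<F>(bits)
    return v;
}

int main() {
    F   t             = { 0.1f, -1.0f, 0.5f, 2.0f };
    I32 is_degenerate = (t < 0) | (t != t);          // example test: negative or NaN lanes
    U32 mask = ~(U32)is_degenerate;                  // comparisons yield 0 / -1 per lane

    F r = { 1, 2, 3, 4 };
    r = mask_lanes(r, mask);
    printf("%g %g %g %g\n", r[0], r[1], r[2], r[3]); // lane 1 is zeroed
}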
2162 F& r, F& g, F& b, F a, U32 index, U32 stride) {
2175 U32 lo = trunc_(x ),
2194 F& r, F& g, F& b, F a, U32 index, U32 stride) {
2248 U32 ix = ix_and_ptr(&ptr, ctx, x,y);