/* NEON optimized code (C) COPYRIGHT 2009 Motorola
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBitmapProcState.h"
#include "SkPerspIter.h"
#include "SkShader.h"
#include "SkUtilsArm.h"
#include "SkBitmapProcState_utils.h"

#include <arm_neon.h>

extern const SkBitmapProcState::MatrixProc ClampX_ClampY_Procs_neon[];
extern const SkBitmapProcState::MatrixProc RepeatX_RepeatY_Procs_neon[];

static void decal_nofilter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count);
static void decal_filter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count);
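
/* "Decal" is Skia's fast path for when an entire span is known to map inside
 * [0, max]: no clamp/repeat tiling is needed, so the x indices can be
 * generated straight from fx/dx (see CHECK_FOR_DECAL below).
 */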

// TILEX_PROCF(fx, max)    SkClampMax((fx) >> 16, max)
static inline int16x8_t sbpsm_clamp_tile8(int32x4_t low, int32x4_t high, unsigned max) {
    int16x8_t res;

    // keep the high 16 bits (the integer part) of each 32-bit lane
    res = vuzpq_s16(vreinterpretq_s16_s32(low), vreinterpretq_s16_s32(high)).val[1];

    // clamp to [0, max]
    res = vmaxq_s16(res, vdupq_n_s16(0));
    res = vminq_s16(res, vdupq_n_s16(max));

    return res;
}
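
/* For reference, a scalar sketch of what sbpsm_clamp_tile8 computes per lane.
 * The high half of each 32-bit lane is exactly (fx >> 16), which always fits
 * in an int16, so the unzip-then-clamp above matches this. Not compiled; the
 * _ref name is ours.
 */
#if 0
static inline void sbpsm_clamp_tile8_ref(int16_t out[8], const int32_t in[8], unsigned max) {
    for (int i = 0; i < 8; i++) {
        int32_t v = in[i] >> 16;         // integer part of the 16.16 value
        if (v < 0)            v = 0;     // clamp below
        if (v > (int32_t)max) v = max;   // clamp above
        out[i] = (int16_t)v;
    }
}
#endif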

// TILEX_PROCF(fx, max)    SkClampMax((fx) >> 16, max)
static inline int32x4_t sbpsm_clamp_tile4(int32x4_t f, unsigned max) {
    int32x4_t res;

    // keep the integer part of each 16.16 lane
    res = vshrq_n_s32(f, 16);

    // clamp to [0, max]
    res = vmaxq_s32(res, vdupq_n_s32(0));
    res = vminq_s32(res, vdupq_n_s32(max));

    return res;
}

// TILEY_LOW_BITS(fy, max)         (((fy) >> 12) & 0xF)
static inline int32x4_t sbpsm_clamp_tile4_low_bits(int32x4_t fx) {
    int32x4_t ret;

    ret = vshrq_n_s32(fx, 12);

    /* We don't need the mask below because the caller will
     * overwrite the non-masked bits
     */
    //ret = vandq_s32(ret, vdupq_n_s32(0xF));

    return ret;
}
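
/* Worked example for the clamp macros: with fx = 0x58000 (5.5 in 16.16 fixed
 * point) and max >= 6, TILEX_PROCF yields 0x58000 >> 16 = 5 (the left texel)
 * and TILEX_LOW_BITS yields (0x58000 >> 12) & 0xF = 8, i.e. a subpixel weight
 * of 8/16 toward texel 6. The masking is safe to skip above because the
 * caller keeps only the low 4 bits when packing.
 */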

// TILEX_PROCF(fx, max)    (((fx) & 0xFFFF) * ((max) + 1) >> 16)
static inline int16x8_t sbpsm_repeat_tile8(int32x4_t low, int32x4_t high, unsigned max) {
    uint16x8_t res;
    uint32x4_t tmpl, tmph;

    // keep the low 16 bits (the fraction) of each 32-bit lane
    res = vuzpq_u16(vreinterpretq_u16_s32(low), vreinterpretq_u16_s32(high)).val[0];

    // bare multiplication, not SkFixedMul
    tmpl = vmull_u16(vget_low_u16(res), vdup_n_u16(max+1));
    tmph = vmull_u16(vget_high_u16(res), vdup_n_u16(max+1));

    // extract the upper 16 bits of each product
    res = vuzpq_u16(vreinterpretq_u16_u32(tmpl), vreinterpretq_u16_u32(tmph)).val[1];

    return vreinterpretq_s16_u16(res);
}

// TILEX_PROCF(fx, max)    (((fx) & 0xFFFF) * ((max) + 1) >> 16)
static inline int32x4_t sbpsm_repeat_tile4(int32x4_t f, unsigned max) {
    uint16x4_t res;
    uint32x4_t tmp;

    // keep the low 16 bits (the fraction) of each 32-bit lane
    res = vmovn_u32(vreinterpretq_u32_s32(f));

    // bare multiplication, not SkFixedMul
    tmp = vmull_u16(res, vdup_n_u16(max+1));

    // extract the upper 16 bits of each product
    tmp = vshrq_n_u32(tmp, 16);

    return vreinterpretq_s32_u32(tmp);
}
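
/* Worked example for the repeat mapping: with max = 7 (an 8-texel row) and
 * fx = 0x18000 (1.5), only the fraction matters: (0x8000 * 8) >> 16 = 4, so x
 * wraps to texel 4. Scaling the 16-bit fraction by (max + 1) is what makes an
 * explicit modulo unnecessary.
 */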

// TILEX_LOW_BITS(fx, max)         ((((fx) & 0xFFFF) * ((max) + 1) >> 12) & 0xF)
static inline int32x4_t sbpsm_repeat_tile4_low_bits(int32x4_t fx, unsigned max) {
    uint16x4_t res;
    uint32x4_t tmp;
    int32x4_t ret;

    // keep the low 16 bits (the fraction) of each 32-bit lane
    res = vmovn_u32(vreinterpretq_u32_s32(fx));

    // bare multiplication, not SkFixedMul
    tmp = vmull_u16(res, vdup_n_u16(max + 1));

    // shift and mask
    ret = vshrq_n_s32(vreinterpretq_s32_u32(tmp), 12);

    /* We don't need the mask below because the caller will
     * overwrite the non-masked bits
     */
    //ret = vandq_s32(ret, vdupq_n_s32(0xF));

    return ret;
}

#define MAKENAME(suffix)                ClampX_ClampY ## suffix ## _neon
#define TILEX_PROCF(fx, max)            SkClampMax((fx) >> 16, max)
#define TILEY_PROCF(fy, max)            SkClampMax((fy) >> 16, max)
#define TILEX_PROCF_NEON8(l, h, max)    sbpsm_clamp_tile8(l, h, max)
#define TILEY_PROCF_NEON8(l, h, max)    sbpsm_clamp_tile8(l, h, max)
#define TILEX_PROCF_NEON4(fx, max)      sbpsm_clamp_tile4(fx, max)
#define TILEY_PROCF_NEON4(fy, max)      sbpsm_clamp_tile4(fy, max)
#define TILEX_LOW_BITS(fx, max)         (((fx) >> 12) & 0xF)
#define TILEY_LOW_BITS(fy, max)         (((fy) >> 12) & 0xF)
#define TILEX_LOW_BITS_NEON4(fx, max)   sbpsm_clamp_tile4_low_bits(fx)
#define TILEY_LOW_BITS_NEON4(fy, max)   sbpsm_clamp_tile4_low_bits(fy)
#define CHECK_FOR_DECAL
#include "SkBitmapProcState_matrix_neon.h"
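
/* SkBitmapProcState_matrix_neon.h behaves like a template: each inclusion
 * stamps out a full set of matrix procs named through MAKENAME and then
 * #undefs the configuration macros so they can be redefined for the next
 * tiling mode. A sketch of the names the inclusion above should generate,
 * going by the MAKENAME pattern:
 *
 *   ClampX_ClampY_nofilter_scale_neon(...)
 *   ClampX_ClampY_filter_scale_neon(...)
 *   ...plus the ClampX_ClampY_Procs_neon[] table declared at the top of
 *   this file.
 */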

#define MAKENAME(suffix)                RepeatX_RepeatY ## suffix ## _neon
#define TILEX_PROCF(fx, max)            SK_USHIFT16(((fx) & 0xFFFF) * ((max) + 1))
#define TILEY_PROCF(fy, max)            SK_USHIFT16(((fy) & 0xFFFF) * ((max) + 1))
#define TILEX_PROCF_NEON8(l, h, max)    sbpsm_repeat_tile8(l, h, max)
#define TILEY_PROCF_NEON8(l, h, max)    sbpsm_repeat_tile8(l, h, max)
#define TILEX_PROCF_NEON4(fx, max)      sbpsm_repeat_tile4(fx, max)
#define TILEY_PROCF_NEON4(fy, max)      sbpsm_repeat_tile4(fy, max)
#define TILEX_LOW_BITS(fx, max)         ((((fx) & 0xFFFF) * ((max) + 1) >> 12) & 0xF)
#define TILEY_LOW_BITS(fy, max)         ((((fy) & 0xFFFF) * ((max) + 1) >> 12) & 0xF)
#define TILEX_LOW_BITS_NEON4(fx, max)   sbpsm_repeat_tile4_low_bits(fx, max)
#define TILEY_LOW_BITS_NEON4(fy, max)   sbpsm_repeat_tile4_low_bits(fy, max)
#include "SkBitmapProcState_matrix_neon.h"

static void decal_nofilter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
    if (count >= 8) {
        // SkFixed is 16.16 fixed point
        SkFixed dx8 = dx * 8;
        int32x4_t vdx8 = vdupq_n_s32(dx8);

        // set up lbase and hbase: fx, fx + dx, ..., fx + 7*dx across both vectors
        int32x4_t lbase, hbase;
        lbase = vdupq_n_s32(fx);
        lbase = vsetq_lane_s32(fx + dx, lbase, 1);
        lbase = vsetq_lane_s32(fx + dx + dx, lbase, 2);
        lbase = vsetq_lane_s32(fx + dx + dx + dx, lbase, 3);
        hbase = vaddq_s32(lbase, vdupq_n_s32(4 * dx));

        do {
            // store the upper 16 bits of all eight lanes
            vst1q_u32(dst, vreinterpretq_u32_s16(
                vuzpq_s16(vreinterpretq_s16_s32(lbase), vreinterpretq_s16_s32(hbase)).val[1]
            ));

            // on to the next group of 8
            lbase = vaddq_s32(lbase, vdx8);
            hbase = vaddq_s32(hbase, vdx8);
            dst += 4; // eight 16-bit results fill four 32-bit slots
            count -= 8;
            fx += dx8;
        } while (count >= 8);
    }

    // finish the remaining 0..7 values one at a time
    uint16_t* xx = (uint16_t*)dst;
    for (int i = count; i > 0; --i) {
        *xx++ = SkToU16(fx >> 16); fx += dx;
    }
}
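
/* A scalar sketch of decal_nofilter_scale_neon as a whole (not compiled; the
 * _ref name is ours): each 32-bit slot of dst[] ends up holding two
 * consecutive 16-bit x indices, exactly as the tail loop above produces them.
 */
#if 0
static void decal_nofilter_scale_ref(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
    uint16_t* xx = (uint16_t*)dst;
    for (int i = 0; i < count; i++) {
        *xx++ = SkToU16(fx >> 16);   // integer part of the 16.16 x coordinate
        fx += dx;
    }
}
#endif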

static void decal_filter_scale_neon(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
    if (count >= 8) {
        SkFixed dx8 = dx * 8;
        int32x4_t vdx8 = vdupq_n_s32(dx8);

        // wide_fx holds fx..fx + 3*dx, wide_fx2 holds fx + 4*dx..fx + 7*dx
        int32x4_t wide_fx, wide_fx2;
        wide_fx = vdupq_n_s32(fx);
        wide_fx = vsetq_lane_s32(fx + dx, wide_fx, 1);
        wide_fx = vsetq_lane_s32(fx + dx + dx, wide_fx, 2);
        wide_fx = vsetq_lane_s32(fx + dx + dx + dx, wide_fx, 3);

        wide_fx2 = vaddq_s32(wide_fx, vdupq_n_s32(4 * dx));

        while (count >= 8) {
            int32x4_t wide_out;
            int32x4_t wide_out2;

            // pack ((fx >> 12) << 14) | ((fx >> 16) + 1) in every lane
            wide_out = vshlq_n_s32(vshrq_n_s32(wide_fx, 12), 14);
            wide_out = vorrq_s32(wide_out, vaddq_s32(vshrq_n_s32(wide_fx, 16), vdupq_n_s32(1)));

            wide_out2 = vshlq_n_s32(vshrq_n_s32(wide_fx2, 12), 14);
            wide_out2 = vorrq_s32(wide_out2, vaddq_s32(vshrq_n_s32(wide_fx2, 16), vdupq_n_s32(1)));

            vst1q_u32(dst, vreinterpretq_u32_s32(wide_out));
            vst1q_u32(dst + 4, vreinterpretq_u32_s32(wide_out2));

            dst += 8;
            fx += dx8;
            wide_fx = vaddq_s32(wide_fx, vdx8);
            wide_fx2 = vaddq_s32(wide_fx2, vdx8);
            count -= 8;
        }
    }

    // finish the remaining 0..7 values with the scalar form, unrolled by two
    if (count & 1) {
        SkASSERT((fx >> (16 + 14)) == 0);
        *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
        fx += dx;
    }
    while ((count -= 2) >= 0) {
        SkASSERT((fx >> (16 + 14)) == 0);
        *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
        fx += dx;

        *dst++ = (fx >> 12 << 14) | ((fx >> 16) + 1);
        fx += dx;
    }
}
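
/* The packed filter format written above is
 *     (index0 << 18) | (weight << 14) | index1
 * where index1 = index0 + 1 and weight is the top 4 bits of the fraction.
 * Worked example: fx = 0x58000 (5.5) packs to (5 << 18) | (8 << 14) | 6 =
 * 0x160006, i.e. blend texels 5 and 6 with weight 8/16. The SkASSERTs check
 * that index0 fits in the 14 bits above the weight field.
 */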