// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// MIPS version of lossless functions
//
// Author(s):  Djordje Pesut    (djordje.pesut@imgtec.com)
//             Jovan Zelincevic (jovan.zelincevic@imgtec.com)

#include "./dsp.h"
#include "./lossless.h"

#if defined(WEBP_USE_MIPS32)

#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

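// A plain-C sketch of the fast path in FastSLog2Slow() below, showing what
// the 'clz'-based asm computes (a portable loop stands in for 'clz'):
//   uint32_t log_cnt = 0, temp = v;
//   while (temp >= LOG_LOOKUP_IDX_MAX) {   // i.e. until temp < 256
//     temp >>= 1;
//     ++log_cnt;
//   }
//   const uint32_t y = 1u << log_cnt;      // y == 2^log_cnt, temp == v >> log_cnt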
static float FastSLog2Slow(uint32_t v) {
  assert(v >= LOG_LOOKUP_IDX_MAX);
  if (v < APPROX_LOG_WITH_CORRECTION_MAX) {
    uint32_t log_cnt, y, correction;
    const int c24 = 24;
    const float v_f = (float)v;
    uint32_t temp;

    // Split v as v = Xf * 2^log_cnt, with 128 <= Xf < 256, so that
    // floor(Xf) = v >> log_cnt can index the 256-entry kLog2Table.
    // log_cnt = 24 - clz(v), i.e. the position of the leading one minus 7.
    __asm__ volatile(
      "clz      %[log_cnt], %[v]                      \n\t"
      "addiu    %[y],       $zero,        1           \n\t"
      "subu     %[log_cnt], %[c24],       %[log_cnt]  \n\t"
      "sllv     %[y],       %[y],         %[log_cnt]  \n\t"
      "srlv     %[temp],    %[v],         %[log_cnt]  \n\t"
      : [log_cnt]"=&r"(log_cnt), [y]"=&r"(y),
        [temp]"=r"(temp)
      : [c24]"r"(c24), [v]"r"(v)
    );

    // v = y * Xf, where y = 2^log_cnt and 128 <= Xf < 256.
    // Xf ~ floor(Xf) * (1 + (v % y) / v), since (v % y) / v is small.
    // log2(Xf) = log2(floor(Xf)) + log2(1 + (v % y) / v)
    // The correction factor: log(1 + d) ~ d for very small d, so
    // log2(1 + (v % y) / v) ~ LOG_2_RECIPROCAL * (v % y) / v
    // with LOG_2_RECIPROCAL = 1/log(2) ~ 23/16.
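
    // Worked example (v = 1001): log_cnt = 2, y = 4, temp = 250 and
    // v % y = 1, so correction = (23 * 1) >> 4 = 1; the result is
    // 1001 * (log2(250) + 2) + 1 ~ 9976.8, close to the exact value
    // 1001 * log2(1001) ~ 9977.2.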

    // (v % y) = (v % 2^log_cnt) = v & (2^log_cnt - 1)
    correction = (23 * (v & (y - 1))) >> 4;
    return v_f * (kLog2Table[temp] + log_cnt) + correction;
  } else {
    return (float)(LOG_2_RECIPROCAL * v * log((double)v));
  }
}

static float FastLog2Slow(uint32_t v) {
  assert(v >= LOG_LOOKUP_IDX_MAX);
  if (v < APPROX_LOG_WITH_CORRECTION_MAX) {
    uint32_t log_cnt, y;
    const int c24 = 24;
    double log_2;
    uint32_t temp;

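    // Same decomposition as in FastSLog2Slow() above:
    // log2(v) ~ log2(v >> log_cnt) + log_cnt, with temp = v >> log_cnt
    // (128 <= temp < 256) indexing kLog2Table.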
    __asm__ volatile(
      "clz      %[log_cnt], %[v]                      \n\t"
      "addiu    %[y],       $zero,        1           \n\t"
      "subu     %[log_cnt], %[c24],       %[log_cnt]  \n\t"
      "sllv     %[y],       %[y],         %[log_cnt]  \n\t"
      "srlv     %[temp],    %[v],         %[log_cnt]  \n\t"
      : [log_cnt]"=&r"(log_cnt), [y]"=&r"(y),
        [temp]"=r"(temp)
      : [c24]"r"(c24), [v]"r"(v)
    );

    log_2 = kLog2Table[temp] + log_cnt;
    if (v >= APPROX_LOG_MAX) {
      // Since the division is still expensive, add this correction factor only
      // for large values of 'v'.

      const uint32_t correction = (23 * (v & (y - 1))) >> 4;
      log_2 += (double)correction / v;
    }
    return (float)log_2;
  } else {
    return (float)(LOG_2_RECIPROCAL * log((double)v));
  }
}

// C version of this function:
//   int i = 0;
//   int64_t cost = 0;
//   const uint32_t* pop = &population[4];
//   const uint32_t* LoopEnd = &population[length];
//   while (pop != LoopEnd) {
//     ++i;
//     cost += i * *pop;
//     cost += i * *(pop + 1);
//     pop += 2;
//   }
//   return (double)cost;
static double ExtraCost(const uint32_t* const population, int length) {
  int i, temp0, temp1;
  const uint32_t* pop = &population[4];
  const uint32_t* const LoopEnd = &population[length];

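  // 'mult $zero, $zero' clears the {hi, lo} accumulator; each 'madd' then
  // adds the 64-bit product i * pop[k] into it, so {hi, lo} ends up holding
  // the running 'cost' of the C version above.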
  __asm__ volatile(
    "mult   $zero,    $zero                  \n\t"
    "xor    %[i],     %[i],       %[i]       \n\t"
    "beq    %[pop],   %[LoopEnd], 2f         \n\t"
  "1:                                        \n\t"
    "lw     %[temp0], 0(%[pop])              \n\t"
    "lw     %[temp1], 4(%[pop])              \n\t"
    "addiu  %[i],     %[i],       1          \n\t"
    "addiu  %[pop],   %[pop],     8          \n\t"
    "madd   %[i],     %[temp0]               \n\t"
    "madd   %[i],     %[temp1]               \n\t"
    "bne    %[pop],   %[LoopEnd], 1b         \n\t"
  "2:                                        \n\t"
    "mfhi   %[temp0]                         \n\t"
    "mflo   %[temp1]                         \n\t"
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
      [i]"=&r"(i), [pop]"+r"(pop)
    : [LoopEnd]"r"(LoopEnd)
    : "memory", "hi", "lo"
  );

  // 'temp1' holds the low word from 'mflo'; cast it to uint32_t so it is
  // not sign-extended before being merged into the 64-bit result.
  return (double)(((int64_t)temp0 << 32) | (uint32_t)temp1);
}

// C version of this function:
//   int i = 0;
//   int64_t cost = 0;
//   const uint32_t* pX = &X[4];
//   const uint32_t* pY = &Y[4];
//   const uint32_t* LoopEnd = &X[length];
//   while (pX != LoopEnd) {
//     const uint32_t xy0 = *pX + *pY;
//     const uint32_t xy1 = *(pX + 1) + *(pY + 1);
//     ++i;
//     cost += i * xy0;
//     cost += i * xy1;
//     pX += 2;
//     pY += 2;
//   }
//   return (double)cost;
static double ExtraCostCombined(const uint32_t* const X,
                                const uint32_t* const Y, int length) {
  int i, temp0, temp1, temp2, temp3;
  const uint32_t* pX = &X[4];
  const uint32_t* pY = &Y[4];
  const uint32_t* const LoopEnd = &X[length];

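  // Same {hi, lo} accumulation as in ExtraCost(), applied to the element
  // sums xy0 and xy1 computed on the fly.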
  __asm__ volatile(
    "mult   $zero,    $zero                  \n\t"
    "xor    %[i],     %[i],       %[i]       \n\t"
    "beq    %[pX],    %[LoopEnd], 2f         \n\t"
  "1:                                        \n\t"
    "lw     %[temp0], 0(%[pX])               \n\t"
    "lw     %[temp1], 0(%[pY])               \n\t"
    "lw     %[temp2], 4(%[pX])               \n\t"
    "lw     %[temp3], 4(%[pY])               \n\t"
    "addiu  %[i],     %[i],       1          \n\t"
    "addu   %[temp0], %[temp0],   %[temp1]   \n\t"
    "addu   %[temp2], %[temp2],   %[temp3]   \n\t"
    "addiu  %[pX],    %[pX],      8          \n\t"
    "addiu  %[pY],    %[pY],      8          \n\t"
    "madd   %[i],     %[temp0]               \n\t"
    "madd   %[i],     %[temp2]               \n\t"
    "bne    %[pX],    %[LoopEnd], 1b         \n\t"
  "2:                                        \n\t"
    "mfhi   %[temp0]                         \n\t"
    "mflo   %[temp1]                         \n\t"
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
      [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
      [i]"=&r"(i), [pX]"+r"(pX), [pY]"+r"(pY)
    : [LoopEnd]"r"(LoopEnd)
    : "memory", "hi", "lo"
  );

  // As in ExtraCost(), cast the 'mflo' word to uint32_t to avoid
  // sign-extension when merging it into the 64-bit result.
  return (double)(((int64_t)temp0 << 32) | (uint32_t)temp1);
}

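// C version of the HUFFMAN_COST_PASS macro below (a sketch; the caller sets
// temp0 to b = (*val_prev != 0) before invoking it):
//   if (streak > 3) {
//     stats->streaks[b][1] += streak;
//     ++stats->counts[b];
//   } else {
//     stats->streaks[b][0] += streak;
//   }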
#define HUFFMAN_COST_PASS                                 \
  __asm__ volatile(                                       \
    "sll   %[temp1],  %[temp0],    3           \n\t"      \
    "addiu %[temp3],  %[streak],   -3          \n\t"      \
    "addu  %[temp2],  %[pstreaks], %[temp1]    \n\t"      \
    "blez  %[temp3],  1f                       \n\t"      \
    "srl   %[temp1],  %[temp1],    1           \n\t"      \
    "addu  %[temp3],  %[pcnts],    %[temp1]    \n\t"      \
    "lw    %[temp0],  4(%[temp2])              \n\t"      \
    "lw    %[temp1],  0(%[temp3])              \n\t"      \
    "addu  %[temp0],  %[temp0],    %[streak]   \n\t"      \
    "addiu %[temp1],  %[temp1],    1           \n\t"      \
    "sw    %[temp0],  4(%[temp2])              \n\t"      \
    "sw    %[temp1],  0(%[tem3])               \n\t"      \
    "b     2f                                  \n\t"      \
  "1:                                          \n\t"      \
    "lw    %[temp0],  0(%[temp2])              \n\t"      \
    "addu  %[temp0],  %[temp0],    %[streak]   \n\t"      \
    "sw    %[temp0],  0(%[temp2])              \n\t"      \
  "2:                                          \n\t"      \
    : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),           \
      [temp3]"=&r"(temp3), [temp0]"+r"(temp0)             \
    : [pstreaks]"r"(pstreaks), [pcnts]"r"(pcnts),         \
      [streak]"r"(streak)                                 \
    : "memory"                                            \
  );

// Accumulates the RLE streak counts and bit-entropy statistics for the run
// of the previous value, of length (i - *i_prev).
static WEBP_INLINE void GetEntropyUnrefinedHelper(
    uint32_t val, int i, uint32_t* const val_prev, int* const i_prev,
    VP8LBitEntropy* const bit_entropy, VP8LStreaks* const stats) {
  int* const pstreaks = &stats->streaks[0][0];
  int* const pcnts = &stats->counts[0];
  int temp0, temp1, temp2, temp3;
  const int streak = i - *i_prev;

  // Gather info for the bit entropy.
  if (*val_prev != 0) {
    bit_entropy->sum += (*val_prev) * streak;
    bit_entropy->nonzeros += streak;
    bit_entropy->nonzero_code = *i_prev;
    bit_entropy->entropy -= VP8LFastSLog2(*val_prev) * streak;
    if (bit_entropy->max_val < *val_prev) {
      bit_entropy->max_val = *val_prev;
    }
  }

  // Gather info for the Huffman cost.
  temp0 = (*val_prev != 0);
  HUFFMAN_COST_PASS

  *val_prev = val;
  *i_prev = i;
}

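// Note: the 'ulw'/'usw' instructions used below are assembler macros (each
// expands to an lwl/lwr or swl/swr pair) and may need the $at register,
// hence the '.set at' / '.set macro' directives.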
#define ASM_START                                       \
  __asm__ volatile(                                     \
    ".set   push                            \n\t"       \
    ".set   at                              \n\t"       \
    ".set   macro                           \n\t"       \
  "1:                                       \n\t"

// P2 = P0 + P1, four words per iteration, at byte offsets A..D.
// E tells the macro whether P1 should be incremented as well
// (E == 0 when P1 aliases P2).
// literal_ and the successive histogram arrays may be unaligned,
// so we must use 'ulw' and 'usw' instead of 'lw'/'sw'.
#define ADD_TO_OUT(A, B, C, D, E, P0, P1, P2)           \
    "ulw    %[temp0], " #A "(%[" #P0 "])    \n\t"       \
    "ulw    %[temp1], " #B "(%[" #P0 "])    \n\t"       \
    "ulw    %[temp2], " #C "(%[" #P0 "])    \n\t"       \
    "ulw    %[temp3], " #D "(%[" #P0 "])    \n\t"       \
    "ulw    %[temp4], " #A "(%[" #P1 "])    \n\t"       \
    "ulw    %[temp5], " #B "(%[" #P1 "])    \n\t"       \
    "ulw    %[temp6], " #C "(%[" #P1 "])    \n\t"       \
    "ulw    %[temp7], " #D "(%[" #P1 "])    \n\t"       \
    "addu   %[temp4], %[temp4],   %[temp0]  \n\t"       \
    "addu   %[temp5], %[temp5],   %[temp1]  \n\t"       \
    "addu   %[temp6], %[temp6],   %[temp2]  \n\t"       \
    "addu   %[temp7], %[temp7],   %[temp3]  \n\t"       \
    "addiu  %[" #P0 "],  %[" #P0 "],  16    \n\t"       \
  ".if " #E " == 1                          \n\t"       \
    "addiu  %[" #P1 "],  %[" #P1 "],  16    \n\t"       \
  ".endif                                   \n\t"       \
    "usw    %[temp4], " #A "(%[" #P2 "])    \n\t"       \
    "usw    %[temp5], " #B "(%[" #P2 "])    \n\t"       \
    "usw    %[temp6], " #C "(%[" #P2 "])    \n\t"       \
    "usw    %[temp7], " #D "(%[" #P2 "])    \n\t"       \
    "addiu  %[" #P2 "], %[" #P2 "],   16    \n\t"       \
    "bne    %[" #P0 "], %[LoopEnd], 1b      \n\t"       \
    ".set   pop                             \n\t"

#define ASM_END_COMMON_0                                \
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),         \
      [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),         \
      [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),         \
      [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),         \
      [pa]"+r"(pa), [pout]"+r"(pout)

#define ASM_END_COMMON_1                                \
    : [LoopEnd]"r"(LoopEnd)                             \
    : "memory", "at"                                    \
  );

#define ASM_END_0                                       \
    ASM_END_COMMON_0                                    \
      , [pb]"+r"(pb)                                    \
    ASM_END_COMMON_1

#define ASM_END_1                                       \
    ASM_END_COMMON_0                                    \
    ASM_END_COMMON_1

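// C version of the ADD_VECTOR macro below (a sketch): the asm adds the first
// SIZE elements four words per iteration, the scalar tail loop the rest.
//   int i;
//   for (i = 0; i < (SIZE) + (EXTRA_SIZE); ++i) pout[i] = pa[i] + pb[i];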
#define ADD_VECTOR(A, B, OUT, SIZE, EXTRA_SIZE)  do {   \
  const uint32_t* pa = (const uint32_t*)(A);            \
  const uint32_t* pb = (const uint32_t*)(B);            \
  uint32_t* pout = (uint32_t*)(OUT);                    \
  const uint32_t* const LoopEnd = pa + (SIZE);          \
  assert((SIZE) % 4 == 0);                              \
  ASM_START                                             \
  ADD_TO_OUT(0, 4, 8, 12, 1, pa, pb, pout)              \
  ASM_END_0                                             \
  if ((EXTRA_SIZE) > 0) {                               \
    const int last = (EXTRA_SIZE);                      \
    int i;                                              \
    for (i = 0; i < last; ++i) pout[i] = pa[i] + pb[i]; \
  }                                                     \
} while (0)

#define ADD_VECTOR_EQ(A, OUT, SIZE, EXTRA_SIZE)  do {   \
  const uint32_t* pa = (const uint32_t*)(A);            \
  uint32_t* pout = (uint32_t*)(OUT);                    \
  const uint32_t* const LoopEnd = pa + (SIZE);          \
  assert((SIZE) % 4 == 0);                              \
  ASM_START                                             \
  ADD_TO_OUT(0, 4, 8, 12, 0, pa, pout, pout)            \
  ASM_END_1                                             \
  if ((EXTRA_SIZE) > 0) {                               \
    const int last = (EXTRA_SIZE);                      \
    int i;                                              \
    for (i = 0; i < last; ++i) pout[i] += pa[i];        \
  }                                                     \
} while (0)

static void HistogramAdd(const VP8LHistogram* const a,
                         const VP8LHistogram* const b,
                         VP8LHistogram* const out) {
  uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
  const int extra_cache_size = VP8LHistogramNumCodes(a->palette_code_bits_)
                             - (NUM_LITERAL_CODES + NUM_LENGTH_CODES);
  assert(a->palette_code_bits_ == b->palette_code_bits_);

  if (b != out) {
    ADD_VECTOR(a->literal_, b->literal_, out->literal_,
               NUM_LITERAL_CODES + NUM_LENGTH_CODES, extra_cache_size);
    ADD_VECTOR(a->distance_, b->distance_, out->distance_,
               NUM_DISTANCE_CODES, 0);
    ADD_VECTOR(a->red_, b->red_, out->red_, NUM_LITERAL_CODES, 0);
    ADD_VECTOR(a->blue_, b->blue_, out->blue_, NUM_LITERAL_CODES, 0);
    ADD_VECTOR(a->alpha_, b->alpha_, out->alpha_, NUM_LITERAL_CODES, 0);
  } else {
    ADD_VECTOR_EQ(a->literal_, out->literal_,
                  NUM_LITERAL_CODES + NUM_LENGTH_CODES, extra_cache_size);
    ADD_VECTOR_EQ(a->distance_, out->distance_, NUM_DISTANCE_CODES, 0);
    ADD_VECTOR_EQ(a->red_, out->red_, NUM_LITERAL_CODES, 0);
    ADD_VECTOR_EQ(a->blue_, out->blue_, NUM_LITERAL_CODES, 0);
    ADD_VECTOR_EQ(a->alpha_, out->alpha_, NUM_LITERAL_CODES, 0);
  }
}

#undef ADD_VECTOR_EQ
#undef ADD_VECTOR
#undef ASM_END_1
#undef ASM_END_0
#undef ASM_END_COMMON_1
#undef ASM_END_COMMON_0
#undef ADD_TO_OUT
#undef ASM_START

//------------------------------------------------------------------------------
// Entry point

extern void VP8LEncDspInitMIPS32(void);

WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitMIPS32(void) {
  VP8LFastSLog2Slow = FastSLog2Slow;
  VP8LFastLog2Slow = FastLog2Slow;
  VP8LExtraCost = ExtraCost;
  VP8LExtraCostCombined = ExtraCostCombined;
  VP8LGetEntropyUnrefinedHelper = GetEntropyUnrefinedHelper;
  VP8LHistogramAdd = HistogramAdd;
}

#else  // !WEBP_USE_MIPS32

WEBP_DSP_INIT_STUB(VP8LEncDspInitMIPS32)

#endif  // WEBP_USE_MIPS32