/* libvpx: DSPR2 (MIPS) 2-tap vertical convolve-with-average implementation. */
      1 /*
      2  *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
      3  *
      4  *  Use of this source code is governed by a BSD-style license
      5  *  that can be found in the LICENSE file in the root of the source
      6  *  tree. An additional intellectual property rights grant can be found
      7  *  in the file PATENTS.  All contributing project authors may
      8  *  be found in the AUTHORS file in the root of the source tree.
      9  */
     10 
     11 #include <assert.h>
     12 #include <stdio.h>
     13 
     14 #include "./vpx_dsp_rtcd.h"
     15 #include "vpx_dsp/mips/convolve_common_dspr2.h"
     16 #include "vpx_dsp/vpx_convolve.h"
     17 #include "vpx_dsp/vpx_dsp_common.h"
     18 #include "vpx_ports/mem.h"
     19 
     20 #if HAVE_DSPR2
/*
 * Vertical 2-tap convolution with rounding-average into the destination.
 *
 * Only filter_y[3] and filter_y[4] are read (the "bi"/bilinear case), so
 * each output pixel needs just two source rows.  The filtered, clamped
 * result is combined with the existing dst pixel using addqh_r.w (a
 * round-to-nearest halving add), giving the "avg" behavior.  Four adjacent
 * output pixels are produced per inline-asm block using the four DSP
 * accumulators $ac0..$ac3.
 *
 * NOTE(review): the "extp" extract position is taken from the DSP control
 * register, so callers must program it first via "wrdsp" (done in
 * vpx_convolve2_avg_vert_dspr2).  w is assumed to be a multiple of 4.
 */
static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
                                         uint8_t *dst, int32_t dst_stride,
                                         const int16_t *filter_y, int32_t w,
                                         int32_t h) {
  int32_t x, y;
  const uint8_t *src_ptr;
  uint8_t *dst_ptr;
  uint8_t *cm = vpx_ff_cropTbl; /* clamp/crop table, indexed via lbux */
  uint32_t vector4a = 64;       /* rounding term preloaded into each acc */
  uint32_t load1, load2;
  uint32_t p1, p2;
  uint32_t scratch1, scratch2;
  uint32_t store1, store2;
  int32_t Temp1, Temp2;
  const int16_t *filter = &filter_y[3];
  uint32_t filter45;

  /* Pack taps 3 and 4 into one 32-bit word for dpa.w.ph.
     NOTE(review): the int16_t* -> int32_t* cast assumes the filter table
     is suitably aligned for a 32-bit load — confirm against the table's
     declaration. */
  filter45 = ((const int32_t *)filter)[0];

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_store(dst + dst_stride);

    for (x = 0; x < w; x += 4) {
      src_ptr = src + x;
      dst_ptr = dst + x;

      /* Load 4 bytes from two consecutive source rows, pair them into
         (row0,row1) 16-bit lanes, multiply-accumulate against the packed
         taps, clamp through the crop table, then average into
         dst_ptr[0..3]. */
      __asm__ __volatile__(
          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load2],     0(%[src_ptr])                   \n\t"

          "mtlo             %[vector4a],  $ac0                            \n\t"
          "mtlo             %[vector4a],  $ac1                            \n\t"
          "mtlo             %[vector4a],  $ac2                            \n\t"
          "mtlo             %[vector4a],  $ac3                            \n\t"
          "mthi             $zero,        $ac0                            \n\t"
          "mthi             $zero,        $ac1                            \n\t"
          "mthi             $zero,        $ac2                            \n\t"
          "mthi             $zero,        $ac3                            \n\t"

          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"

          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"

          "extp             %[Temp1],     $ac0,           31              \n\t"
          "extp             %[Temp2],     $ac1,           31              \n\t"

          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"

          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
          "extp             %[Temp1],     $ac2,           31              \n\t"

          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
          "extp             %[Temp2],     $ac3,           31              \n\t"
          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"

          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"

          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */

          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
          "sb               %[store2],    3(%[dst_ptr])                   \n\t"

          : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1),
            [p2] "=&r"(p2), [scratch1] "=&r"(scratch1),
            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
          : [filter45] "r"(filter45), [vector4a] "r"(vector4a),
            [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
    }

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}
    119 
/*
 * Width-64 specialization of the vertical 2-tap convolve-average.
 *
 * Identical per-pixel math to convolve_bi_avg_vert_4_dspr2 (only
 * filter_y[3] and filter_y[4] are used; the clamped result is averaged
 * into dst with addqh_r.w), but the inner loop runs a fixed 64-pixel row
 * and a second prefetch covers the upper half of the output row.
 *
 * NOTE(review): as with the width-generic variant, the "extp" extract
 * position must already be programmed via "wrdsp" by the caller.
 */
static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src,
                                          int32_t src_stride, uint8_t *dst,
                                          int32_t dst_stride,
                                          const int16_t *filter_y, int32_t h) {
  int32_t x, y;
  const uint8_t *src_ptr;
  uint8_t *dst_ptr;
  uint8_t *cm = vpx_ff_cropTbl; /* clamp/crop table, indexed via lbux */
  uint32_t vector4a = 64;       /* rounding term preloaded into each acc */
  uint32_t load1, load2;
  uint32_t p1, p2;
  uint32_t scratch1, scratch2;
  uint32_t store1, store2;
  int32_t Temp1, Temp2;
  const int16_t *filter = &filter_y[3];
  uint32_t filter45;

  /* Pack taps 3 and 4 into one 32-bit word for dpa.w.ph.
     NOTE(review): assumes 32-bit alignment of the filter table. */
  filter45 = ((const int32_t *)filter)[0];

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_store(dst + dst_stride);
    prefetch_store(dst + dst_stride + 32);

    for (x = 0; x < 64; x += 4) {
      src_ptr = src + x;
      dst_ptr = dst + x;

      /* Same 4-pixel kernel as convolve_bi_avg_vert_4_dspr2: two row
         loads, four dot products in $ac0..$ac3, clamp, then rounding
         average into dst_ptr[0..3]. */
      __asm__ __volatile__(
          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load2],     0(%[src_ptr])                   \n\t"

          "mtlo             %[vector4a],  $ac0                            \n\t"
          "mtlo             %[vector4a],  $ac1                            \n\t"
          "mtlo             %[vector4a],  $ac2                            \n\t"
          "mtlo             %[vector4a],  $ac3                            \n\t"
          "mthi             $zero,        $ac0                            \n\t"
          "mthi             $zero,        $ac1                            \n\t"
          "mthi             $zero,        $ac2                            \n\t"
          "mthi             $zero,        $ac3                            \n\t"

          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"

          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"

          "extp             %[Temp1],     $ac0,           31              \n\t"
          "extp             %[Temp2],     $ac1,           31              \n\t"

          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"

          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
          "extp             %[Temp1],     $ac2,           31              \n\t"

          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
          "extp             %[Temp2],     $ac3,           31              \n\t"
          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"

          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"

          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */

          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
          "sb               %[store2],    3(%[dst_ptr])                   \n\t"

          : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1),
            [p2] "=&r"(p2), [scratch1] "=&r"(scratch1),
            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
          : [filter45] "r"(filter45), [vector4a] "r"(vector4a),
            [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
    }

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}
    219 
    220 void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
    221                                   uint8_t *dst, ptrdiff_t dst_stride,
    222                                   const InterpKernel *filter, int x0_q4,
    223                                   int32_t x_step_q4, int y0_q4, int y_step_q4,
    224                                   int w, int h) {
    225   const int16_t *const filter_y = filter[y0_q4];
    226   uint32_t pos = 38;
    227 
    228   assert(y_step_q4 == 16);
    229 
    230   /* bit positon for extract from acc */
    231   __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
    232                        :
    233                        : [pos] "r"(pos));
    234 
    235   prefetch_store(dst);
    236 
    237   switch (w) {
    238     case 4:
    239     case 8:
    240     case 16:
    241     case 32:
    242       convolve_bi_avg_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y,
    243                                    w, h);
    244       break;
    245     case 64:
    246       prefetch_store(dst + 32);
    247       convolve_bi_avg_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y,
    248                                     h);
    249       break;
    250     default:
    251       vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter, x0_q4,
    252                                x_step_q4, y0_q4, y_step_q4, w, h);
    253       break;
    254   }
    255 }
    256 #endif
    257