Home | History | Annotate | Download | only in llvm-ir
      1 ; RUN: llc < %s -march=mips -mcpu=mips2 -relocation-model=pic | FileCheck %s \
      2 ; RUN:    -check-prefixes=ALL,GP32,M2
      3 ; RUN: llc < %s -march=mips -mcpu=mips32 -relocation-model=pic | FileCheck %s \
      4 ; RUN:    -check-prefixes=ALL,GP32,32R1-R5
      5 ; RUN: llc < %s -march=mips -mcpu=mips32r2 -relocation-model=pic | FileCheck %s \
      6 ; RUN:    -check-prefixes=ALL,GP32,32R1-R5
      7 ; RUN: llc < %s -march=mips -mcpu=mips32r3 -relocation-model=pic | FileCheck %s \
      8 ; RUN:    -check-prefixes=ALL,GP32,32R1-R5
      9 ; RUN: llc < %s -march=mips -mcpu=mips32r5 -relocation-model=pic | FileCheck %s \
     10 ; RUN:    -check-prefixes=ALL,GP32,32R1-R5
     11 ; RUN: llc < %s -march=mips -mcpu=mips32r6 -relocation-model=pic | FileCheck %s \
     12 ; RUN:    -check-prefixes=ALL,GP32,32R6
     13 ; RUN: llc < %s -march=mips64 -mcpu=mips3 -relocation-model=pic | FileCheck %s \
     14 ; RUN:    -check-prefixes=ALL,GP64,M3
     15 ; RUN: llc < %s -march=mips64 -mcpu=mips4 -relocation-model=pic | FileCheck %s \
     16 ; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6
     17 ; RUN: llc < %s -march=mips64 -mcpu=mips64 -relocation-model=pic | FileCheck %s \
     18 ; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6
     19 ; RUN: llc < %s -march=mips64 -mcpu=mips64r2 -relocation-model=pic | FileCheck %s \
     20 ; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6
     21 ; RUN: llc < %s -march=mips64 -mcpu=mips64r3 -relocation-model=pic | FileCheck %s \
     22 ; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6
     23 ; RUN: llc < %s -march=mips64 -mcpu=mips64r5 -relocation-model=pic | FileCheck %s \
     24 ; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6
     25 ; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -relocation-model=pic | FileCheck %s \
     26 ; RUN:    -check-prefixes=ALL,GP64,64R6
     27 ; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -relocation-model=pic | FileCheck %s \
     28 ; RUN:    -check-prefixes=ALL,MM,MMR3
     29 ; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | FileCheck %s \
     30 ; RUN:    -check-prefixes=ALL,MM,MMR6
     31 
; i1 case: the only in-range shift amount for an i1 is 0, so the shift folds
; away and every target just returns %a (register $4 copied to the return
; register $2).
define signext i1 @lshr_i1(i1 signext %a, i1 signext %b) {
entry:
; ALL-LABEL: lshr_i1:

  ; ALL:        move    $2, $4

  %r = lshr i1 %a, %b
  ret i1 %r
}
     41 
; i8 case: the shift is done in a full 32-bit register (srlv), then the
; zeroext return contract is re-established by masking to 8 bits with an
; and-immediate of 255. microMIPS selects the 16-bit-encoded andi16 form.
define zeroext i8 @lshr_i8(i8 zeroext %a, i8 zeroext %b) {
entry:
; ALL-LABEL: lshr_i8:

  ; ALL:        srlv    $[[T0:[0-9]+]], $4, $5
  ; GP32:       andi    $2, $[[T0]], 255
  ; GP64:       andi    $2, $[[T0]], 255
  ; MM:         andi16  $2, $[[T0]], 255

  %r = lshr i8 %a, %b
  ret i8 %r
}
     54 
; i16 case: identical pattern to the i8 case, but the re-zero-extension mask
; is 65535 (0xFFFF). microMIPS again uses the compact andi16 encoding.
define zeroext i16 @lshr_i16(i16 zeroext %a, i16 zeroext %b) {
entry:
; ALL-LABEL: lshr_i16:

  ; ALL:        srlv    $[[T0:[0-9]+]], $4, $5
  ; GP32:       andi    $2, $[[T0]], 65535
  ; GP64:       andi    $2, $[[T0]], 65535
  ; MM:         andi16  $2, $[[T0]], 65535

  %r = lshr i16 %a, %b
  ret i16 %r
}
     67 
; i32 case: a native-width shift on every target — a single srlv
; (shift-right-logical-variable) straight into the return register.
define signext i32 @lshr_i32(i32 signext %a, i32 signext %b) {
entry:
; ALL-LABEL: lshr_i32:

  ; ALL:          srlv    $2, $4, $5

  %r = lshr i32 %a, %b
  ret i32 %r
}
     77 
; i64 case. On 32-bit targets the operand lives in the register pair $4:$5
; (hi:lo) with the shift amount in $7, and the shift is expanded into a
; shift/or sequence plus a select on bit 5 (mask 32) of the amount that
; distinguishes shifts of 32..63 from shifts of 0..31:
;   - mips2 has no conditional moves, so it branches (bnez) around the
;     "amount < 32" path;
;   - mips32r1-r5 and microMIPS r3 use movn;
;   - the r6 revisions use the seleqz/selnez + or idiom (movn was removed).
; 64-bit targets need only a single dsrlv.
define signext i64 @lshr_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: lshr_i64:

  ; M2:         srlv      $[[T0:[0-9]+]], $4, $7
  ; M2:         andi      $[[T1:[0-9]+]], $7, 32
  ; M2:         bnez      $[[T1]], $[[BB0:BB[0-9_]+]]
  ; M2:         move      $3, $[[T0]]
  ; M2:         srlv      $[[T2:[0-9]+]], $5, $7
  ; M2:         not       $[[T3:[0-9]+]], $7
  ; M2:         sll       $[[T4:[0-9]+]], $4, 1
  ; M2:         sllv      $[[T5:[0-9]+]], $[[T4]], $[[T3]]
  ; NOTE(review): the or below reuses $[[T3]] (the `not` result) where the
  ; sllv result $[[T5]] would be expected; this only matches if the register
  ; allocator reuses the same physical register — confirm against llc output.
  ; M2:         or        $3, $[[T3]], $[[T2]]
  ; M2:         $[[BB0]]:
  ; M2:         bnez      $[[T1]], $[[BB1:BB[0-9_]+]]
  ; M2:         addiu     $2, $zero, 0
  ; M2:         move      $2, $[[T0]]
  ; M2:         $[[BB1]]:
  ; M2:         jr        $ra
  ; M2:         nop

  ; 32R1-R5:    srlv      $[[T0:[0-9]+]], $5, $7
  ; 32R1-R5:    not       $[[T1:[0-9]+]], $7
  ; 32R1-R5:    sll       $[[T2:[0-9]+]], $4, 1
  ; 32R1-R5:    sllv      $[[T3:[0-9]+]], $[[T2]], $[[T1]]
  ; 32R1-R5:    or        $3, $[[T3]], $[[T0]]
  ; 32R1-R5:    srlv      $[[T4:[0-9]+]], $4, $7
  ; 32R1-R5:    andi      $[[T5:[0-9]+]], $7, 32
  ; 32R1-R5:    movn      $3, $[[T4]], $[[T5]]
  ; 32R1-R5:    jr        $ra
  ; 32R1-R5:    movn      $2, $zero, $[[T5]]

  ; 32R6:       srlv      $[[T0:[0-9]+]], $5, $7
  ; 32R6:       not       $[[T1:[0-9]+]], $7
  ; 32R6:       sll       $[[T2:[0-9]+]], $4, 1
  ; 32R6:       sllv      $[[T3:[0-9]+]], $[[T2]], $[[T1]]
  ; 32R6:       or        $[[T4:[0-9]+]], $[[T3]], $[[T0]]
  ; 32R6:       andi      $[[T5:[0-9]+]], $7, 32
  ; 32R6:       seleqz    $[[T6:[0-9]+]], $[[T4]], $[[T3]]
  ; 32R6:       srlv      $[[T7:[0-9]+]], $4, $7
  ; 32R6:       selnez    $[[T8:[0-9]+]], $[[T7]], $[[T5]]
  ; 32R6:       or        $3, $[[T8]], $[[T6]]
  ; 32R6:       jr        $ra
  ; 32R6:       seleqz    $2, $[[T7]], $[[T5]]

  ; GP64:         dsrlv   $2, $4, $5

  ; MMR3:       srlv      $[[T0:[0-9]+]], $5, $7
  ; MMR3:       sll16     $[[T1:[0-9]+]], $4, 1
  ; MMR3:       not16     $[[T2:[0-9]+]], $7
  ; MMR3:       sllv      $[[T3:[0-9]+]], $[[T1]], $[[T2]]
  ; MMR3:       or16      $[[T4:[0-9]+]], $[[T0]]
  ; MMR3:       srlv      $[[T5:[0-9]+]], $4, $7
  ; MMR3:       andi16    $[[T6:[0-9]+]], $7, 32
  ; MMR3:       movn      $[[T7:[0-9]+]], $[[T5]], $[[T6]]
  ; MMR3:       lui       $[[T8:[0-9]+]], 0
  ; MMR3:       movn      $2, $[[T8]], $[[T6]]

  ; MMR6:       srlv      $[[T0:[0-9]+]], $5, $7
  ; MMR6:       sll16     $[[T1:[0-9]+]], $4, 1
  ; MMR6:       not16     $[[T2:[0-9]+]], $7
  ; MMR6:       sllv      $[[T3:[0-9]+]], $[[T1]], $[[T2]]
  ; MMR6:       or16      $[[T4:[0-9]+]], $[[T0]]
  ; MMR6:       andi16    $[[T5:[0-9]+]], $7, 32
  ; MMR6:       seleqz    $[[T6:[0-9]+]], $[[T4]], $[[T5]]
  ; MMR6:       srlv      $[[T7:[0-9]+]], $4, $7
  ; MMR6:       selnez    $[[T8:[0-9]+]], $[[T7]], $[[T5]]
  ; MMR6:       or        $3, $[[T8]], $[[T6]]
  ; MMR6:       seleqz    $2, $[[T7]], $[[T5]]

  %r = lshr i64 %a, %b
  ret i64 %r
}
    151 
; i128 case. 32-bit targets give up on inline expansion and call the
; compiler-rt/libgcc helper __lshrti3 through $25 (PIC call register, loaded
; from the GOT). 64-bit targets expand inline with the same double-word
; shift/or + select-on-bit-6 (mask 64) structure the i64 case used on 32-bit
; targets, operating on the $4:$5 register pair with the amount in $7; the
; `sll ..., $7, 0` checks cover the sign-extending 32-bit view of the amount.
define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: lshr_i128:

  ; GP32:         lw      $25, %call16(__lshrti3)($gp)

  ; M3:             sll       $[[T0:[0-9]+]], $7, 0
  ; M3:             dsrlv     $[[T1:[0-9]+]], $4, $7
  ; M3:             andi      $[[T2:[0-9]+]], $[[T0]], 64
  ; NOTE(review): the bnez below binds a fresh capture $[[T3]] instead of
  ; reusing $[[T2]] from the andi, so it does not verify that the branch
  ; tests the andi result — possibly intentional looseness; confirm.
  ; M3:             bnez      $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
  ; M3:             move      $3, $[[T1]]
  ; M3:             dsrlv     $[[T4:[0-9]+]], $5, $7
  ; M3:             dsll      $[[T5:[0-9]+]], $4, 1
  ; M3:             not       $[[T6:[0-9]+]], $[[T0]]
  ; M3:             dsllv     $[[T7:[0-9]+]], $[[T5]], $[[T6]]
  ; M3:             or        $3, $[[T7]], $[[T4]]
  ; M3:             $[[BB0]]:
  ; M3:             bnez      $[[T3]], $[[BB1:BB[0-9_]+]]
  ; M3:             daddiu    $2, $zero, 0
  ; M3:             move      $2, $[[T1]]
  ; M3:             $[[BB1]]:
  ; M3:             jr        $ra
  ; M3:             nop

  ; GP64-NOT-R6:    dsrlv     $[[T0:[0-9]+]], $5, $7
  ; GP64-NOT-R6:    dsll      $[[T1:[0-9]+]], $4, 1
  ; GP64-NOT-R6:    sll       $[[T2:[0-9]+]], $7, 0
  ; GP64-NOT-R6:    not       $[[T3:[0-9]+]], $[[T2]]
  ; GP64-NOT-R6:    dsllv     $[[T4:[0-9]+]], $[[T1]], $[[T3]]
  ; GP64-NOT-R6:    or        $3, $[[T4]], $[[T0]]
  ; GP64-NOT-R6:    dsrlv     $2, $4, $7
  ; GP64-NOT-R6:    andi      $[[T5:[0-9]+]], $[[T2]], 64
  ; GP64-NOT-R6:    movn      $3, $2, $[[T5]]
  ; GP64-NOT-R6:    jr        $ra
  ; NOTE(review): the final movn hard-codes $1 (assembler temporary) rather
  ; than matching $[[T5]] — it only passes while the andi lands in $1; a more
  ; robust check would reuse the capture. Flagged, not changed.
  ; GP64-NOT-R6:    movn      $2, $zero, $1

  ; 64R6:           dsrlv     $[[T0:[0-9]+]], $5, $7
  ; 64R6:           dsll      $[[T1:[0-9]+]], $4, 1
  ; 64R6:           sll       $[[T2:[0-9]+]], $7, 0
  ; 64R6:           not       $[[T3:[0-9]+]], $[[T2]]
  ; 64R6:           dsllv     $[[T4:[0-9]+]], $[[T1]], $[[T3]]
  ; 64R6:           or        $[[T5:[0-9]+]], $[[T4]], $[[T0]]
  ; 64R6:           andi      $[[T6:[0-9]+]], $[[T2]], 64
  ; 64R6:           sll       $[[T7:[0-9]+]], $[[T6]], 0
  ; 64R6:           seleqz    $[[T8:[0-9]+]], $[[T5]], $[[T7]]
  ; 64R6:           dsrlv     $[[T9:[0-9]+]], $4, $7
  ; 64R6:           selnez    $[[T10:[0-9]+]], $[[T9]], $[[T7]]
  ; 64R6:           or        $3, $[[T10]], $[[T8]]
  ; 64R6:           jr        $ra
  ; 64R6:           seleqz    $2, $[[T9]], $[[T7]]

  ; MM:             lw        $25, %call16(__lshrti3)($2)

  %r = lshr i128 %a, %b
  ret i128 %r
}
    208