; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32
; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
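
; Check that 'add' selects addu (daddu for 64-bit operands on GP64) for each
; operand width, and that narrow results are sign-extended into $2: i1 always
; via an sll/sra pair, and i8/i16 via sll/sra before r2 or seb/seh on r2 and
; later.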

define signext i1 @add_i1(i1 signext %a, i1 signext %b) {
entry:
; ALL-LABEL: add_i1:

  ; ALL:        addu    $[[T0:[0-9]+]], $4, $5
  ; ALL:        sll     $[[T0]], $[[T0]], 31
  ; ALL:        sra     $2, $[[T0]], 31

  %r = add i1 %a, %b
  ret i1 %r
}

define signext i8 @add_i8(i8 signext %a, i8 signext %b) {
entry:
; ALL-LABEL: add_i8:

  ; NOT-R2-R6:  addu    $[[T0:[0-9]+]], $4, $5
  ; NOT-R2-R6:  sll     $[[T0]], $[[T0]], 24
  ; NOT-R2-R6:  sra     $2, $[[T0]], 24

  ; R2-R6:      addu    $[[T0:[0-9]+]], $4, $5
  ; R2-R6:      seb     $2, $[[T0]]

  %r = add i8 %a, %b
  ret i8 %r
}

define signext i16 @add_i16(i16 signext %a, i16 signext %b) {
entry:
; ALL-LABEL: add_i16:

  ; NOT-R2-R6:  addu    $[[T0:[0-9]+]], $4, $5
  ; NOT-R2-R6:  sll     $[[T0]], $[[T0]], 16
  ; NOT-R2-R6:  sra     $2, $[[T0]], 16

  ; R2-R6:      addu    $[[T0:[0-9]+]], $4, $5
  ; R2-R6:      seh     $2, $[[T0]]

  %r = add i16 %a, %b
  ret i16 %r
}

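; A word-sized addition needs no explicit sign-extension; a single addu leaves
; the i32 result directly in $2.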
define signext i32 @add_i32(i32 signext %a, i32 signext %b) {
entry:
; ALL-LABEL: add_i32:

  ; ALL:        addu    $2, $4, $5

  %r = add i32 %a, %b
  ret i32 %r
}

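; On GP32 the 64-bit addition is expanded into two 32-bit additions, with the
; carry between the halves computed by sltu; GP64 uses a single daddu.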
define signext i64 @add_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: add_i64:

  ; GP32:       addu    $3, $5, $7
  ; GP32:       sltu    $[[T0:[0-9]+]], $3, $7
  ; GP32:       addu    $[[T1:[0-9]+]], $[[T0]], $6
  ; GP32:       addu    $2, $4, $[[T1]]

  ; GP64:       daddu   $2, $4, $5

  %r = add i64 %a, %b
  ret i64 %r
}

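; On GP32 the 128-bit addition is expanded into four word-sized additions with
; sltu-computed carries (the upper operand words are loaded from the stack);
; on GP64 it is split into two doubleword additions with one sltu carry.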
define signext i128 @add_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: add_i128:

  ; GP32:       lw        $[[T0:[0-9]+]], 28($sp)
  ; GP32:       addu      $[[T1:[0-9]+]], $7, $[[T0]]
  ; GP32:       sltu      $[[T2:[0-9]+]], $[[T1]], $[[T0]]
  ; GP32:       lw        $[[T3:[0-9]+]], 24($sp)
  ; GP32:       addu      $[[T4:[0-9]+]], $[[T2]], $[[T3]]
  ; GP32:       addu      $[[T5:[0-9]+]], $6, $[[T4]]
  ; GP32:       sltu      $[[T6:[0-9]+]], $[[T5]], $[[T3]]
  ; GP32:       lw        $[[T7:[0-9]+]], 20($sp)
  ; GP32:       addu      $[[T8:[0-9]+]], $[[T6]], $[[T7]]
  ; GP32:       lw        $[[T9:[0-9]+]], 16($sp)
  ; GP32:       addu      $3, $5, $[[T8]]
  ; GP32:       sltu      $[[T10:[0-9]+]], $3, $[[T7]]
  ; GP32:       addu      $[[T11:[0-9]+]], $[[T10]], $[[T9]]
  ; GP32:       addu      $2, $4, $[[T11]]
  ; GP32:       move      $4, $[[T5]]
  ; GP32:       move      $5, $[[T1]]

  ; GP64:       daddu     $3, $5, $7
  ; GP64:       sltu      $[[T0:[0-9]+]], $3, $7
  ; GP64:       daddu     $[[T1:[0-9]+]], $[[T0]], $6
  ; GP64:       daddu     $2, $4, $[[T1]]

  %r = add i128 %a, %b
  ret i128 %r
}