; RUN: llc < %s -march=cellspu > %t1.s
; RUN: grep nand   %t1.s | count 90
; RUN: grep and    %t1.s | count 94
; RUN: grep xsbh   %t1.s | count 2
; RUN: grep xshw   %t1.s | count 4

; CellSPU legalization is over-sensitive to Legalize's traversal order.
; XFAIL: *

target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
target triple = "spu"

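; Each pair of functions below computes ~(a & b) as (xor (and a b), -1),
; once with each operand order on the 'and'. Instruction selection should
; match all of them to SPU's nand instruction.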
define <4 x i32> @nand_v4i32_1(<4 x i32> %arg1, <4 x i32> %arg2) {
        %A = and <4 x i32> %arg2, %arg1      ; <<4 x i32>> [#uses=1]
        %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
        ret <4 x i32> %B
}

define <4 x i32> @nand_v4i32_2(<4 x i32> %arg1, <4 x i32> %arg2) {
        %A = and <4 x i32> %arg1, %arg2      ; <<4 x i32>> [#uses=1]
        %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
        ret <4 x i32> %B
}

define <8 x i16> @nand_v8i16_1(<8 x i16> %arg1, <8 x i16> %arg2) {
        %A = and <8 x i16> %arg2, %arg1      ; <<8 x i16>> [#uses=1]
        %B = xor <8 x i16> %A, < i16 -1, i16 -1, i16 -1, i16 -1,
                                 i16 -1, i16 -1, i16 -1, i16 -1 >
        ret <8 x i16> %B
}

define <8 x i16> @nand_v8i16_2(<8 x i16> %arg1, <8 x i16> %arg2) {
        %A = and <8 x i16> %arg1, %arg2      ; <<8 x i16>> [#uses=1]
        %B = xor <8 x i16> %A, < i16 -1, i16 -1, i16 -1, i16 -1,
                                 i16 -1, i16 -1, i16 -1, i16 -1 >
        ret <8 x i16> %B
}

define <16 x i8> @nand_v16i8_1(<16 x i8> %arg1, <16 x i8> %arg2) {
        %A = and <16 x i8> %arg2, %arg1      ; <<16 x i8>> [#uses=1]
        %B = xor <16 x i8> %A, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                 i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                 i8 -1, i8 -1, i8 -1, i8 -1 >
        ret <16 x i8> %B
}

define <16 x i8> @nand_v16i8_2(<16 x i8> %arg1, <16 x i8> %arg2) {
        %A = and <16 x i8> %arg1, %arg2      ; <<16 x i8>> [#uses=1]
        %B = xor <16 x i8> %A, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                 i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                 i8 -1, i8 -1, i8 -1, i8 -1 >
        ret <16 x i8> %B
}

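; The scalar i32 variants of the same pattern should likewise select nand.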
define i32 @nand_i32_1(i32 %arg1, i32 %arg2) {
        %A = and i32 %arg2, %arg1            ; <i32> [#uses=1]
        %B = xor i32 %A, -1                  ; <i32> [#uses=1]
        ret i32 %B
}

define i32 @nand_i32_2(i32 %arg1, i32 %arg2) {
        %A = and i32 %arg1, %arg2            ; <i32> [#uses=1]
        %B = xor i32 %A, -1                  ; <i32> [#uses=1]
        ret i32 %B
}

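; For the signext i16 variants, the i16 result must be sign-extended for the
; return; on SPU that should use xshw (extend sign halfword to word), which
; the xshw grep above counts.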
define signext i16 @nand_i16_1(i16 signext %arg1, i16 signext %arg2) {
        %A = and i16 %arg2, %arg1            ; <i16> [#uses=1]
        %B = xor i16 %A, -1                  ; <i16> [#uses=1]
        ret i16 %B
}

define signext i16 @nand_i16_2(i16 signext %arg1, i16 signext %arg2) {
        %A = and i16 %arg1, %arg2            ; <i16> [#uses=1]
        %B = xor i16 %A, -1                  ; <i16> [#uses=1]
        ret i16 %B
}

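; The zero-extended i16 and i8 variants below should need no sign-extension
; instructions (xsbh/xshw), just the nand itself.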
define zeroext i16 @nand_i16u_1(i16 zeroext %arg1, i16 zeroext %arg2) {
        %A = and i16 %arg2, %arg1            ; <i16> [#uses=1]
        %B = xor i16 %A, -1                  ; <i16> [#uses=1]
        ret i16 %B
}

define zeroext i16 @nand_i16u_2(i16 zeroext %arg1, i16 zeroext %arg2) {
        %A = and i16 %arg1, %arg2            ; <i16> [#uses=1]
        %B = xor i16 %A, -1                  ; <i16> [#uses=1]
        ret i16 %B
}

define zeroext i8 @nand_i8u_1(i8 zeroext %arg1, i8 zeroext %arg2) {
        %A = and i8 %arg2, %arg1             ; <i8> [#uses=1]
        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
        ret i8 %B
}

define zeroext i8 @nand_i8u_2(i8 zeroext %arg1, i8 zeroext %arg2) {
        %A = and i8 %arg1, %arg2             ; <i8> [#uses=1]
        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
        ret i8 %B
}

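; Sign-extending an i8 result should take both xsbh (extend sign byte to
; halfword) and xshw, so this pair presumably accounts for the xsbh count
; of 2 and the remaining two xshw occurrences.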
define signext i8 @nand_i8_1(i8 signext %arg1, i8 signext %arg2) {
        %A = and i8 %arg2, %arg1             ; <i8> [#uses=1]
        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
        ret i8 %B
}

define signext i8 @nand_i8_2(i8 signext %arg1, i8 signext %arg2) {
        %A = and i8 %arg1, %arg2             ; <i8> [#uses=1]
        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
        ret i8 %B
}

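; Plain i8 arguments and results carry no extension attribute, so these last
; two variants should compile to nand alone.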
define i8 @nand_i8_3(i8 %arg1, i8 %arg2) {
        %A = and i8 %arg2, %arg1             ; <i8> [#uses=1]
        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
        ret i8 %B
}

define i8 @nand_i8_4(i8 %arg1, i8 %arg2) {
        %A = and i8 %arg1, %arg2             ; <i8> [#uses=1]
        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
        ret i8 %B
}