; Lit regression test for the CellSPU backend: verifies that the SPU
; logical-operation intrinsics lower to the expected machine instructions
; by counting opcode occurrences in the generated assembly (%t1.s).
; NOTE(review): greps run on the llc OUTPUT, not this source; "grep and"
; also matches andc/andi/andhi/andbi lines, hence the count of 20.
; RUN: llc < %s -march=cellspu > %t1.s
; RUN: grep and %t1.s | count 20
; RUN: grep andc %t1.s | count 5
target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
target triple = "spu"

; SPU "and" family: vector AND, AND-with-complement, and immediate forms
; operating on word (i32), halfword (i16), and byte (i8) vector elements.
declare <4 x i32> @llvm.spu.si.and(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.spu.si.andc(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.spu.si.andi(<4 x i32>, i16)
declare <8 x i16> @llvm.spu.si.andhi(<8 x i16>, i16)
declare <16 x i8> @llvm.spu.si.andbi(<16 x i8>, i8)

; SPU "or" family (tests for these presumably follow later in the file —
; only the "and" tests are visible in this chunk).
declare <4 x i32> @llvm.spu.si.or(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.spu.si.orc(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.spu.si.ori(<4 x i32>, i16)
declare <8 x i16> @llvm.spu.si.orhi(<8 x i16>, i16)
declare <16 x i8> @llvm.spu.si.orbi(<16 x i8>, i8)

; SPU "xor" family.
declare <4 x i32> @llvm.spu.si.xor(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.spu.si.xori(<4 x i32>, i16)
declare <8 x i16> @llvm.spu.si.xorhi(<8 x i16>, i16)
declare <16 x i8> @llvm.spu.si.xorbi(<16 x i8>, i8)

; SPU negated logical ops.
declare <4 x i32> @llvm.spu.si.nand(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.spu.si.nor(<4 x i32>, <4 x i32>)

; Each test below uses the old implicit-numbering idiom: the unnamed call
; result becomes %1, and the same-type bitcast is a no-op copy into %Y.

; Vector AND of two <4 x i32> operands -> "and" instruction.
define <4 x i32> @andtest(<4 x i32> %A, <4 x i32> %B) {
        call <4 x i32> @llvm.spu.si.and(<4 x i32> %A, <4 x i32> %B)
        %Y = bitcast <4 x i32> %1 to <4 x i32>
        ret <4 x i32> %Y
}

; AND with complement (A & ~B) -> "andc" instruction.
define <4 x i32> @andctest(<4 x i32> %A, <4 x i32> %B) {
        call <4 x i32> @llvm.spu.si.andc(<4 x i32> %A, <4 x i32> %B)
        %Y = bitcast <4 x i32> %1 to <4 x i32>
        ret <4 x i32> %Y
}

; AND word elements with a signed immediate (65) -> "andi" instruction.
define <4 x i32> @anditest(<4 x i32> %A) {
        call <4 x i32> @llvm.spu.si.andi(<4 x i32> %A, i16 65)
        %Y = bitcast <4 x i32> %1 to <4 x i32>
        ret <4 x i32> %Y
}

; AND halfword elements with a signed immediate (65) -> "andhi" instruction.
define <8 x i16> @andhitest(<8 x i16> %A) {
        call <8 x i16> @llvm.spu.si.andhi(<8 x i16> %A, i16 65)
        %Y = bitcast <8 x i16> %1 to <8 x i16>
        ret <8 x i16> %Y
}