; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s

      3 ; CHECK: trunc4
      4 ; CHECK: vpermd
      5 ; CHECK-NOT: vinsert
      6 ; CHECK: ret
      7 define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
      8   %B = trunc <4 x i64> %A to <4 x i32>
      9   ret <4 x i32>%B
     10 }
     11 
     12 ; CHECK: trunc8
     13 ; CHECK: vpshufb
     14 ; CHECK-NOT: vinsert
     15 ; CHECK: ret
     16 
     17 define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
     18   %B = trunc <8 x i32> %A to <8 x i16>
     19   ret <8 x i16>%B
     20 }
     21 
     22 ; CHECK: sext4
     23 ; CHECK: vpmovsxdq
     24 ; CHECK-NOT: vinsert
     25 ; CHECK: ret
     26 define <4 x i64> @sext4(<4 x i32> %A) nounwind {
     27   %B = sext <4 x i32> %A to <4 x i64>
     28   ret <4 x i64>%B
     29 }
     30 
     31 ; CHECK: sext8
     32 ; CHECK: vpmovsxwd
     33 ; CHECK-NOT: vinsert
     34 ; CHECK: ret
     35 define <8 x i32> @sext8(<8 x i16> %A) nounwind {
     36   %B = sext <8 x i16> %A to <8 x i32>
     37   ret <8 x i32>%B
     38 }
     39 
     40 ; CHECK: zext4
     41 ; CHECK: vpmovzxdq
     42 ; CHECK-NOT: vinsert
     43 ; CHECK: ret
     44 define <4 x i64> @zext4(<4 x i32> %A) nounwind {
     45   %B = zext <4 x i32> %A to <4 x i64>
     46   ret <4 x i64>%B
     47 }
     48 
     49 ; CHECK: zext8
     50 ; CHECK: vpmovzxwd
     51 ; CHECK-NOT: vinsert
     52 ; CHECK: ret
     53 define <8 x i32> @zext8(<8 x i16> %A) nounwind {
     54   %B = zext <8 x i16> %A to <8 x i32>
     55   ret <8 x i32>%B
     56 }
     57 ; CHECK: zext_8i8_8i32
     58 ; CHECK: vpmovzxwd
     59 ; CHECK: vpand
     60 ; CHECK: ret
     61 define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
     62   %B = zext <8 x i8> %A to <8 x i32>  
     63   ret <8 x i32>%B
     64 }
     65 
     66 ; CHECK: load_sext_test1
     67 ; CHECK: vpmovsxdq (%r{{[^,]*}}), %ymm{{.*}}
     68 ; CHECK: ret 
     69 define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
     70  %X = load <4 x i32>* %ptr
     71  %Y = sext <4 x i32> %X to <4 x i64>
     72  ret <4 x i64>%Y
     73 }
     74 
     75 ; CHECK: load_sext_test2
     76 ; CHECK: vpmovsxbq (%r{{[^,]*}}), %ymm{{.*}}
     77 ; CHECK: ret 
     78 define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
     79  %X = load <4 x i8>* %ptr
     80  %Y = sext <4 x i8> %X to <4 x i64>
     81  ret <4 x i64>%Y
     82 }
     83 
     84 ; CHECK: load_sext_test3
     85 ; CHECK: vpmovsxwq (%r{{[^,]*}}), %ymm{{.*}}
     86 ; CHECK: ret 
     87 define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
     88  %X = load <4 x i16>* %ptr
     89  %Y = sext <4 x i16> %X to <4 x i64>
     90  ret <4 x i64>%Y
     91 }
     92 
     93 ; CHECK: load_sext_test4
     94 ; CHECK: vpmovsxwd (%r{{[^,]*}}), %ymm{{.*}}
     95 ; CHECK: ret 
     96 define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
     97  %X = load <8 x i16>* %ptr
     98  %Y = sext <8 x i16> %X to <8 x i32>
     99  ret <8 x i32>%Y
    100 }
    101 
    102 ; CHECK: load_sext_test5
    103 ; CHECK: vpmovsxbd (%r{{[^,]*}}), %ymm{{.*}}
    104 ; CHECK: ret 
    105 define <8 x i32> @load_sext_test5(<8 x i8> *%ptr) {
    106  %X = load <8 x i8>* %ptr
    107  %Y = sext <8 x i8> %X to <8 x i32>
    108  ret <8 x i32>%Y
    109 }
    110