; RUN: llc < %s -o - -mcpu=generic -march=x86-64 -mattr=+sse42 | FileCheck %s

; Test load/store of vector types with illegal element counts, based on pr5626.
;

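; An aligned <3 x i32> is widened to a full XMM register for the add; the store
; is split into pextrd + movq so that only the 12 valid bytes are written back.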
%i32vec3 = type <3 x i32>
; CHECK: add3i32
define void @add3i32(%i32vec3*  sret %ret, %i32vec3* %ap, %i32vec3* %bp)  {
; CHECK: movdqa
; CHECK: paddd
; CHECK: pextrd
; CHECK: movq
	%a = load %i32vec3* %ap, align 16
	%b = load %i32vec3* %bp, align 16
	%x = add %i32vec3 %a, %b
	store %i32vec3 %x, %i32vec3* %ret, align 16
	ret void
}

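; With only 8-byte alignment the operands cannot be loaded with a single 16-byte
; movdqa; each one is assembled from a movq plus a pinsrd instead.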
; CHECK: add3i32_2
define void @add3i32_2(%i32vec3*  sret %ret, %i32vec3* %ap, %i32vec3* %bp)  {
; CHECK: movq
; CHECK: pinsrd
; CHECK: movq
; CHECK: pinsrd
; CHECK: paddd
; CHECK: pextrd
; CHECK: movq
	%a = load %i32vec3* %ap, align 8
	%b = load %i32vec3* %bp, align 8
	%x = add %i32vec3 %a, %b
	store %i32vec3 %x, %i32vec3* %ret, align 8
	ret void
}

%i32vec7 = type <7 x i32>
; CHECK: add7i32
define void @add7i32(%i32vec7*  sret %ret, %i32vec7* %ap, %i32vec7* %bp)  {
; CHECK: movdqa
; CHECK: movdqa
; CHECK: paddd
; CHECK: paddd
; CHECK: pextrd
; CHECK: movq
; CHECK: movdqa
	%a = load %i32vec7* %ap, align 16
	%b = load %i32vec7* %bp, align 16
	%x = add %i32vec7 %a, %b
	store %i32vec7 %x, %i32vec7* %ret, align 16
	ret void
}

; CHECK: add12i32
%i32vec12 = type <12 x i32>
define void @add12i32(%i32vec12*  sret %ret, %i32vec12* %ap, %i32vec12* %bp)  {
; CHECK: movdqa
; CHECK: movdqa
; CHECK: movdqa
; CHECK: paddd
; CHECK: paddd
; CHECK: paddd
; CHECK: movdqa
; CHECK: movdqa
; CHECK: movdqa
	%a = load %i32vec12* %ap, align 16
	%b = load %i32vec12* %bp, align 16
	%x = add %i32vec12 %a, %b
	store %i32vec12 %x, %i32vec12* %ret, align 16
	ret void
}


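; <3 x i16> ends up scalarized; the checks just look for three addl instructions
; and a ret.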
; CHECK: add3i16
%i16vec3 = type <3 x i16>
define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp) nounwind {
; CHECK: add3i16
; CHECK: addl
; CHECK: addl
; CHECK: addl
; CHECK: ret
	%a = load %i16vec3* %ap, align 16
	%b = load %i16vec3* %bp, align 16
	%x = add %i16vec3 %a, %b
	store %i16vec3 %x, %i16vec3* %ret, align 16
	ret void
}

; CHECK: add4i16
%i16vec4 = type <4 x i16>
define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp) nounwind {
; CHECK: add4i16
; CHECK: paddd
; CHECK: movq
	%a = load %i16vec4* %ap, align 16
	%b = load %i16vec4* %bp, align 16
	%x = add %i16vec4 %a, %b
	store %i16vec4 %x, %i16vec4* %ret, align 16
	ret void
}

; CHECK: add12i16
%i16vec12 = type <12 x i16>
define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12* %bp) nounwind {
; CHECK: movdqa
; CHECK: movdqa
; CHECK: paddw
; CHECK: paddw
; CHECK: movq
; CHECK: movdqa
	%a = load %i16vec12* %ap, align 16
	%b = load %i16vec12* %bp, align 16
	%x = add %i16vec12 %a, %b
	store %i16vec12 %x, %i16vec12* %ret, align 16
	ret void
}

; CHECK: add18i16
%i16vec18 = type <18 x i16>
define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18* %bp) nounwind {
; CHECK: movdqa
; CHECK: movdqa
; CHECK: movdqa
; CHECK: paddw
; CHECK: paddw
; CHECK: paddw
; CHECK: movd
; CHECK: movdqa
; CHECK: movdqa
	%a = load %i16vec18* %ap, align 16
	%b = load %i16vec18* %bp, align 16
	%x = add %i16vec18 %a, %b
	store %i16vec18 %x, %i16vec18* %ret, align 16
	ret void
}


; CHECK: add3i8
%i8vec3 = type <3 x i8>
define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) nounwind {
; CHECK: addb
; CHECK: addb
; CHECK: addb
; CHECK: ret
	%a = load %i8vec3* %ap, align 16
	%b = load %i8vec3* %bp, align 16
	%x = add %i8vec3 %a, %b
	store %i8vec3 %x, %i8vec3* %ret, align 16
	ret void
}

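; <31 x i8> is handled as two XMM halves; the tail of the store is broken into
; movq/pextrb/pextrw pieces so the store does not touch bytes past the 31-byte value.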
; CHECK: add31i8:
%i8vec31 = type <31 x i8>
define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp) nounwind {
; CHECK: movdqa
; CHECK: movdqa
; CHECK: paddb
; CHECK: paddb
; CHECK: movq
; CHECK: pextrb
; CHECK: pextrw
; CHECK: ret
	%a = load %i8vec31* %ap, align 16
	%b = load %i8vec31* %bp, align 16
	%x = add %i8vec31 %a, %b
	store %i8vec31 %x, %i8vec31* %ret, align 16
	ret void
}


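; rot: the <3 x i8> payload of %i8vec3pack sits in a 4-byte stack slot, so it is
; reloaded from the stack with a single movd.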
; CHECK: rot
%i8vec3pack = type { <3 x i8>, i8 }
define %i8vec3pack  @rot() nounwind {
; CHECK: movd {{-?[0-9]+}}(%rsp), {{%xmm[0-9]}}
entry:
  %X = alloca %i8vec3pack, align 4
  %rot = alloca %i8vec3pack, align 4
  %result = alloca %i8vec3pack, align 4
  %storetmp = bitcast %i8vec3pack* %X to <3 x i8>*
  store <3 x i8> <i8 -98, i8 -98, i8 -98>, <3 x i8>* %storetmp
  %storetmp1 = bitcast %i8vec3pack* %rot to <3 x i8>*
  store <3 x i8> <i8 1, i8 1, i8 1>, <3 x i8>* %storetmp1
  %tmp = load %i8vec3pack* %X
  %extractVec = extractvalue %i8vec3pack %tmp, 0
  %tmp2 = load %i8vec3pack* %rot
  %extractVec3 = extractvalue %i8vec3pack %tmp2, 0
  %shr = lshr <3 x i8> %extractVec, %extractVec3
  %storetmp4 = bitcast %i8vec3pack* %result to <3 x i8>*
  store <3 x i8> %shr, <3 x i8>* %storetmp4
  %tmp5 = load %i8vec3pack* %result
  ret %i8vec3pack %tmp5
}