; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mtriple=amdgcn---amdgiz -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=GFX89 -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX9 -check-prefix=GFX89 -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mtriple=r600---amdgiz -mcpu=cypress < %s | FileCheck -enable-var-scope -check-prefix=EG -check-prefix=FUNC %s

; FIXME: i16 promotion pass ruins the scalar cases when legal.
; FIXME: r600 fails verifier

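; The shl+ashr pairs below are the canonical sext-in-reg idiom: the DAG
; combiner folds them into SIGN_EXTEND_INREG, which should select to
; s_bfe_i32/v_bfe_i32, or to s_sext_i32_i8/s_sext_i32_i16 for the byte and
; word widths. Note the scalar BFE immediate packs the field as
; (width << 16) | offset, so the 0x10000 checked below encodes width 1 at
; offset 0.
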
; FUNC-LABEL: {{^}}sext_in_reg_i1_i32:
; GCN: s_load_dword [[ARG:s[0-9]+]],
; GCN: s_bfe_i32 [[SEXTRACT:s[0-9]+]], [[ARG]], 0x10000
; GCN: v_mov_b32_e32 [[EXTRACT:v[0-9]+]], [[SEXTRACT]]
; GCN: buffer_store_dword [[EXTRACT]],

; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
; EG: LSHR * [[ADDR]]
; EG: BFE_INT * [[RES]], {{.*}}, 0.0, 1
define amdgpu_kernel void @sext_in_reg_i1_i32(i32 addrspace(1)* %out, i32 %in) #0 {
  %shl = shl i32 %in, 31
  %sext = ashr i32 %shl, 31
  store i32 %sext, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_i8_to_i32:
; GCN: s_add_i32 [[VAL:s[0-9]+]],
; GCN: s_sext_i32_i8 [[EXTRACT:s[0-9]+]], [[VAL]]
; GCN: v_mov_b32_e32 [[VEXTRACT:v[0-9]+]], [[EXTRACT]]
; GCN: buffer_store_dword [[VEXTRACT]],

; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
; EG: ADD_INT
; EG-NEXT: BFE_INT [[RES]], {{.*}}, 0.0, literal
; EG-NEXT: LSHR * [[ADDR]]
define amdgpu_kernel void @sext_in_reg_i8_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
  %c = add i32 %a, %b ; add to prevent folding into extload
  %shl = shl i32 %c, 24
  %ashr = ashr i32 %shl, 24
  store i32 %ashr, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_i16_to_i32:
; GCN: s_add_i32 [[VAL:s[0-9]+]],
; GCN: s_sext_i32_i16 [[EXTRACT:s[0-9]+]], [[VAL]]
; GCN: v_mov_b32_e32 [[VEXTRACT:v[0-9]+]], [[EXTRACT]]
; GCN: buffer_store_dword [[VEXTRACT]],

; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
; EG: ADD_INT
; EG-NEXT: BFE_INT [[RES]], {{.*}}, 0.0, literal
; EG-NEXT: LSHR * [[ADDR]]
define amdgpu_kernel void @sext_in_reg_i16_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
  %c = add i32 %a, %b ; add to prevent folding into extload
  %shl = shl i32 %c, 16
  %ashr = ashr i32 %shl, 16
  store i32 %ashr, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_i8_to_v1i32:
; GCN: s_add_i32 [[VAL:s[0-9]+]],
; GCN: s_sext_i32_i8 [[EXTRACT:s[0-9]+]], [[VAL]]
; GCN: v_mov_b32_e32 [[VEXTRACT:v[0-9]+]], [[EXTRACT]]
; GCN: buffer_store_dword [[VEXTRACT]],

; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
; EG: ADD_INT
; EG-NEXT: BFE_INT [[RES]], {{.*}}, 0.0, literal
; EG-NEXT: LSHR * [[ADDR]]
define amdgpu_kernel void @sext_in_reg_i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) #0 {
  %c = add <1 x i32> %a, %b ; add to prevent folding into extload
  %shl = shl <1 x i32> %c, <i32 24>
  %ashr = ashr <1 x i32> %shl, <i32 24>
  store <1 x i32> %ashr, <1 x i32> addrspace(1)* %out, align 4
  ret void
}

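; The scalar i64 cases below should select to s_bfe_i64, whose immediate uses
; the same (width << 16) | offset packing, e.g. 0x80000 is width 8 at offset
; 0. The leading variable shl plays the same role as the adds above, keeping
; the value out of an extending load.
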
; FUNC-LABEL: {{^}}sext_in_reg_i1_to_i64:
; GCN: s_lshl_b64 [[VAL:s\[[0-9]+:[0-9]+\]]]
; GCN-DAG: s_bfe_i64 s{{\[}}[[SLO:[0-9]+]]:[[SHI:[0-9]+]]{{\]}}, [[VAL]], 0x10000
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[SLO]]
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[SHI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
define amdgpu_kernel void @sext_in_reg_i1_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
  %c = shl i64 %a, %b
  %shl = shl i64 %c, 63
  %ashr = ashr i64 %shl, 63
  store i64 %ashr, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_i8_to_i64:
; GCN: s_lshl_b64 [[VAL:s\[[0-9]+:[0-9]+\]]]
; GCN-DAG: s_bfe_i64 s{{\[}}[[SLO:[0-9]+]]:[[SHI:[0-9]+]]{{\]}}, [[VAL]], 0x80000
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[SLO]]
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[SHI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
define amdgpu_kernel void @sext_in_reg_i8_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
  %c = shl i64 %a, %b
  %shl = shl i64 %c, 56
  %ashr = ashr i64 %shl, 56
  store i64 %ashr, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_i16_to_i64:
; GCN: s_lshl_b64 [[VAL:s\[[0-9]+:[0-9]+\]]]
; GCN-DAG: s_bfe_i64 s{{\[}}[[SLO:[0-9]+]]:[[SHI:[0-9]+]]{{\]}}, [[VAL]], 0x100000
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[SLO]]
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[SHI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
define amdgpu_kernel void @sext_in_reg_i16_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
  %c = shl i64 %a, %b
  %shl = shl i64 %c, 48
  %ashr = ashr i64 %shl, 48
  store i64 %ashr, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_i32_to_i64:
; GCN: s_lshl_b64 [[VAL:s\[[0-9]+:[0-9]+\]]]
; GCN-DAG: s_bfe_i64 s{{\[}}[[SLO:[0-9]+]]:[[SHI:[0-9]+]]{{\]}}, [[VAL]], 0x200000
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[SLO]]
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[SHI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
define amdgpu_kernel void @sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
  %c = shl i64 %a, %b
  %shl = shl i64 %c, 32
  %ashr = ashr i64 %shl, 32
  store i64 %ashr, i64 addrspace(1)* %out, align 8
  ret void
}

; This is broken on Evergreen for some reason related to the <1 x i64> kernel arguments.
; XFUNC-LABEL: {{^}}sext_in_reg_i8_to_v1i64:
; XGCN: s_bfe_i32 [[EXTRACT:s[0-9]+]], {{s[0-9]+}}, 524288
; XGCN: s_ashr_i32 {{v[0-9]+}}, [[EXTRACT]], 31
; XGCN: buffer_store_dword
; XEG: BFE_INT
; XEG: ASHR
; define amdgpu_kernel void @sext_in_reg_i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a, <1 x i64> %b) #0 {
;   %c = add <1 x i64> %a, %b
;   %shl = shl <1 x i64> %c, <i64 56>
;   %ashr = ashr <1 x i64> %shl, <i64 56>
;   store <1 x i64> %ashr, <1 x i64> addrspace(1)* %out, align 8
;   ret void
; }

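; There is no 64-bit VALU BFE, so the vector versions split the operation:
; v_bfe_i32 extracts and sign-extends the field in the low half, and a
; v_ashrrev_i32 by 31 rebuilds the high half from the new sign bit.
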
; FUNC-LABEL: {{^}}v_sext_in_reg_i1_to_i64:
; SI: buffer_load_dwordx2
; SI: v_lshl_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}

; GFX89: {{flat|global}}_load_dwordx2
; GFX89: v_lshlrev_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}

; GCN: v_bfe_i32 v[[LO:[0-9]+]], v[[VAL_LO]], 0, 1
; GCN: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]

; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
; GFX89: {{flat|global}}_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
define amdgpu_kernel void @v_sext_in_reg_i1_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x()
  %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
  %b.gep = getelementptr i64, i64 addrspace(1)* %bptr, i32 %tid
  %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
  %a = load i64, i64 addrspace(1)* %a.gep, align 8
  %b = load i64, i64 addrspace(1)* %b.gep, align 8

  %c = shl i64 %a, %b
  %shl = shl i64 %c, 63
  %ashr = ashr i64 %shl, 63
  store i64 %ashr, i64 addrspace(1)* %out.gep, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_sext_in_reg_i8_to_i64:
; SI: buffer_load_dwordx2
; SI: v_lshl_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}

; GFX89: {{flat|global}}_load_dwordx2
; GFX89: v_lshlrev_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}

; GCN: v_bfe_i32 v[[LO:[0-9]+]], v[[VAL_LO]], 0, 8
; GCN: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]

; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
; GFX89: {{flat|global}}_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
define amdgpu_kernel void @v_sext_in_reg_i8_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x()
  %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
  %b.gep = getelementptr i64, i64 addrspace(1)* %bptr, i32 %tid
  %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
  %a = load i64, i64 addrspace(1)* %a.gep, align 8
  %b = load i64, i64 addrspace(1)* %b.gep, align 8

  %c = shl i64 %a, %b
  %shl = shl i64 %c, 56
  %ashr = ashr i64 %shl, 56
  store i64 %ashr, i64 addrspace(1)* %out.gep, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_sext_in_reg_i16_to_i64:
; SI: buffer_load_dwordx2
; SI: v_lshl_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}

; GFX89: {{flat|global}}_load_dwordx2
; GFX89: v_lshlrev_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}

; GCN: v_bfe_i32 v[[LO:[0-9]+]], v[[VAL_LO]], 0, 16
; GCN: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]

; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
; GFX89: {{flat|global}}_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
define amdgpu_kernel void @v_sext_in_reg_i16_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x()
  %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
  %b.gep = getelementptr i64, i64 addrspace(1)* %bptr, i32 %tid
  %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
  %a = load i64, i64 addrspace(1)* %a.gep, align 8
  %b = load i64, i64 addrspace(1)* %b.gep, align 8

  %c = shl i64 %a, %b
  %shl = shl i64 %c, 48
  %ashr = ashr i64 %shl, 48
  store i64 %ashr, i64 addrspace(1)* %out.gep, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_sext_in_reg_i32_to_i64:
; SI: buffer_load_dwordx2
; SI: v_lshl_b64 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}},

; GFX89: {{flat|global}}_load_dwordx2
; GFX89: v_lshlrev_b64 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}},

; GCN: v_ashrrev_i32_e32 v[[SHR:[0-9]+]], 31, v[[LO]]
; GFX89: {{flat|global}}_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[SHR]]{{\]}}
define amdgpu_kernel void @v_sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x()
  %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
  %b.gep = getelementptr i64, i64 addrspace(1)* %bptr, i32 %tid
  %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
  %a = load i64, i64 addrspace(1)* %a.gep, align 8
  %b = load i64, i64 addrspace(1)* %b.gep, align 8

  %c = shl i64 %a, %b
  %shl = shl i64 %c, 32
  %ashr = ashr i64 %shl, 32
  store i64 %ashr, i64 addrspace(1)* %out.gep, align 8
  ret void
}

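; A shift pair with mismatched amounts is still a single BFE: shl by 6 then
; ashr by 7 sign-extends bits [25:1], i.e. offset 1 and width 25, and
; (25 << 16) | 1 = 0x190001, the immediate checked below.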
; FUNC-LABEL: {{^}}sext_in_reg_i1_in_i32_other_amount:
; GCN-NOT: s_lshl
; GCN-NOT: s_ashr
; GCN: s_bfe_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x190001

; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
; EG-NOT: BFE
; EG: ADD_INT
; EG: LSHL
; EG: ASHR [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
define amdgpu_kernel void @sext_in_reg_i1_in_i32_other_amount(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
  %c = add i32 %a, %b
  %x = shl i32 %c, 6
  %y = ashr i32 %x, 7
  store i32 %y, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_v2i1_in_v2i32_other_amount:
; GCN-NOT: s_lshl
; GCN-NOT: s_ashr
; GCN-DAG: s_bfe_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x190001
; GCN-DAG: s_bfe_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x190001
; GCN: s_endpgm

; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
; EG-NOT: BFE
; EG: ADD_INT
; EG: LSHL
; EG: ASHR [[RES]]
; EG: LSHL
; EG: ASHR [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
define amdgpu_kernel void @sext_in_reg_v2i1_in_v2i32_other_amount(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
  %c = add <2 x i32> %a, %b
  %x = shl <2 x i32> %c, <i32 6, i32 6>
  %y = ashr <2 x i32> %x, <i32 7, i32 7>
  store <2 x i32> %y, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_v2i1_to_v2i32:
; GCN: s_bfe_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
; GCN: s_bfe_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
; GCN: buffer_store_dwordx2

; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
define amdgpu_kernel void @sext_in_reg_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
  %c = add <2 x i32> %a, %b ; add to prevent folding into extload
  %shl = shl <2 x i32> %c, <i32 31, i32 31>
  %ashr = ashr <2 x i32> %shl, <i32 31, i32 31>
  store <2 x i32> %ashr, <2 x i32> addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_v4i1_to_v4i32:
; GCN: s_bfe_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
; GCN: s_bfe_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
; GCN: s_bfe_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
; GCN: s_bfe_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x10000
; GCN: buffer_store_dwordx4

; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW][XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
define amdgpu_kernel void @sext_in_reg_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) #0 {
  %c = add <4 x i32> %a, %b ; add to prevent folding into extload
  %shl = shl <4 x i32> %c, <i32 31, i32 31, i32 31, i32 31>
  %ashr = ashr <4 x i32> %shl, <i32 31, i32 31, i32 31, i32 31>
  store <4 x i32> %ashr, <4 x i32> addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_v2i8_to_v2i32:
; GCN: s_sext_i32_i8 {{s[0-9]+}}, {{s[0-9]+}}
; GCN: s_sext_i32_i8 {{s[0-9]+}}, {{s[0-9]+}}
; GCN: buffer_store_dwordx2

; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
define amdgpu_kernel void @sext_in_reg_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
  %c = add <2 x i32> %a, %b ; add to prevent folding into extload
  %shl = shl <2 x i32> %c, <i32 24, i32 24>
  %ashr = ashr <2 x i32> %shl, <i32 24, i32 24>
  store <2 x i32> %ashr, <2 x i32> addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_v4i8_to_v4i32:
; GCN: s_sext_i32_i8 {{s[0-9]+}}, {{s[0-9]+}}
; GCN: s_sext_i32_i8 {{s[0-9]+}}, {{s[0-9]+}}
; GCN: s_sext_i32_i8 {{s[0-9]+}}, {{s[0-9]+}}
; GCN: s_sext_i32_i8 {{s[0-9]+}}, {{s[0-9]+}}
; GCN: buffer_store_dwordx4

; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW][XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
define amdgpu_kernel void @sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) #0 {
  %c = add <4 x i32> %a, %b ; add to prevent folding into extload
  %shl = shl <4 x i32> %c, <i32 24, i32 24, i32 24, i32 24>
  %ashr = ashr <4 x i32> %shl, <i32 24, i32 24, i32 24, i32 24>
  store <4 x i32> %ashr, <4 x i32> addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_v2i16_to_v2i32:
; GCN: s_sext_i32_i16 {{s[0-9]+}}, {{s[0-9]+}}
; GCN: s_sext_i32_i16 {{s[0-9]+}}, {{s[0-9]+}}
; GCN: buffer_store_dwordx2

; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
; EG: BFE_INT [[RES]]
; EG: BFE_INT [[RES]]
; EG: LSHR {{\*?}} [[ADDR]]
define amdgpu_kernel void @sext_in_reg_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
  %c = add <2 x i32> %a, %b ; add to prevent folding into extload
  %shl = shl <2 x i32> %c, <i32 16, i32 16>
  %ashr = ashr <2 x i32> %shl, <i32 16, i32 16>
  store <2 x i32> %ashr, <2 x i32> addrspace(1)* %out, align 8
  ret void
}

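; The two tests below have no CHECK lines; presumably they are reduced
; regression tests that only need to make it through codegen without crashing.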
; FUNC-LABEL: {{^}}testcase:
define amdgpu_kernel void @testcase(i8 addrspace(1)* %out, i8 %a) #0 {
  %and_a_1 = and i8 %a, 1
  %cmp_eq = icmp eq i8 %and_a_1, 0
  %cmp_slt = icmp slt i8 %a, 0
  %sel0 = select i1 %cmp_slt, i8 0, i8 %a
  %sel1 = select i1 %cmp_eq, i8 0, i8 %a
  %xor = xor i8 %sel0, %sel1
  store i8 %xor, i8 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}testcase_3:
define amdgpu_kernel void @testcase_3(i8 addrspace(1)* %out, i8 %a) #0 {
  %and_a_1 = and i8 %a, 1
  %cmp_eq = icmp eq i8 %and_a_1, 0
  %cmp_slt = icmp slt i8 %a, 0
  %sel0 = select i1 %cmp_slt, i8 0, i8 %a
  %sel1 = select i1 %cmp_eq, i8 0, i8 %a
  %xor = xor i8 %sel0, %sel1
  store i8 %xor, i8 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}vgpr_sext_in_reg_v4i8_to_v4i32:
; GCN: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
; GCN: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
; GCN: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
; GCN: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 8
define amdgpu_kernel void @vgpr_sext_in_reg_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %a, <4 x i32> addrspace(1)* %b) #0 {
  %loada = load <4 x i32>, <4 x i32> addrspace(1)* %a, align 16
  %loadb = load <4 x i32>, <4 x i32> addrspace(1)* %b, align 16
  %c = add <4 x i32> %loada, %loadb ; add to prevent folding into extload
  %shl = shl <4 x i32> %c, <i32 24, i32 24, i32 24, i32 24>
  %ashr = ashr <4 x i32> %shl, <i32 24, i32 24, i32 24, i32 24>
  store <4 x i32> %ashr, <4 x i32> addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}vgpr_sext_in_reg_v4i16_to_v4i32:
; GCN: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 16
; GCN: v_bfe_i32 [[EXTRACT:v[0-9]+]], {{v[0-9]+}}, 0, 16
define amdgpu_kernel void @vgpr_sext_in_reg_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %a, <4 x i32> addrspace(1)* %b) #0 {
  %loada = load <4 x i32>, <4 x i32> addrspace(1)* %a, align 16
  %loadb = load <4 x i32>, <4 x i32> addrspace(1)* %b, align 16
  %c = add <4 x i32> %loada, %loadb ; add to prevent folding into extload
  %shl = shl <4 x i32> %c, <i32 16, i32 16, i32 16, i32 16>
  %ashr = ashr <4 x i32> %shl, <i32 16, i32 16, i32 16, i32 16>
  store <4 x i32> %ashr, <4 x i32> addrspace(1)* %out, align 8
  ret void
}

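; No BFE is expected here: the select implements smax(sext(x), 0), so the
; result is known to lie in [0, 127] and the trailing i8-to-i16 sext is a
; no-op the combiner can drop.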
; FUNC-LABEL: {{^}}sext_in_reg_to_illegal_type:
; GCN: buffer_load_sbyte
; GCN: v_max_i32
; GCN-NOT: bfe
; GCN: buffer_store_short
define amdgpu_kernel void @sext_in_reg_to_illegal_type(i16 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %src) #0 {
  %tmp5 = load i8, i8 addrspace(1)* %src, align 1
  %tmp2 = sext i8 %tmp5 to i32
  %tmp2.5 = icmp sgt i32 %tmp2, 0
  %tmp3 = select i1 %tmp2.5, i32 %tmp2, i32 0
  %tmp4 = trunc i32 %tmp3 to i8
  %tmp6 = sext i8 %tmp4 to i16
  store i16 %tmp6, i16 addrspace(1)* %out, align 2
  ret void
}

; Make sure we propagate the VALUness to users of a moved scalar BFE.
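; Once the BFE result lives in VGPRs, the and with the uniform %s.val operand
; has to be done with per-half v_and_b32 instructions, which is what the
; RESULT_LO/RESULT_HI captures below pin down.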

; FUNC-LABEL: {{^}}v_sext_in_reg_i1_to_i64_move_use:
; SI: buffer_load_dwordx2
; SI: v_lshl_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}

; GFX89: {{flat|global}}_load_dwordx2
; GFX89: v_lshlrev_b64 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}

; GCN-DAG: v_bfe_i32 v[[LO:[0-9]+]], v[[VAL_LO]], 0, 1
; GCN-DAG: v_ashrrev_i32_e32 v[[HI:[0-9]+]], 31, v[[LO]]
; GCN-DAG: v_and_b32_e32 v[[RESULT_LO:[0-9]+]], s{{[0-9]+}}, v[[LO]]
; GCN-DAG: v_and_b32_e32 v[[RESULT_HI:[0-9]+]], s{{[0-9]+}}, v[[HI]]
; SI: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
; GFX89: {{flat|global}}_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
define amdgpu_kernel void @v_sext_in_reg_i1_to_i64_move_use(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i64 %s.val) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x()
  %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
  %b.gep = getelementptr i64, i64 addrspace(1)* %bptr, i32 %tid
  %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
  %a = load i64, i64 addrspace(1)* %a.gep, align 8
  %b = load i64, i64 addrspace(1)* %b.gep, align 8

  %c = shl i64 %a, %b
  %shl = shl i64 %c, 63
  %ashr = ashr i64 %shl, 63

  %and = and i64 %ashr, %s.val
  store i64 %and, i64 addrspace(1)* %out.gep, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_sext_in_reg_i32_to_i64_move_use:
; SI: buffer_load_dwordx2
; SI: v_lshl_b64 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}},

; GFX89: {{flat|global}}_load_dwordx2
; GFX89: v_lshlrev_b64 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}},

; GCN-DAG: v_ashrrev_i32_e32 v[[SHR:[0-9]+]], 31, v[[LO]]
; GCN-DAG: v_and_b32_e32 v[[RESULT_LO:[0-9]+]], s{{[0-9]+}}, v[[LO]]
; GCN-DAG: v_and_b32_e32 v[[RESULT_HI:[0-9]+]], s{{[0-9]+}}, v[[SHR]]

; SI: buffer_store_dwordx2 v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
; GFX89: {{flat|global}}_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[RESULT_LO]]:[[RESULT_HI]]{{\]}}
define amdgpu_kernel void @v_sext_in_reg_i32_to_i64_move_use(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i64 %s.val) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x()
  %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
  %b.gep = getelementptr i64, i64 addrspace(1)* %bptr, i32 %tid
  %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
  %a = load i64, i64 addrspace(1)* %a.gep, align 8
  %b = load i64, i64 addrspace(1)* %b.gep, align 8

  %c = shl i64 %a, %b
  %shl = shl i64 %c, 32
  %ashr = ashr i64 %shl, 32
  %and = and i64 %ashr, %s.val
  store i64 %and, i64 addrspace(1)* %out.gep, align 8
  ret void
}

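; On VI and gfx9, where i16 is legal, the i16 promotion pass leaves a
; shift/sext/shift sequence that is not recombined into a single BFE (see the
; FIXME at the top of the file), so the GFX89 checks below expect the raw
; s_lshl_b32/s_sext_i32_i16/s_lshr_b32 sequence instead.
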
; FUNC-LABEL: {{^}}s_sext_in_reg_i1_i16:
; GCN: s_load_dword [[VAL:s[0-9]+]]

; SI: s_bfe_i32 [[BFE:s[0-9]+]], [[VAL]], 0x10000
; SI: v_mov_b32_e32 [[VBFE:v[0-9]+]], [[BFE]]
; SI: buffer_store_short [[VBFE]]

; GFX89: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 15
; GFX89: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
; GFX89: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 15
define amdgpu_kernel void @s_sext_in_reg_i1_i16(i16 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
  %ld = load i32, i32 addrspace(4)* %ptr
  %in = trunc i32 %ld to i16
  %shl = shl i16 %in, 15
  %sext = ashr i16 %shl, 15
  store i16 %sext, i16 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_sext_in_reg_i2_i16:
; GCN: s_load_dword [[VAL:s[0-9]+]]

; SI: s_bfe_i32 [[BFE:s[0-9]+]], [[VAL]], 0x20000
; SI: v_mov_b32_e32 [[VBFE:v[0-9]+]], [[BFE]]
; SI: buffer_store_short [[VBFE]]

; GFX89: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 14
; GFX89: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
; GFX89: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 14
define amdgpu_kernel void @s_sext_in_reg_i2_i16(i16 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
  %ld = load i32, i32 addrspace(4)* %ptr
  %in = trunc i32 %ld to i16
  %shl = shl i16 %in, 14
  %sext = ashr i16 %shl, 14
  store i16 %sext, i16 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_sext_in_reg_i1_i16:
; GCN: {{buffer|flat|global}}_load_ushort [[VAL:v[0-9]+]]
; GCN: v_bfe_i32 [[BFE:v[0-9]+]], [[VAL]], 0, 1{{$}}

; GCN: ds_write_b16 v{{[0-9]+}}, [[BFE]]
define amdgpu_kernel void @v_sext_in_reg_i1_i16(i16 addrspace(3)* %out, i16 addrspace(1)* %ptr) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x()
  %gep = getelementptr i16, i16 addrspace(1)* %ptr, i32 %tid
  %out.gep = getelementptr i16, i16 addrspace(3)* %out, i32 %tid

  %in = load i16, i16 addrspace(1)* %gep
  %shl = shl i16 %in, 15
  %sext = ashr i16 %shl, 15
  store i16 %sext, i16 addrspace(3)* %out.gep
  ret void
}

; FUNC-LABEL: {{^}}v_sext_in_reg_i1_i16_nonload:
; GCN: {{buffer|flat|global}}_load_ushort [[VAL0:v[0-9]+]]
; GCN: {{buffer|flat|global}}_load_ushort [[VAL1:v[0-9]+]]

; SI: v_lshlrev_b32_e32 [[REG:v[0-9]+]], [[VAL1]], [[VAL0]]
; GFX89: v_lshlrev_b16_e32 [[REG:v[0-9]+]], [[VAL1]], [[VAL0]]

; GCN: v_bfe_i32 [[BFE:v[0-9]+]], [[REG]], 0, 1{{$}}
; GCN: ds_write_b16 v{{[0-9]+}}, [[BFE]]
define amdgpu_kernel void @v_sext_in_reg_i1_i16_nonload(i16 addrspace(3)* %out, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr, i16 %s.val) nounwind {
  %tid = call i32 @llvm.r600.read.tidig.x()
  %a.gep = getelementptr i16, i16 addrspace(1)* %aptr, i32 %tid
  %b.gep = getelementptr i16, i16 addrspace(1)* %bptr, i32 %tid
  %out.gep = getelementptr i16, i16 addrspace(3)* %out, i32 %tid
  %a = load volatile i16, i16 addrspace(1)* %a.gep, align 2
  %b = load volatile i16, i16 addrspace(1)* %b.gep, align 2

  %c = shl i16 %a, %b
  %shl = shl i16 %c, 15
  %ashr = ashr i16 %shl, 15

  store i16 %ashr, i16 addrspace(3)* %out.gep, align 2
  ret void
}

; FUNC-LABEL: {{^}}s_sext_in_reg_i2_i16_arg:
; GCN: s_load_dword [[VAL:s[0-9]+]]

; SI: s_bfe_i32 [[BFE:s[0-9]+]], [[VAL]], 0x20000
; SI: v_mov_b32_e32 [[VBFE:v[0-9]+]], [[BFE]]
; SI: buffer_store_short [[VBFE]]

; GFX89: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 14{{$}}
; GFX89: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
; GFX89: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 14{{$}}
define amdgpu_kernel void @s_sext_in_reg_i2_i16_arg(i16 addrspace(1)* %out, i16 %in) #0 {
  %shl = shl i16 %in, 14
  %sext = ashr i16 %shl, 14
  store i16 %sext, i16 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_sext_in_reg_i8_i16_arg:
; GCN: s_load_dword [[VAL:s[0-9]+]]

; SI: s_sext_i32_i8 [[SSEXT:s[0-9]+]], [[VAL]]
; SI: v_mov_b32_e32 [[VSEXT:v[0-9]+]], [[SSEXT]]
; SI: buffer_store_short [[VSEXT]]

; GFX89: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8{{$}}
; GFX89: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
; GFX89: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8{{$}}
define amdgpu_kernel void @s_sext_in_reg_i8_i16_arg(i16 addrspace(1)* %out, i16 %in) #0 {
  %shl = shl i16 %in, 8
  %sext = ashr i16 %shl, 8
  store i16 %sext, i16 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_sext_in_reg_i15_i16_arg:
; GCN: s_load_dword [[VAL:s[0-9]+]]

; SI: s_bfe_i32 [[BFE:s[0-9]+]], [[VAL]], 0xf0000
; SI: v_mov_b32_e32 [[VBFE:v[0-9]+]], [[BFE]]
; SI: buffer_store_short [[VBFE]]

; GFX89: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1{{$}}
; GFX89: s_sext_i32_i16 s{{[0-9]+}}, s{{[0-9]+}}
; GFX89: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1{{$}}
define amdgpu_kernel void @s_sext_in_reg_i15_i16_arg(i16 addrspace(1)* %out, i16 %in) #0 {
  %shl = shl i16 %in, 1
  %sext = ashr i16 %shl, 1
  store i16 %sext, i16 addrspace(1)* %out
  ret void
}

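; gfx9 has packed 16-bit VALU ops, so the <2 x i16> and <3 x i16> cases below
; are checked for v_pk_lshlrev_b16/v_pk_ashrrev_i16 pairs that shift both
; lanes at once, rather than for a BFE.
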
; FUNC-LABEL: {{^}}sext_in_reg_v2i1_to_v2i16:
; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[SHL:v[0-9]+]], 15, [[ADD]]
; GFX9: v_pk_ashrrev_i16 [[SRA:v[0-9]+]], 15, [[SHL]]
define amdgpu_kernel void @sext_in_reg_v2i1_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) #0 {
  %c = add <2 x i16> %a, %b ; add to prevent folding into extload
  %shl = shl <2 x i16> %c, <i16 15, i16 15>
  %ashr = ashr <2 x i16> %shl, <i16 15, i16 15>
  store <2 x i16> %ashr, <2 x i16> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_v3i1_to_v3i16:
; GFX9: v_pk_add_u16
; GFX9: v_pk_add_u16
; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 15, v{{[0-9]+}}
; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 15, v{{[0-9]+}}
; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 15, v{{[0-9]+}}
; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 15, v{{[0-9]+}}
define amdgpu_kernel void @sext_in_reg_v3i1_to_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %a, <3 x i16> %b) #0 {
  %c = add <3 x i16> %a, %b ; add to prevent folding into extload
  %shl = shl <3 x i16> %c, <i16 15, i16 15, i16 15>
  %ashr = ashr <3 x i16> %shl, <i16 15, i16 15, i16 15>
  store <3 x i16> %ashr, <3 x i16> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_v2i2_to_v2i16:
; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[SHL:v[0-9]+]], 14, [[ADD]]
; GFX9: v_pk_ashrrev_i16 [[SRA:v[0-9]+]], 14, [[SHL]]
define amdgpu_kernel void @sext_in_reg_v2i2_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) #0 {
  %c = add <2 x i16> %a, %b ; add to prevent folding into extload
  %shl = shl <2 x i16> %c, <i16 14, i16 14>
  %ashr = ashr <2 x i16> %shl, <i16 14, i16 14>
  store <2 x i16> %ashr, <2 x i16> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_v2i8_to_v2i16:
; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[SHL:v[0-9]+]], 8, [[ADD]]
; GFX9: v_pk_ashrrev_i16 [[SRA:v[0-9]+]], 8, [[SHL]]
define amdgpu_kernel void @sext_in_reg_v2i8_to_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b) #0 {
  %c = add <2 x i16> %a, %b ; add to prevent folding into extload
  %shl = shl <2 x i16> %c, <i16 8, i16 8>
  %ashr = ashr <2 x i16> %shl, <i16 8, i16 8>
  store <2 x i16> %ashr, <2 x i16> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}sext_in_reg_v3i8_to_v3i16:
; GFX9: v_pk_add_u16
; GFX9: v_pk_add_u16
; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 8, v{{[0-9]+}}
; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 8, v{{[0-9]+}}
; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 8, v{{[0-9]+}}
; GFX9: v_pk_ashrrev_i16 v{{[0-9]+}}, 8, v{{[0-9]+}}
define amdgpu_kernel void @sext_in_reg_v3i8_to_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %a, <3 x i16> %b) #0 {
  %c = add <3 x i16> %a, %b ; add to prevent folding into extload
  %shl = shl <3 x i16> %c, <i16 8, i16 8, i16 8>
  %ashr = ashr <3 x i16> %shl, <i16 8, i16 8, i16 8>
  store <3 x i16> %ashr, <3 x i16> addrspace(1)* %out
  ret void
}

declare i32 @llvm.r600.read.tidig.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }