; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s

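; llvm.r600.read.tidig.x returns the workitem id in the x dimension; the
; v_and tests below use it to compute per-lane addresses so the loaded
; operands end up in VGPRs rather than SGPRs.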
declare i32 @llvm.r600.read.tidig.x() #0

; FUNC-LABEL: {{^}}test2:
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
  %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
  %result = and <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test4:
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
  %result = and <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_i32:
; SI: s_and_b32
define void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_constant_i32:
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687
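; 1234567 == 0x12d687, which is too large for an SI inline immediate, so it
; is emitted as a literal constant.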
define void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
  %and = and i32 %a, 1234567
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FIXME: We should duplicate the constant so that the SALU use folds
; into the s_and_b32 and the VALU use is materialized directly,
; without copying from the SGPR.

; Second use is a VGPR use of the constant.
; FUNC-LABEL: {{^}}s_and_multi_use_constant_i32_0:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x12d687
; SI-DAG: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], [[K]]
; SI: buffer_store_dword [[VK]]
define void @s_and_multi_use_constant_i32_0(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, 1234567

  ; This second use stops a future pass from replacing the copy-to-VGPR plus
  ; store with a VALU op.
  %foo = add i32 %and, %b
  store volatile i32 %foo, i32 addrspace(1)* %out
  store volatile i32 1234567, i32 addrspace(1)* %out
  ret void
}

; Second use is another SGPR use of the constant.
; FUNC-LABEL: {{^}}s_and_multi_use_constant_i32_1:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x12d687
; SI: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI: s_add_i32
; SI: s_add_i32 [[ADD:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], s{{[0-9]+}}
; SI: buffer_store_dword [[VRESULT]]
define void @s_and_multi_use_constant_i32_1(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, 1234567
  %foo = add i32 %and, 1234567
  %bar = add i32 %foo, %b
  store volatile i32 %bar, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_vgpr_vgpr:
; SI: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @v_and_i32_vgpr_vgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
  %gep.b = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load i32, i32 addrspace(1)* %gep.a
  %b = load i32, i32 addrspace(1)* %gep.b
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_sgpr_vgpr:
; SI-DAG: s_load_dword [[SA:s[0-9]+]]
; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
define void @v_and_i32_sgpr_vgpr(i32 addrspace(1)* %out, i32 %a, i32 addrspace(1)* %bptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.b = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %b = load i32, i32 addrspace(1)* %gep.b
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_vgpr_sgpr:
; SI-DAG: s_load_dword [[SA:s[0-9]+]]
; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
define void @v_and_i32_vgpr_sgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 %b) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load i32, i32 addrspace(1)* %gep.a
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_constant_i32:
; SI: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, v{{[0-9]+}}
define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, 1234567
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_64_i32:
; SI: v_and_b32_e32 v{{[0-9]+}}, 64, v{{[0-9]+}}
define void @v_and_inline_imm_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, 64
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_neg_16_i32:
; SI: v_and_b32_e32 v{{[0-9]+}}, -16, v{{[0-9]+}}
define void @v_and_inline_imm_neg_16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, -16
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_i64:
; SI: s_and_b64
define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
  %and = and i64 %a, %b
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Should use SGPRs
; FUNC-LABEL: {{^}}s_and_i1:
; SI: v_and_b32
define void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
  %and = and i1 %a, %b
  store i1 %and, i1 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_constant_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
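; 281474976710655 == 0xffffffffffff (2^48 - 1), too wide for an inline
; immediate, so the constant is materialized into an SGPR pair.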
define void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
  %and = and i64 %a, 281474976710655
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64:
; SI: v_and_b32
; SI: v_and_b32
define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %b = load i64, i64 addrspace(1)* %bptr, align 8
  %and = and i64 %a, %b
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64_br:
; SI: v_and_b32
; SI: v_and_b32
define void @v_and_i64_br(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i32 %cond) {
entry:
  %tmp0 = icmp eq i32 %cond, 0
  br i1 %tmp0, label %if, label %endif

if:
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %b = load i64, i64 addrspace(1)* %bptr, align 8
  %and = and i64 %a, %b
  br label %endif

endif:
  %tmp1 = phi i64 [%and, %if], [0, %entry]
  store i64 %tmp1, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_constant_i64:
; SI-DAG: s_mov_b32 [[KLO:s[0-9]+]], 0xab19b207
; SI-DAG: s_movk_i32 [[KHI:s[0-9]+]], 0x11e{{$}}
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KLO]], {{v[0-9]+}}
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KHI]], {{v[0-9]+}}
; SI: buffer_store_dwordx2
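; 1231231234567 == 0x11e_ab19b207: the low half (0xab19b207) is ANDed with
; KLO and the high half (0x11e) with KHI, as checked above.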
define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 1231231234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Should replace the 'and' with 0 on the high half with a mov of 0.
; FUNC-LABEL: {{^}}v_and_i64_32_bit_constant:
; SI: v_and_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; SI: v_and_b32_e32 {{v[0-9]+}}, 0, {{v[0-9]+}}
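; 1234567 fits in 32 bits, so only the low half needs a real mask; the high
; half is ANDed with 0.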
define void @v_and_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 1234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Replace and 0 with mov 0
; FUNC-LABEL: {{^}}v_and_inline_imm_i64:
; SI: v_and_b32_e32 {{v[0-9]+}}, 64, {{v[0-9]+}}
; SI: v_and_b32_e32 {{v[0-9]+}}, 0, {{v[0-9]+}}
define void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 64
define void @s_and_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_1_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1
define void @s_and_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 1
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

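; The following tests use the raw IEEE-754 double bit patterns for
; +/-0.5, +/-1.0, +/-2.0, and +/-4.0, which the SI encoding accepts as
; 64-bit floating-point inline immediates:
;   4607182418800017408  == 0x3ff0000000000000 ==  1.0
;   13830554455654793216 == 0xbff0000000000000 == -1.0
;   4602678819172646912  == 0x3fe0000000000000 ==  0.5
;   13826050856027422720 == 0xbfe0000000000000 == -0.5
;   4611686018427387904  == 0x4000000000000000 ==  2.0
;   13835058055282163712 == 0xc000000000000000 == -2.0
;   4616189618054758400  == 0x4010000000000000 ==  4.0
;   13839561654909534208 == 0xc010000000000000 == -4.0
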
; FUNC-LABEL: {{^}}s_and_inline_imm_1.0_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1.0
define void @s_and_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4607182418800017408
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_1.0_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -1.0
define void @s_and_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13830554455654793216
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_0.5_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0.5
define void @s_and_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4602678819172646912
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_0.5_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -0.5
define void @s_and_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13826050856027422720
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_2.0_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 2.0
define void @s_and_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4611686018427387904
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_2.0_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -2.0
define void @s_and_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13835058055282163712
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_4.0_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 4.0
define void @s_and_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4616189618054758400
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_4.0_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -4.0
define void @s_and_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13839561654909534208
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; Test with the 64-bit integer bit pattern of a 32-bit float in the
; low 32 bits, which is not a valid 64-bit inline immediate.

; FUNC-LABEL: {{^}}s_and_inline_imm_f32_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 4.0
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0{{$}}
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
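; 1082130432 == 0x40800000, the f32 bit pattern for 4.0; only the low 32
; bits are set, so the constant must be built in an SGPR pair.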
define void @s_and_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 1082130432
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Redundant copy of the -1 register
; FUNC-LABEL: {{^}}s_and_inline_imm_f32_neg_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], -4.0
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -1{{$}}
; SI-DAG: s_mov_b32 s[[K_HI_COPY:[0-9]+]], s[[K_HI]]
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI_COPY]]{{\]}}
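; -1065353216 == 0xffffffffc0800000 when sign-extended to 64 bits: the f32
; bit pattern for -4.0 in the low half, all ones (-1) in the high half.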
define void @s_and_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, -1065353216
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; The same f32 bit pattern shifted into the upper 32 bits.
; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 4.0
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
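; 4647714815446351872 == 0x4080000000000000: the f32 bit pattern for 4.0
; (0x40800000) placed in the high 32 bits, zero in the low 32 bits.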
define void @s_and_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4647714815446351872
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_neg_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -4.0
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
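; 13871086852301127680 == 0xc080000000000000: the f32 bit pattern for -4.0
; (0xc0800000) placed in the high 32 bits, zero in the low 32 bits.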
define void @s_and_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13871086852301127680
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

attributes #0 = { nounwind readnone }