; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

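; The mask 0x3fffffff clears the two high bits, so after the zext the shift
; left by 2 cannot carry into the upper 32 bits and can be done as a 32-bit shift.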
; GCN-LABEL: {{^}}zext_shl64_to_32:
; GCN: s_lshl_b32
; GCN-NOT: s_lshl_b64
define amdgpu_kernel void @zext_shl64_to_32(i64 addrspace(1)* nocapture %out, i32 %x) {
  %and = and i32 %x, 1073741823
  %ext = zext i32 %and to i64
  %shl = shl i64 %ext, 2
  store i64 %shl, i64 addrspace(1)* %out, align 4
  ret void
}

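; The mask 0x1fffffff leaves a non-negative value below 2^29, so the shifted
; result still fits in the low 31 bits and the 64-bit shift can again be
; narrowed to 32 bits.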
; GCN-LABEL: {{^}}sext_shl64_to_32:
; GCN: s_lshl_b32
; GCN-NOT: s_lshl_b64
define amdgpu_kernel void @sext_shl64_to_32(i64 addrspace(1)* nocapture %out, i32 %x) {
  %and = and i32 %x, 536870911
  %ext = sext i32 %and to i64
  %shl = shl i64 %ext, 2
  store i64 %shl, i64 addrspace(1)* %out, align 4
  ret void
}

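; With a 0x7fffffff mask the shift by 2 can carry into the upper half, so the
; full 64-bit shift must be kept.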
; GCN-LABEL: {{^}}zext_shl64_overflow:
; GCN: s_lshl_b64
; GCN-NOT: s_lshl_b32
define amdgpu_kernel void @zext_shl64_overflow(i64 addrspace(1)* nocapture %out, i32 %x) {
  %and = and i32 %x, 2147483647
  %ext = zext i32 %and to i64
  %shl = shl i64 %ext, 2
  store i64 %shl, i64 addrspace(1)* %out, align 4
  ret void
}

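; Same 0x7fffffff mask but with a sign extension; the shifted bits can reach
; past bit 31, so narrowing is not possible here either.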
; GCN-LABEL: {{^}}sext_shl64_overflow:
; GCN: s_lshl_b64
; GCN-NOT: s_lshl_b32
define amdgpu_kernel void @sext_shl64_overflow(i64 addrspace(1)* nocapture %out, i32 %x) {
  %and = and i32 %x, 2147483647
  %ext = sext i32 %and to i64
  %shl = shl i64 %ext, 2
  store i64 %shl, i64 addrspace(1)* %out, align 4
  ret void
}

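; Both multiply operands fit in 24 bits (the workitem id is masked to at most
; 6), so the multiply selects to v_mul_u32_u24 and the address scaling stays a
; 32-bit shift.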
; GCN-LABEL: {{^}}mulu24_shl64:
; GCN: v_mul_u32_u24_e32 [[M:v[0-9]+]], 7, v{{[0-9]+}}
; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, 2, [[M]]
define amdgpu_kernel void @mulu24_shl64(i32 addrspace(1)* nocapture %arg) {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = and i32 %tmp, 6
  %mulconv = mul nuw nsw i32 %tmp1, 7
  %tmp2 = zext i32 %mulconv to i64
  %tmp3 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp2
  store i32 0, i32 addrspace(1)* %tmp3, align 4
  ret void
}

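; The or with 0xff800000 makes the loaded value a sign-extended 24-bit
; quantity, so the signed multiply selects to v_mul_i32_i24 and is followed by
; a 32-bit shift.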
; GCN-LABEL: {{^}}muli24_shl64:
; GCN: v_mul_i32_i24_e32 [[M:v[0-9]+]], -7, v{{[0-9]+}}
; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, 3, [[M]]
define amdgpu_kernel void @muli24_shl64(i64 addrspace(1)* nocapture %arg, i32 addrspace(1)* nocapture readonly %arg1) {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = sext i32 %tmp to i64
  %tmp3 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp2
  %tmp4 = load i32, i32 addrspace(1)* %tmp3, align 4
  %tmp5 = or i32 %tmp4, -8388608
  %tmp6 = mul nsw i32 %tmp5, -7
  %tmp7 = zext i32 %tmp6 to i64
  %tmp8 = shl nuw nsw i64 %tmp7, 3
  %tmp9 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp2
  store i64 %tmp8, i64 addrspace(1)* %tmp9, align 8
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()