; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s

; Checks how NVPTX lowers alloca buffers and their passing to functions.
;
; Produced with the following CUDA code:
;  extern "C" __attribute__((device)) void callee(float* f, char* buf);
;
;  extern "C" __attribute__((global)) void kernel_func(float* a) {
;    char buf[4 * sizeof(float)];
;    *(reinterpret_cast<float*>(&buf[0])) = a[0];
;    *(reinterpret_cast<float*>(&buf[1])) = a[1];
;    *(reinterpret_cast<float*>(&buf[2])) = a[2];
;    *(reinterpret_cast<float*>(&buf[3])) = a[3];
;    callee(a, buf);
;  }
     17 ; CHECK: .visible .entry kernel_func
     18 define void @kernel_func(float* %a) {
     19 entry:
     20   %buf = alloca [16 x i8], align 4
     21 
     22 ; CHECK: .local .align 4 .b8 	__local_depot0[16]
     23 ; CHECK: mov.u64 %SPL
     24 
     25 ; CHECK: ld.param.u64 %rd[[A_REG:[0-9]+]], [kernel_func_param_0]
     26 ; CHECK: cvta.to.global.u64 %rd[[A1_REG:[0-9]+]], %rd[[A_REG]]
     27 ; CHECK: add.u64 %rd[[SP_REG:[0-9]+]], %SP, 0
     28 ; CHECK: ld.global.f32 %f[[A0_REG:[0-9]+]], [%rd[[A1_REG]]]
     29 ; CHECK: st.local.f32 [{{%rd[0-9]+}}], %f[[A0_REG]]
     30 
     31   %0 = load float, float* %a, align 4
     32   %1 = bitcast [16 x i8]* %buf to float*
     33   store float %0, float* %1, align 4
     34   %arrayidx2 = getelementptr inbounds float, float* %a, i64 1
     35   %2 = load float, float* %arrayidx2, align 4
     36   %arrayidx3 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 1
     37   %3 = bitcast i8* %arrayidx3 to float*
     38   store float %2, float* %3, align 4
     39   %arrayidx4 = getelementptr inbounds float, float* %a, i64 2
     40   %4 = load float, float* %arrayidx4, align 4
     41   %arrayidx5 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 2
     42   %5 = bitcast i8* %arrayidx5 to float*
     43   store float %4, float* %5, align 4
     44   %arrayidx6 = getelementptr inbounds float, float* %a, i64 3
     45   %6 = load float, float* %arrayidx6, align 4
     46   %arrayidx7 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 3
     47   %7 = bitcast i8* %arrayidx7 to float*
     48   store float %6, float* %7, align 4
     49 
     50 ; CHECK:        .param .b64 param0;
     51 ; CHECK-NEXT:   st.param.b64  [param0+0], %rd[[A_REG]]
     52 ; CHECK-NEXT:   .param .b64 param1;
     53 ; CHECK-NEXT:   st.param.b64  [param1+0], %rd[[SP_REG]]
     54 ; CHECK-NEXT:   call.uni
     55 ; CHECK-NEXT:   callee,
     56 
     57   %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 0
     58   call void @callee(float* %a, i8* %arraydecay) #2
     59   ret void
     60 }
     61 
     62 declare void @callee(float*, i8*)
     63 
     64 !nvvm.annotations = !{!0}
     65 
     66 !0 = !{void (float*)* @kernel_func, !"kernel", i32 1}
     67