; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s

; Checks how NVPTX lowers alloca buffers and their passing to functions.
;
; Produced with the following CUDA code:
;  extern "C" __attribute__((device)) void callee(float* f, char* buf);
;
;  extern "C" __attribute__((global)) void kernel_func(float* a) {
;    char buf[4 * sizeof(float)];
;    *(reinterpret_cast<float*>(&buf[0])) = a[0];
;    *(reinterpret_cast<float*>(&buf[1])) = a[1];
;    *(reinterpret_cast<float*>(&buf[2])) = a[2];
;    *(reinterpret_cast<float*>(&buf[3])) = a[3];
;    callee(a, buf);
;  }

; CHECK: .visible .entry kernel_func
define void @kernel_func(float* %a) {
entry:
  ; The 16-byte local buffer must be lowered to a .local depot allocation.
  %buf = alloca [16 x i8], align 4

; CHECK: .local .align 4 .b8 __local_depot0[16]
; CHECK: mov.u64 %SPL

; CHECK: ld.param.u64 %rd[[A_REG:[0-9]+]], [kernel_func_param_0]
; CHECK: cvta.to.global.u64 %rd[[A1_REG:[0-9]+]], %rd[[A_REG]]
; FIXME: casting A1_REG to A2_REG is unnecessary; A2_REG is essentially A_REG
; CHECK: cvta.global.u64 %rd[[A2_REG:[0-9]+]], %rd[[A1_REG]]
; CHECK: cvta.local.u64 %rd[[SP_REG:[0-9]+]]
; CHECK: ld.global.f32 %f[[A0_REG:[0-9]+]], [%rd[[A1_REG]]]
; CHECK: st.local.f32 [{{%rd[0-9]+}}], %f[[A0_REG]]

  %0 = load float, float* %a, align 4
  %1 = bitcast [16 x i8]* %buf to float*
  store float %0, float* %1, align 4
  %arrayidx2 = getelementptr inbounds float, float* %a, i64 1
  %2 = load float, float* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 1
  %3 = bitcast i8* %arrayidx3 to float*
  store float %2, float* %3, align 4
  %arrayidx4 = getelementptr inbounds float, float* %a, i64 2
  %4 = load float, float* %arrayidx4, align 4
  %arrayidx5 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 2
  %5 = bitcast i8* %arrayidx5 to float*
  store float %4, float* %5, align 4
  %arrayidx6 = getelementptr inbounds float, float* %a, i64 3
  %6 = load float, float* %arrayidx6, align 4
  %arrayidx7 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 3
  %7 = bitcast i8* %arrayidx7 to float*
  store float %6, float* %7, align 4

; The generic pointer %a and the generic (cvta.local) address of the local
; buffer are both passed to the callee as .b64 call parameters.
; CHECK: .param .b64 param0;
; CHECK-NEXT: st.param.b64 [param0+0], %rd[[A2_REG]]
; CHECK-NEXT: .param .b64 param1;
; CHECK-NEXT: st.param.b64 [param1+0], %rd[[SP_REG]]
; CHECK-NEXT: call.uni
; CHECK-NEXT: callee,

  %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 0
  call void @callee(float* %a, i8* %arraydecay) #2
  ret void
}

declare void @callee(float*, i8*)

; Mark kernel_func as a CUDA kernel entry point for the NVPTX backend.
!nvvm.annotations = !{!0}

!0 = !{void (float*)* @kernel_func, !"kernel", i32 1}