; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s

declare void @llvm.AMDGPU.barrier.local() noduplicate nounwind

; SI-LABEL: @private_access_f64_alloca:
; SI: DS_WRITE_B64
; SI: DS_READ_B64
define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) nounwind {
  %val = load double addrspace(1)* %in, align 8
  %array = alloca double, i32 16, align 8
  %ptr = getelementptr double* %array, i32 %b
  store double %val, double* %ptr, align 8
  call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
  %result = load double* %ptr, align 8
  store double %result, double addrspace(1)* %out, align 8
  ret void
}

; SI-LABEL: @private_access_v2f64_alloca:
; SI: DS_WRITE_B64
; SI: DS_WRITE_B64
; SI: DS_READ_B64
; SI: DS_READ_B64
define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) nounwind {
  %val = load <2 x double> addrspace(1)* %in, align 16
  %array = alloca <2 x double>, i32 16, align 16
  %ptr = getelementptr <2 x double>* %array, i32 %b
  store <2 x double> %val, <2 x double>* %ptr, align 16
  call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
  %result = load <2 x double>* %ptr, align 16
  store <2 x double> %result, <2 x double> addrspace(1)* %out, align 16
  ret void
}

; SI-LABEL: @private_access_i64_alloca:
; SI: DS_WRITE_B64
; SI: DS_READ_B64
define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) nounwind {
  %val = load i64 addrspace(1)* %in, align 8
  %array = alloca i64, i32 16, align 8
  %ptr = getelementptr i64* %array, i32 %b
  store i64 %val, i64* %ptr, align 8
  call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
  %result = load i64* %ptr, align 8
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; SI-LABEL: @private_access_v2i64_alloca:
; SI: DS_WRITE_B64
; SI: DS_WRITE_B64
; SI: DS_READ_B64
; SI: DS_READ_B64
define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) nounwind {
  %val = load <2 x i64> addrspace(1)* %in, align 16
  %array = alloca <2 x i64>, i32 16, align 16
  %ptr = getelementptr <2 x i64>* %array, i32 %b
  store <2 x i64> %val, <2 x i64>* %ptr, align 16
  call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
  %result = load <2 x i64>* %ptr, align 16
  store <2 x i64> %result, <2 x i64> addrspace(1)* %out, align 16
  ret void
}