; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s

; Check that invariant loads from the global addrspace are lowered to
; ld.global.nc.

; CHECK-LABEL: @ld_global
define i32 @ld_global(i32 addrspace(1)* %ptr) {
; CHECK: ld.global.nc.{{[a-z]}}32
  %a = load i32, i32 addrspace(1)* %ptr, !invariant.load !0
  ret i32 %a
}

; CHECK-LABEL: @ld_global_v2f16
define half @ld_global_v2f16(<2 x half> addrspace(1)* %ptr) {
; Load of v2f16 is weird. We consider it a legal type, which happens to be
; loaded/stored as a 32-bit scalar.
; CHECK: ld.global.nc.b32
  %a = load <2 x half>, <2 x half> addrspace(1)* %ptr, !invariant.load !0
  %v1 = extractelement <2 x half> %a, i32 0
  %v2 = extractelement <2 x half> %a, i32 1
  %sum = fadd half %v1, %v2
  ret half %sum
}

; CHECK-LABEL: @ld_global_v4f16
define half @ld_global_v4f16(<4 x half> addrspace(1)* %ptr) {
; Larger f16 vectors may be split into individual f16 elements, and multiple
; loads/stores may be vectorized using the f16 element type. In practice this
; is limited to the v4 variant only.
; CHECK: ld.global.nc.v4.b16
  %a = load <4 x half>, <4 x half> addrspace(1)* %ptr, !invariant.load !0
  %v1 = extractelement <4 x half> %a, i32 0
  %v2 = extractelement <4 x half> %a, i32 1
  %v3 = extractelement <4 x half> %a, i32 2
  %v4 = extractelement <4 x half> %a, i32 3
  %sum1 = fadd half %v1, %v2
  %sum2 = fadd half %v3, %v4
  %sum = fadd half %sum1, %sum2
  ret half %sum
}

; CHECK-LABEL: @ld_global_v8f16
define half @ld_global_v8f16(<8 x half> addrspace(1)* %ptr) {
; Larger vectors are, again, loaded as v4i32. PTX has no v8 variants of
; loads/stores, so the load/store vectorizer has to convert v8f16 -> v4 x v2f16.
; CHECK: ld.global.nc.v4.b32
  %a = load <8 x half>, <8 x half> addrspace(1)* %ptr, !invariant.load !0
  %v1 = extractelement <8 x half> %a, i32 0
  %v2 = extractelement <8 x half> %a, i32 2
  %v3 = extractelement <8 x half> %a, i32 4
  %v4 = extractelement <8 x half> %a, i32 6
  %sum1 = fadd half %v1, %v2
  %sum2 = fadd half %v3, %v4
  %sum = fadd half %sum1, %sum2
  ret half %sum
}

; CHECK-LABEL: @ld_global_v2i32
define i32 @ld_global_v2i32(<2 x i32> addrspace(1)* %ptr) {
; CHECK: ld.global.nc.v2.{{[a-z]}}32
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %ptr, !invariant.load !0
  %v1 = extractelement <2 x i32> %a, i32 0
  %v2 = extractelement <2 x i32> %a, i32 1
  %sum = add i32 %v1, %v2
  ret i32 %sum
}

; CHECK-LABEL: @ld_global_v4i32
define i32 @ld_global_v4i32(<4 x i32> addrspace(1)* %ptr) {
; CHECK: ld.global.nc.v4.{{[a-z]}}32
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %ptr, !invariant.load !0
  %v1 = extractelement <4 x i32> %a, i32 0
  %v2 = extractelement <4 x i32> %a, i32 1
  %v3 = extractelement <4 x i32> %a, i32 2
  %v4 = extractelement <4 x i32> %a, i32 3
  %sum1 = add i32 %v1, %v2
  %sum2 = add i32 %v3, %v4
  %sum3 = add i32 %sum1, %sum2
  ret i32 %sum3
}

; CHECK-LABEL: @ld_not_invariant
define i32 @ld_not_invariant(i32 addrspace(1)* %ptr) {
; A load without !invariant.load metadata must not use the non-coherent
; (ld.global.nc) form.
; CHECK: ld.global.{{[a-z]}}32
  %a = load i32, i32 addrspace(1)* %ptr
  ret i32 %a
}

; CHECK-LABEL: @ld_not_global_addrspace
define i32 @ld_not_global_addrspace(i32 addrspace(0)* %ptr) {
; A load from the generic addrspace is not lowered to ld.global.nc.
; CHECK: ld.{{[a-z]}}32
  %a = load i32, i32 addrspace(0)* %ptr
  ret i32 %a
}

!0 = !{}