; RUN: llc < %s -mtriple=x86_64-linux -O0 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-win32 -O0 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -march=x86 -O0 | FileCheck %s --check-prefix=X32

; GEP indices are interpreted as signed integers, so they
; should be sign-extended to 64 bits on 64-bit targets.
; PR3181
define i32 @test1(i32 %t3, i32* %t1) nounwind {
  ; 32-bit index into an i32 array: on x86-64 the index must be
  ; sign-extended (movslq) before being used in the address.
  %t9 = getelementptr i32* %t1, i32 %t3
  %t15 = load i32* %t9
  ret i32 %t15
; X32: test1:
; X32: movl (%eax,%ecx,4), %eax
; X32: ret

; X64: test1:
; X64: movslq %e[[A0:di|cx]], %rax
; X64: movl (%r[[A1:si|dx]],%rax,4), %eax
; X64: ret

}

define i32 @test2(i64 %t3, i32* %t1) nounwind {
  ; Index is already i64: no extension needed, fold straight into the
  ; addressing mode.
  %t9 = getelementptr i32* %t1, i64 %t3
  %t15 = load i32* %t9
  ret i32 %t15
; X32: test2:
; X32: movl (%edx,%ecx,4), %e
; X32: ret

; X64: test2:
; X64: movl (%r[[A1]],%r[[A0]],4), %eax
; X64: ret
}


; PR4984
define i8 @test3(i8* %start) nounwind {
entry:
  ; Negative constant offset folds into the displacement field.
  %A = getelementptr i8* %start, i64 -2
  %B = load i8* %A, align 1
  ret i8 %B


; X32: test3:
; X32: movl 4(%esp), %eax
; X32: movb -2(%eax), %al
; X32: ret

; X64: test3:
; X64: movb -2(%r[[A0]]), %al
; X64: ret

}

define double @test4(i64 %x, double* %p) nounwind {
entry:
  ; p[x + 16]: the constant part of the index (16 * 8 bytes = 128) should
  ; become the displacement while x stays in the scaled-index slot.
  %x.addr = alloca i64, align 8
  %p.addr = alloca double*, align 8
  store i64 %x, i64* %x.addr
  store double* %p, double** %p.addr
  %tmp = load i64* %x.addr
  %add = add nsw i64 %tmp, 16
  %tmp1 = load double** %p.addr
  %arrayidx = getelementptr inbounds double* %tmp1, i64 %add
  %tmp2 = load double* %arrayidx
  ret double %tmp2

; X32: test4:
; X32: 128(%e{{.*}},%e{{.*}},8)
; X64: test4:
; X64: 128(%r{{.*}},%r{{.*}},8)
}

; PR8961 - Make sure the sext for the GEP addressing comes before the load that
; is folded.
define i64 @test5(i8* %A, i32 %I, i64 %B) nounwind {
  %v8 = getelementptr i8* %A, i32 %I
  %v9 = bitcast i8* %v8 to i64*
  %v10 = load i64* %v9
  %v11 = add i64 %B, %v10
  ret i64 %v11
; X64: test5:
; X64: movslq %e[[A1]], %rax
; X64-NEXT: movq (%r[[A0]],%rax), %rax
; X64-NEXT: addq %{{rdx|r8}}, %rax
; X64-NEXT: ret
}

; PR9500, rdar://9156159 - Don't do non-local address mode folding,
; because it may require values which wouldn't otherwise be live out
; of their blocks.
define void @test6() {
if.end:
  %tmp15 = load i64* undef
  %dec = add i64 %tmp15, 13
  store i64 %dec, i64* undef
  ; %dec is defined in this block but the GEP using it lives in
  ; %invoke.cont16 — folding across the invoke would be invalid.
  %call17 = invoke i8* @_ZNK18G__FastAllocString4dataEv()
          to label %invoke.cont16 unwind label %lpad

invoke.cont16:
  %arrayidx18 = getelementptr inbounds i8* %call17, i64 %dec
  store i8 0, i8* %arrayidx18
  unreachable

lpad:
  %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
          cleanup
  unreachable
}
declare i8* @_ZNK18G__FastAllocString4dataEv() nounwind


; PR10605 / rdar://9930964 - Don't fold loads incorrectly. The load should
; happen before the store.
define i32 @test7({i32,i32,i32}* %tmp1, i32 %tmp71, i32 %tmp63) nounwind {
; X64: test7:
; X64: movl 8({{%rdi|%rcx}}), %eax
; X64: movl $4, 8({{%rdi|%rcx}})

  ; The load of field 2 must be emitted before the store of 4 to the same
  ; field; folding the load past the store would read the wrong value.
  %tmp29 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
  %tmp30 = load i32* %tmp29, align 4

  %p2 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
  store i32 4, i32* %p2

  %tmp72 = or i32 %tmp71, %tmp30
  %tmp73 = icmp ne i32 %tmp63, 32
  br i1 %tmp73, label %T, label %F

T:
  ret i32 %tmp72

F:
  ret i32 4
}

declare i32 @__gxx_personality_v0(...)