; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=CHECK

; This tests for a cyclic dependency in the generated DAG: the volatile
; loads/stores and the two sdiv operations must not be scheduled in a way
; that creates a cycle between chained memory operations.

@c = external local_unnamed_addr global i32, align 4
@a = external local_unnamed_addr global i32, align 4
@b = external local_unnamed_addr global i32, align 4

define void @foo() {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    movl $0, {{.*}}(%rip)
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %r8d
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %edi
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %esi
; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; CHECK-NEXT:    cltd
; CHECK-NEXT:    idivl {{.*}}(%rip)
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    movl {{.*}}(%rip), %eax
; CHECK-NEXT:    cltd
; CHECK-NEXT:    idivl %esi
; CHECK-NEXT:    andl %edi, %eax
; CHECK-NEXT:    addl %ecx, %eax
; CHECK-NEXT:    andl %r8d, %eax
; CHECK-NEXT:    movl %eax, (%rax)
; CHECK-NEXT:    retq
entry:
  %e = alloca i32, align 4
  %e.0.e.0.24 = load volatile i32, i32* %e, align 4
  %e.0.e.0.25 = load volatile i32, i32* %e, align 4
  %e.0.e.0.26 = load volatile i32, i32* %e, align 4
  %e.0.e.0.27 = load volatile i32, i32* %e, align 4
  %e.0.e.0.28 = load volatile i32, i32* %e, align 4
  %e.0.e.0.29 = load volatile i32, i32* %e, align 4
  %e.0.e.0.30 = load volatile i32, i32* %e, align 4
  %e.0.e.0.31 = load volatile i32, i32* %e, align 4
  %e.0.e.0.32 = load volatile i32, i32* %e, align 4
  %e.0.e.0.33 = load volatile i32, i32* %e, align 4
  %e.0.e.0.34 = load volatile i32, i32* %e, align 4
  %e.0.e.0.35 = load volatile i32, i32* %e, align 4
  %e.0.e.0.36 = load volatile i32, i32* %e, align 4
  %e.0.e.0.37 = load volatile i32, i32* %e, align 4
  %e.0.e.0.39 = load volatile i32, i32* %e, align 4
  %tmp = load i32, i32* @a, align 4
  store i32 0, i32* @b, align 4
  %e.0.e.0.41 = load volatile i32, i32* %e, align 4
  %add17 = add nsw i32 %e.0.e.0.41, 0
  %e.0.e.0.42 = load volatile i32, i32* %e, align 4
  %tmp1 = load i32, i32* @c, align 4
  %e.0.e.0.43 = load volatile i32, i32* %e, align 4
  %div = sdiv i32 %tmp1, %e.0.e.0.43
  %and18 = and i32 %div, %e.0.e.0.42
  %e.0.e.0.44 = load volatile i32, i32* %e, align 4
  %div19 = sdiv i32 %e.0.e.0.44, %tmp
  %add20 = add nsw i32 %div19, %and18
  %and21 = and i32 %add20, %add17
  store volatile i32 %and21, i32* undef, align 4
  ret void
}