; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64

; The zero-extended load of @c is added as an i128 to the constant expression
; "add (i128 ptrtoint (i32* @a to i128), i128 2)". The bits above 63 of the sum
; are truncated to i8 and stored back to @c (a constant 0 on the 32-bit target;
; computed via setb/adcb on x86-64), and the low 32 bits are stored through the
; pointer loaded from @b.

@a = common global i32 0, align 4
@c = common local_unnamed_addr global i8 0, align 1
@b = common local_unnamed_addr global i32* null, align 8

define void @e() {
; X86-LABEL: e:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl b, %eax
; X86-NEXT:    .p2align 4, 0x90
; X86-NEXT:  .LBB0_1: # %for.cond
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movzbl c, %ecx
; X86-NEXT:    leal a+2(%ecx), %ecx
; X86-NEXT:    movb $0, c
; X86-NEXT:    movl %ecx, (%eax)
; X86-NEXT:    jmp .LBB0_1
;
; X64-LABEL: e:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq {{.*}}(%rip), %rax
; X64-NEXT:    movl $a, %esi
; X64-NEXT:    .p2align 4, 0x90
; X64-NEXT:  .LBB0_1: # %for.cond
; X64-NEXT:    # =>This Inner Loop Header: Depth=1
; X64-NEXT:    movzbl {{.*}}(%rip), %edx
; X64-NEXT:    addq %rsi, %rdx
; X64-NEXT:    setb %cl
; X64-NEXT:    addq $2, %rdx
; X64-NEXT:    adcb $0, %cl
; X64-NEXT:    movb %cl, {{.*}}(%rip)
; X64-NEXT:    movl %edx, (%rax)
; X64-NEXT:    jmp .LBB0_1
entry:
  %0 = load i32*, i32** @b, align 8
  br label %for.cond

for.cond:
  %1 = load i8, i8* @c, align 1
  %conv = zext i8 %1 to i128
  %add = add nuw nsw i128 %conv, add (i128 ptrtoint (i32* @a to i128), i128 2)
  %2 = lshr i128 %add, 64
  %conv1 = trunc i128 %2 to i8
  store i8 %conv1, i8* @c, align 1
  %conv2 = trunc i128 %add to i32
  store i32 %conv2, i32* %0, align 4
  br label %for.cond
}