; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -fast-isel-sink-local-values -O0 -mtriple=x86_64-unknown-linux-gnu -o - %s | FileCheck %s -check-prefix=X640
; RUN: llc -fast-isel-sink-local-values -O0 -mtriple=i686-unknown -o - %s | FileCheck %s -check-prefix=6860
; RUN: llc -fast-isel-sink-local-values -mtriple=x86_64-unknown-linux-gnu -o - %s | FileCheck %s -check-prefix=X64
; RUN: llc -fast-isel-sink-local-values -mtriple=i686-unknown -o - %s | FileCheck %s -check-prefix=686

; Codegen test: an i64 arithmetic shift-right whose shift amount is computed
; at runtime from i16 globals (var_27 - 16610, zero-extended to i64), with the
; result truncated to i8 and stored through an undef pointer.  Lowering is
; checked at -O0 (prefixes X640/6860) and at default optimization (X64/686)
; for both x86-64 and i686.  NOTE(review): presumably a PR regression test —
; the CHECK bodies below are autogenerated and must not be hand-edited;
; regenerate with utils/update_llc_test_checks.py after any IR change.

@var_22 = external global i16, align 2
@var_27 = external global i16, align 2

define void @foo() {
; X640-LABEL: foo:
; X640:       # %bb.0: # %bb
; X640-NEXT:    movzwl var_22, %eax
; X640-NEXT:    movzwl var_27, %ecx
; X640-NEXT:    xorl %ecx, %eax
; X640-NEXT:    movzwl var_27, %ecx
; X640-NEXT:    xorl %ecx, %eax
; X640-NEXT:    movslq %eax, %rdx
; X640-NEXT:    movq %rdx, -{{[0-9]+}}(%rsp)
; X640-NEXT:    movzwl var_22, %eax
; X640-NEXT:    movzwl var_27, %ecx
; X640-NEXT:    xorl %ecx, %eax
; X640-NEXT:    movzwl var_27, %ecx
; X640-NEXT:    xorl %ecx, %eax
; X640-NEXT:    movslq %eax, %rdx
; X640-NEXT:    movzwl var_27, %eax
; X640-NEXT:    subl $16610, %eax # imm = 0x40E2
; X640-NEXT:    movl %eax, %eax
; X640-NEXT:    movl %eax, %ecx
; X640-NEXT:    # kill: def $cl killed $rcx
; X640-NEXT:    sarq %cl, %rdx
; X640-NEXT:    movb %dl, %cl
; X640-NEXT:    # implicit-def: $rdx
; X640-NEXT:    movb %cl, (%rdx)
; X640-NEXT:    retq
;
; 6860-LABEL: foo:
; 6860:       # %bb.0: # %bb
; 6860-NEXT:    pushl %ebp
; 6860-NEXT:    .cfi_def_cfa_offset 8
; 6860-NEXT:    .cfi_offset %ebp, -8
; 6860-NEXT:    movl %esp, %ebp
; 6860-NEXT:    .cfi_def_cfa_register %ebp
; 6860-NEXT:    pushl %ebx
; 6860-NEXT:    pushl %edi
; 6860-NEXT:    pushl %esi
; 6860-NEXT:    andl $-8, %esp
; 6860-NEXT:    subl $32, %esp
; 6860-NEXT:    .cfi_offset %esi, -20
; 6860-NEXT:    .cfi_offset %edi, -16
; 6860-NEXT:    .cfi_offset %ebx, -12
; 6860-NEXT:    movw var_22, %ax
; 6860-NEXT:    movzwl var_27, %ecx
; 6860-NEXT:    movw %cx, %dx
; 6860-NEXT:    xorw %dx, %ax
; 6860-NEXT:    # implicit-def: $esi
; 6860-NEXT:    movw %ax, %si
; 6860-NEXT:    xorl %ecx, %esi
; 6860-NEXT:    movw %si, %ax
; 6860-NEXT:    movzwl %ax, %ecx
; 6860-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; 6860-NEXT:    movl $0, {{[0-9]+}}(%esp)
; 6860-NEXT:    movw var_22, %ax
; 6860-NEXT:    movzwl var_27, %ecx
; 6860-NEXT:    movw %cx, %dx
; 6860-NEXT:    xorw %dx, %ax
; 6860-NEXT:    # implicit-def: $esi
; 6860-NEXT:    movw %ax, %si
; 6860-NEXT:    xorl %ecx, %esi
; 6860-NEXT:    movw %si, %ax
; 6860-NEXT:    movzwl %ax, %esi
; 6860-NEXT:    addl $-16610, %ecx # imm = 0xBF1E
; 6860-NEXT:    movb %cl, %bl
; 6860-NEXT:    xorl %ecx, %ecx
; 6860-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; 6860-NEXT:    movb %bl, %cl
; 6860-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; 6860-NEXT:    shrdl %cl, %edi, %esi
; 6860-NEXT:    testb $32, %bl
; 6860-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; 6860-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; 6860-NEXT:    jne .LBB0_2
; 6860-NEXT:  # %bb.1: # %bb
; 6860-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; 6860-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; 6860-NEXT:  .LBB0_2: # %bb
; 6860-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; 6860-NEXT:    movb %al, %cl
; 6860-NEXT:    # implicit-def: $eax
; 6860-NEXT:    movb %cl, (%eax)
; 6860-NEXT:    leal -12(%ebp), %esp
; 6860-NEXT:    popl %esi
; 6860-NEXT:    popl %edi
; 6860-NEXT:    popl %ebx
; 6860-NEXT:    popl %ebp
; 6860-NEXT:    .cfi_def_cfa %esp, 4
; 6860-NEXT:    retl
;
; X64-LABEL: foo:
; X64:       # %bb.0: # %bb
; X64-NEXT:    movzwl {{.*}}(%rip), %eax
; X64-NEXT:    movzwl {{.*}}(%rip), %ecx
; X64-NEXT:    movl %ecx, %edx
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    xorl %eax, %edx
; X64-NEXT:    movzwl %dx, %eax
; X64-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
; X64-NEXT:    addl $-16610, %ecx # imm = 0xBF1E
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shrq %cl, %rax
; X64-NEXT:    movb %al, (%rax)
; X64-NEXT:    retq
;
; 686-LABEL: foo:
; 686:       # %bb.0: # %bb
; 686-NEXT:    pushl %ebp
; 686-NEXT:    .cfi_def_cfa_offset 8
; 686-NEXT:    .cfi_offset %ebp, -8
; 686-NEXT:    movl %esp, %ebp
; 686-NEXT:    .cfi_def_cfa_register %ebp
; 686-NEXT:    andl $-8, %esp
; 686-NEXT:    subl $8, %esp
; 686-NEXT:    movzwl var_22, %eax
; 686-NEXT:    movzwl var_27, %ecx
; 686-NEXT:    movl %ecx, %edx
; 686-NEXT:    xorl %ecx, %edx
; 686-NEXT:    xorl %eax, %edx
; 686-NEXT:    movzwl %dx, %eax
; 686-NEXT:    movl %eax, (%esp)
; 686-NEXT:    movl $0, {{[0-9]+}}(%esp)
; 686-NEXT:    addl $-16610, %ecx # imm = 0xBF1E
; 686-NEXT:    xorl %edx, %edx
; 686-NEXT:    shrdl %cl, %edx, %eax
; 686-NEXT:    testb $32, %cl
; 686-NEXT:    jne .LBB0_2
; 686-NEXT:  # %bb.1: # %bb
; 686-NEXT:    movl %eax, %edx
; 686-NEXT:  .LBB0_2: # %bb
; 686-NEXT:    movb %dl, (%eax)
; 686-NEXT:    movl %ebp, %esp
; 686-NEXT:    popl %ebp
; 686-NEXT:    .cfi_def_cfa %esp, 4
; 686-NEXT:    retl
bb:
  %tmp = alloca i64, align 8
  ; (var_22 ^ var_27) ^ var_27, sign-extended and stored to the local slot
  %tmp1 = load i16, i16* @var_22, align 2
  %tmp2 = zext i16 %tmp1 to i32
  %tmp3 = load i16, i16* @var_27, align 2
  %tmp4 = zext i16 %tmp3 to i32
  %tmp5 = xor i32 %tmp2, %tmp4
  %tmp6 = load i16, i16* @var_27, align 2
  %tmp7 = zext i16 %tmp6 to i32
  %tmp8 = xor i32 %tmp5, %tmp7
  %tmp9 = sext i32 %tmp8 to i64
  store i64 %tmp9, i64* %tmp, align 8
  ; same xor-twice expression recomputed as the shift operand
  %tmp10 = load i16, i16* @var_22, align 2
  %tmp11 = zext i16 %tmp10 to i32
  %tmp12 = load i16, i16* @var_27, align 2
  %tmp13 = zext i16 %tmp12 to i32
  %tmp14 = xor i32 %tmp11, %tmp13
  %tmp15 = load i16, i16* @var_27, align 2
  %tmp16 = zext i16 %tmp15 to i32
  %tmp17 = xor i32 %tmp14, %tmp16
  %tmp18 = sext i32 %tmp17 to i64
  ; variable shift amount: zext(var_27 - 16610) — may exceed 63, so the
  ; i686 lowering needs the shrdl + testb $32 double-register sequence
  %tmp19 = load i16, i16* @var_27, align 2
  %tmp20 = zext i16 %tmp19 to i32
  %tmp21 = sub nsw i32 %tmp20, 16610
  %tmp22 = zext i32 %tmp21 to i64
  %tmp23 = ashr i64 %tmp18, %tmp22
  %tmp24 = trunc i64 %tmp23 to i8
  store i8 %tmp24, i8* undef, align 1
  ret void
}