; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=bdver1 | FileCheck %s

; clang -Oz -c test1.cpp -emit-llvm -S -o
; Verify that we generate the shld instruction when we are optimizing for
; size, even for X86_64 processors that are known to have poor-latency
; double-precision shift instructions.
; uint64_t lshift10(uint64_t a, uint64_t b)
; {
;   return (a << 10) | (b >> 54);
; }

; Function Attrs: minsize nounwind readnone uwtable
define i64 @_Z8lshift10mm(i64 %a, i64 %b) #0 {
; CHECK-LABEL: _Z8lshift10mm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    shldq $10, %rsi, %rdi
; CHECK-NEXT:    movq %rdi, %rax
; CHECK-NEXT:    retq
entry:
  %shl = shl i64 %a, 10
  %shr = lshr i64 %b, 54
  %or = or i64 %shr, %shl
  ret i64 %or
}

attributes #0 = { minsize nounwind readnone uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }


; clang -Os -c test2.cpp -emit-llvm -S
; Verify that we generate the shld instruction when we are optimizing for
; size, even for X86_64 processors that are known to have poor-latency
; double-precision shift instructions.
; uint64_t lshift11(uint64_t a, uint64_t b)
; {
;   return (a << 11) | (b >> 53);
; }

; Function Attrs: nounwind optsize readnone uwtable
define i64 @_Z8lshift11mm(i64 %a, i64 %b) #1 {
; CHECK-LABEL: _Z8lshift11mm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    shldq $11, %rsi, %rdi
; CHECK-NEXT:    movq %rdi, %rax
; CHECK-NEXT:    retq
entry:
  %shl = shl i64 %a, 11
  %shr = lshr i64 %b, 53
  %or = or i64 %shr, %shl
  ret i64 %or
}

attributes #1 = { nounwind optsize readnone uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }

; clang -O2 -c test2.cpp -emit-llvm -S
; Verify that we do not generate the shld instruction when we are not
; optimizing for size, for X86_64 processors that are known to have
; poor-latency double-precision shift instructions.
; uint64_t lshift12(uint64_t a, uint64_t b)
; {
;   return (a << 12) | (b >> 52);
; }

; Function Attrs: nounwind readnone uwtable
define i64 @_Z8lshift12mm(i64 %a, i64 %b) #2 {
; CHECK-LABEL: _Z8lshift12mm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    shlq $12, %rdi
; CHECK-NEXT:    shrq $52, %rsi
; CHECK-NEXT:    leaq (%rsi,%rdi), %rax
; CHECK-NEXT:    retq
entry:
  %shl = shl i64 %a, 12
  %shr = lshr i64 %b, 52
  %or = or i64 %shr, %shl
  ret i64 %or
}

attributes #2 = { nounwind readnone uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
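
; A note on the instruction selection being tested (informal commentary, not
; checked by FileCheck): in AT&T syntax, the double-precision shift
;   shldq $N, %src, %dst   ; dst = (dst << N) | (src >> (64 - N))
; computes exactly the (a << N) | (b >> (64 - N)) pattern in the functions
; above. With minsize/optsize the shl/lshr/or sequence folds into a single
; shldq; without a size attribute (lshift12), the backend instead emits
; separate shlq/shrq and merges the results with leaq (addition and or agree
; here because the shifted values have no overlapping bits), since shld has
; poor latency on bdver1.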