; RUN: opt -S -codegenprepare < %s | FileCheck %s --check-prefix=SLOW
; RUN: opt -S -codegenprepare -mattr=+bmi < %s | FileCheck %s --check-prefix=FAST_TZ
; RUN: opt -S -codegenprepare -mattr=+lzcnt < %s | FileCheck %s --check-prefix=FAST_LZ

target triple = "x86_64-unknown-unknown"
target datalayout = "e-n32:64"

; If the intrinsic is cheap, nothing should change.
; If the intrinsic is expensive, check if the input is zero to avoid the call.
; This is undoing speculation that may have been created by SimplifyCFG + InstCombine.
;
; The three RUN lines above cover both sides of that decision:
;   SLOW    - no subtarget features: both cttz and ctlz are treated as
;             expensive, so CodeGenPrepare should despeculate them.
;   FAST_TZ - +bmi: cttz is cheap, so the cttz call must be left alone.
;   FAST_LZ - +lzcnt: ctlz is cheap, so the ctlz call must be left alone.

; cttz with a "is_zero_undef == false" flag: the call must be defined for
; %A == 0, so the SLOW transform guards the call with an explicit zero test.
define i64 @cttz(i64 %A) {
entry:
  %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
  ret i64 %z

; Expensive case: the call is sunk under a zero check.  Inside the guarded
; block the intrinsic's flag flips from "i1 false" to "i1 true" (zero can no
; longer reach the call), and the phi supplies 64 (the bit width) directly
; for the %A == 0 path.
; SLOW-LABEL: @cttz(
; SLOW: entry:
; SLOW: %cmpz = icmp eq i64 %A, 0
; SLOW: br i1 %cmpz, label %cond.end, label %cond.false
; SLOW: cond.false:
; SLOW: %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
; SLOW: br label %cond.end
; SLOW: cond.end:
; SLOW: %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
; SLOW: ret i64 %ctz

; Cheap case (+bmi): the call is untouched, flag still "i1 false".
; FAST_TZ-LABEL: @cttz(
; FAST_TZ: %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
; FAST_TZ: ret i64 %z
}

; Same pattern for ctlz; the cheap-path check uses +lzcnt instead of +bmi.
define i64 @ctlz(i64 %A) {
entry:
  %z = call i64 @llvm.ctlz.i64(i64 %A, i1 false)
  ret i64 %z

; Expensive case: identical despeculation shape to @cttz above — zero test,
; guarded call with the flag flipped to "i1 true", and a phi producing 64
; for the zero input.
; SLOW-LABEL: @ctlz(
; SLOW: entry:
; SLOW: %cmpz = icmp eq i64 %A, 0
; SLOW: br i1 %cmpz, label %cond.end, label %cond.false
; SLOW: cond.false:
; SLOW: %z = call i64 @llvm.ctlz.i64(i64 %A, i1 true)
; SLOW: br label %cond.end
; SLOW: cond.end:
; SLOW: %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
; SLOW: ret i64 %ctz

; Cheap case (+lzcnt): the call is untouched, flag still "i1 false".
; FAST_LZ-LABEL: @ctlz(
; FAST_LZ: %z = call i64 @llvm.ctlz.i64(i64 %A, i1 false)
; FAST_LZ: ret i64 %z
}

declare i64 @llvm.cttz.i64(i64, i1)
declare i64 @llvm.ctlz.i64(i64, i1)