; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=-999 -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

declare double @sin(double)
declare double @cos(double)
declare double @pow(double, double)
declare double @exp2(double)
declare double @sqrt(double)
declare i64 @round(i64)


; CHECK: sin_libm
; CHECK: call <2 x double> @llvm.sin.v2f64
; CHECK: ret void
define void @sin_libm(double* %a, double* %b, double* %c) {
entry:
  %i0 = load double, double* %a, align 8
  %i1 = load double, double* %b, align 8
  %mul = fmul double %i0, %i1
  %call = tail call double @sin(double %mul) nounwind readnone
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  %call5 = tail call double @sin(double %mul5) nounwind readnone
  store double %call, double* %c, align 8
  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
  store double %call5, double* %arrayidx5, align 8
  ret void
}

; CHECK: cos_libm
; CHECK: call <2 x double> @llvm.cos.v2f64
; CHECK: ret void
define void @cos_libm(double* %a, double* %b, double* %c) {
entry:
  %i0 = load double, double* %a, align 8
  %i1 = load double, double* %b, align 8
  %mul = fmul double %i0, %i1
  %call = tail call double @cos(double %mul) nounwind readnone
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  %call5 = tail call double @cos(double %mul5) nounwind readnone
  store double %call, double* %c, align 8
  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
  store double %call5, double* %arrayidx5, align 8
  ret void
}

; CHECK: pow_libm
; CHECK: call <2 x double> @llvm.pow.v2f64
; CHECK: ret void
define void @pow_libm(double* %a, double* %b, double* %c) {
entry:
  %i0 = load double, double* %a, align 8
  %i1 = load double, double* %b, align 8
  %mul = fmul double %i0, %i1
  %call = tail call double @pow(double %mul,double %mul) nounwind readnone
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  %call5 = tail call double @pow(double %mul5,double %mul5) nounwind readnone
  store double %call, double* %c, align 8
  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
  store double %call5, double* %arrayidx5, align 8
  ret void
}


; CHECK: exp2_libm
; CHECK: call <2 x double> @llvm.exp2.v2f64
; CHECK: ret void
define void @exp2_libm(double* %a, double* %b, double* %c) {
entry:
  %i0 = load double, double* %a, align 8
  %i1 = load double, double* %b, align 8
  %mul = fmul double %i0, %i1
  %call = tail call double @exp2(double %mul) nounwind readnone
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  %call5 = tail call double @exp2(double %mul5) nounwind readnone
  store double %call, double* %c, align 8
  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
  store double %call5, double* %arrayidx5, align 8
  ret void
}


; CHECK: sqrt_libm
; CHECK: call <2 x double> @llvm.sqrt.v2f64
; CHECK: ret void
define void @sqrt_libm(double* %a, double* %b, double* %c) {
entry:
  %i0 = load double, double* %a, align 8
  %i1 = load double, double* %b, align 8
  %mul = fmul double %i0, %i1
  %call = tail call nnan double @sqrt(double %mul) nounwind readnone
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  %call5 = tail call nnan double @sqrt(double %mul5) nounwind readnone
  store double %call, double* %c, align 8
  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
  store double %call5, double* %arrayidx5, align 8
  ret void
}


; Negative test case
; CHECK: round_custom
; CHECK-NOT: load <4 x i64>
; CHECK: ret void
define void @round_custom(i64* %a, i64* %b, i64* %c) {
entry:
  %i0 = load i64, i64* %a, align 8
  %i1 = load i64, i64* %b, align 8
  %mul = mul i64 %i0, %i1
  %call = tail call i64 @round(i64 %mul) nounwind readnone
  %arrayidx3 = getelementptr inbounds i64, i64* %a, i64 1
  %i3 = load i64, i64* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds i64, i64* %b, i64 1
  %i4 = load i64, i64* %arrayidx4, align 8
  %mul5 = mul i64 %i3, %i4
  %call5 = tail call i64 @round(i64 %mul5) nounwind readnone
  store i64 %call, i64* %c, align 8
  %arrayidx5 = getelementptr inbounds i64, i64* %c, i64 1
  store i64 %call5, i64* %arrayidx5, align 8
  ret void
}


; CHECK: declare <2 x double> @llvm.sin.v2f64(<2 x double>) [[ATTR0:#[0-9]+]]
; CHECK: declare <2 x double> @llvm.cos.v2f64(<2 x double>) [[ATTR0]]
; CHECK: declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>) [[ATTR0]]
; CHECK: declare <2 x double> @llvm.exp2.v2f64(<2 x double>) [[ATTR0]]

; CHECK: attributes [[ATTR0]] = { nounwind readnone }