; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck %s
;
; Check that x86's peephole optimization doesn't fold a 64-bit load (movsd) into
; addpd.
; rdar://problem/18236850

%struct.S1 = type { double, double }

@g = common global %struct.S1 zeroinitializer, align 8

declare void @foo3(%struct.S1*)

; The scalar load from the stack slot must stay a movsd (64-bit load) feeding
; addpd as a register operand; folding the load into addpd would read 128 bits
; and pick up garbage in the upper lane.
; CHECK: movsd {{[0-9]*}}(%rsp), [[R0:%xmm[0-9]+]]
; CHECK: addpd [[R0]], %xmm{{[0-9]+}}

define void @foo1(double %a.coerce0, double %a.coerce1, double %b.coerce0, double %b.coerce1) {
  ; Spill a <2 x double> to the stack and let @foo3 initialize it, so the
  ; element loads below come from a stack slot.
  %1 = alloca <2 x double>, align 16
  %tmpcast = bitcast <2 x double>* %1 to %struct.S1*
  call void @foo3(%struct.S1* %tmpcast) #2
  %p2 = getelementptr inbounds %struct.S1, %struct.S1* %tmpcast, i64 0, i32 0
  %2 = load double, double* %p2, align 16
  %p3 = getelementptr inbounds %struct.S1, %struct.S1* %tmpcast, i64 0, i32 1
  %3 = load double, double* %p3, align 8
  ; Build two vectors whose lanes each mix a loaded scalar with a constant,
  ; then add them — the fadd is the addpd the CHECK lines inspect.
  %4 = insertelement <2 x double> undef, double %2, i32 0
  %5 = insertelement <2 x double> %4, double 0.000000e+00, i32 1
  %6 = insertelement <2 x double> undef, double %3, i32 1
  %7 = insertelement <2 x double> %6, double 1.000000e+00, i32 0
  %8 = fadd <2 x double> %5, %7
  store <2 x double> %8, <2 x double>* bitcast (%struct.S1* @g to <2 x double>*), align 16
  ret void
}