; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep li.*16
; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep addi

; Codegen lvx (R+16) as t = li 16, lvx t,R
; This shares the 16 between the two loads.

define void @func(<4 x float>* %a, <4 x float>* %b) {
  %tmp1 = getelementptr <4 x float>, <4 x float>* %b, i32 1   ; <<4 x float>*> [#uses=1]
  %tmp = load <4 x float>, <4 x float>* %tmp1                 ; <<4 x float>> [#uses=1]
  %tmp3 = getelementptr <4 x float>, <4 x float>* %a, i32 1   ; <<4 x float>*> [#uses=1]
  %tmp4 = load <4 x float>, <4 x float>* %tmp3                ; <<4 x float>> [#uses=1]
  %tmp5 = fmul <4 x float> %tmp, %tmp4                        ; <<4 x float>> [#uses=1]
  %tmp8 = load <4 x float>, <4 x float>* %b                   ; <<4 x float>> [#uses=1]
  %tmp9 = fadd <4 x float> %tmp5, %tmp8                       ; <<4 x float>> [#uses=1]
  store <4 x float> %tmp9, <4 x float>* %a
  ret void
}
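
; For reference, a rough sketch of the PowerPC assembly this test expects for
; the two offset loads (register numbers below are illustrative only and are
; not checked by the test):
;
;     li r2, 16        ; materialize the shared offset once
;     lvx v0, r2, r4   ; load <4 x float> from %b + 16 via reg+reg addressing
;     lvx v1, r2, r3   ; load <4 x float> from %a + 16, reusing the same r2
;
; The RUN lines only check that an "li ... 16" appears and that no "addi" is
; emitted to fold the offset into a base pointer.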