; RUN: llc < %s -mcpu=cortex-a9 | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
target triple = "thumbv7-apple-ios0.0.0"

; CHECK: f
; The vld2 and vst2 are not aligned wrt each other, the second Q loaded is the
; first one stored.
; The coalescer must find a super-register larger than QQ to eliminate the copy
; setting up the vst2 data.
; CHECK: vld2
; CHECK-NOT: vorr
; CHECK-NOT: vmov
; CHECK: vst2
define void @f(float* %p, i32 %c) nounwind ssp {
entry:
  %0 = bitcast float* %p to i8*
  ; Interleaved load of two <4 x float> Q registers from %p (alignment 4).
  %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
  ; Keep only the SECOND loaded vector (index 1 of the result pair).
  %vld221 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
  ; %p + 8 floats = %p + 32 bytes: the store window overlaps the load window
  ; shifted by one Q register, which is what forces the coalescer to use a
  ; super-register wider than QQ.
  %add.ptr = getelementptr inbounds float* %p, i32 8
  %1 = bitcast float* %add.ptr to i8*
  ; Store the second loaded vector as the FIRST element of the vst2 pair;
  ; the second element is undef.
  tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %vld221, <4 x float> undef, i32 4)
  ret void
}

; CHECK: f1
; FIXME: This function still has copies.
define void @f1(float* %p, i32 %c) nounwind ssp {
entry:
  %0 = bitcast float* %p to i8*
  ; First interleaved load at %p.
  %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
  %vld221 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
  %add.ptr = getelementptr inbounds float* %p, i32 8
  %1 = bitcast float* %add.ptr to i8*
  ; Second interleaved load at %p + 32 bytes (same address the vst2 below
  ; writes to).
  %vld22 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
  %vld2215 = extractvalue { <4 x float>, <4 x float> } %vld22, 0
  ; The stored pair mixes results of the two loads: second vector of the
  ; first load, then first vector of the second load.
  tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %vld221, <4 x float> %vld2215, i32 4)
  ret void
}

; CHECK: f2
; FIXME: This function still has copies.
define void @f2(float* %p, i32 %c) nounwind ssp {
entry:
  %0 = bitcast float* %p to i8*
  ; Initial interleaved load at %p; only its second vector is carried into
  ; the loop.
  %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
  %vld224 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
  br label %do.body

do.body:                                          ; preds = %do.body, %entry
  ; Loop-carried value: the second vector of the PREVIOUS iteration's vld2
  ; (seeded with %vld224 from the entry block).
  %qq0.0.1.0 = phi <4 x float> [ %vld224, %entry ], [ %vld2216, %do.body ]
  %c.addr.0 = phi i32 [ %c, %entry ], [ %dec, %do.body ]
  %p.addr.0 = phi float* [ %p, %entry ], [ %add.ptr, %do.body ]
  ; Advance by 8 floats (32 bytes) each iteration.
  %add.ptr = getelementptr inbounds float* %p.addr.0, i32 8
  %1 = bitcast float* %add.ptr to i8*
  %vld22 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
  %vld2215 = extractvalue { <4 x float>, <4 x float> } %vld22, 0
  %vld2216 = extractvalue { <4 x float>, <4 x float> } %vld22, 1
  ; Store pairs the loop-carried vector with the first vector of this
  ; iteration's load, back to the same address just loaded from.
  tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %qq0.0.1.0, <4 x float> %vld2215, i32 4)
  ; Count %c down to zero; %c is assumed >= 1 since this is a do-while
  ; shape (the body always runs once) — the test never branches on entry.
  %dec = add nsw i32 %c.addr.0, -1
  %tobool = icmp eq i32 %dec, 0
  br i1 %tobool, label %do.end, label %do.body

do.end:                                           ; preds = %do.body
  ret void
}

declare { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8*, i32) nounwind readonly
declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind