; (scrape artifact, kept as comment) Home | History | Annotate | Download | only in ARM
; RUN: llc < %s -mcpu=swift -mtriple=armv7s-apple-ios | FileCheck %s
; RUN: llc < %s -arm-assume-misaligned-load-store -mcpu=swift -mtriple=armv7s-apple-ios | FileCheck %s

; Check that we avoid producing vldm instructions using d registers that
; begin in the most-significant half of a q register. These require more
; micro-ops on swift and so aren't worth combining.

; CHECK-LABEL: test_vldm
; CHECK: vldmia r{{[0-9]+}}, {d2, d3, d4}
; CHECK-NOT: vldmia r{{[0-9]+}}, {d1, d2, d3, d4}

     12 declare fastcc void @force_register(double %d0, double %d1, double %d2, double %d3, double %d4) 
     13 
     14 define void @test_vldm(double* %x, double * %y) {
     15 entry:
     16   %addr1 = getelementptr double, double * %x, i32 1
     17   %addr2 = getelementptr double, double * %x, i32 2
     18   %addr3 = getelementptr double, double * %x, i32 3
     19   %d0 = load double , double * %y
     20   %d1 = load double , double * %x
     21   %d2 = load double , double * %addr1
     22   %d3 = load double , double * %addr2
     23   %d4 = load double , double * %addr3
     24   ; We are trying to force x[0-3] in registers d1 to d4 so that we can test we
     25   ; don't form a "vldmia rX, {d1, d2, d3, d4}".
     26   ; We are relying on the calling convention and that register allocation
     27   ; properly coalesces registers.
     28   call fastcc void @force_register(double %d0, double %d1, double %d2, double %d3, double %d4)
     29   ret void
     30 }
     31