; RUN: llc < %s -march=xcore > %t1.s
; RUN: grep "bl __misaligned_load" %t1.s | count 1
; RUN: grep ld16s %t1.s | count 2
; RUN: grep ldw %t1.s | count 2
; RUN: grep shl %t1.s | count 2
; RUN: grep shr %t1.s | count 1
; RUN: grep zext %t1.s | count 1
; RUN: grep "or " %t1.s | count 2

; Byte-aligned load: expands to a call to __misaligned_load.
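; A rough sketch of the sequence the grep above checks for, assuming the XCore
; calling convention passes %p in r0 and returns the loaded word in r0 (an
; assumption; only the "bl __misaligned_load" call itself is verified):
;   bl __misaligned_load     ; runtime helper performs the unaligned read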
define i32 @align1(i32* %p) nounwind {
entry:
	%0 = load i32* %p, align 1		; <i32> [#uses=1]
	ret i32 %0
}

; Halfword-aligned load: expands to two 16-bit loads.
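; A rough sketch of the expected combine in C-like pseudocode, assuming
; little-endian byte order (an assumption; only the instruction counts in the
; greps above are verified):
;   lo = ld16s(p)              ; sign-extending halfword load of bytes 0..1
;   hi = ld16s(p + 2)          ; sign-extending halfword load of bytes 2..3
;   result = zext16(lo) | (hi << 16)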
define i32 @align2(i32* %p) nounwind {
entry:
	%0 = load i32* %p, align 2		; <i32> [#uses=1]
	ret i32 %0
}

@a = global [5 x i8] zeroinitializer, align 4

; Constant offset from a word-aligned base: expands to two 32-bit loads.
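; A rough sketch in C-like pseudocode for the byte offset of 1 from @a,
; assuming little-endian byte order (an assumption; only the instruction
; counts in the greps above are verified):
;   w0 = ldw(&a[0])            ; aligned word holding bytes 0..3
;   w1 = ldw(&a[4])            ; next aligned word, holding byte 4
;   result = (w0 >> 8) | (w1 << 24)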
define i32 @align3() nounwind {
entry:
	%0 = load i32* bitcast (i8* getelementptr ([5 x i8]* @a, i32 0, i32 1) to i32*), align 1
	ret i32 %0
}