/external/llvm/test/CodeGen/Mips/ |
small-section-reserve-gp.ll |
     4  @i = internal unnamed_addr global i32 0, align 4
     9  %0 = load i32* @i, align 4
|
/external/llvm/test/CodeGen/NVPTX/ |
constant-vectors.ll |
     5  ; CHECK: .visible .global .align 16 .b8 testArray[8] = {0, 1, 2, 3, 4, 5, 6, 7};
     6  @testArray = constant [2 x <4 x i8>] [<4 x i8> <i8 0, i8 1, i8 2, i8 3>, <4 x i8> <i8 4, i8 5, i8 6, i8 7>], align 16
|
managed.ll |
     4  ; CHECK: .visible .global .align 4 .u32 device_g;
     6  ; CHECK: .visible .global .attribute(.managed) .align 4 .u32 managed_g;
|
/external/llvm/test/CodeGen/Thumb/ |
2009-07-20-TwoAddrBug.ll |
     7  %0 = load i64* @Time.2535, align 4 ; <i64> [#uses=2]
     9  store i64 %1, i64* @Time.2535, align 4
|
/external/llvm/test/CodeGen/Thumb2/ |
thumb2-ldrd.ll |
     9  %0 = load i64** @b, align 4
    10  %1 = load i64* %0, align 4
|
/external/llvm/test/CodeGen/X86/ |
2008-02-05-ISelCrash.ll |
     8  %tmp1 = load i64* @nodes, align 8 ; <i64> [#uses=1]
    10  store i64 %tmp2, i64* @nodes, align 8
|
2008-04-24-MemCpyBug.ll |
     7  declare void @test63(%struct.S63* byval align 4 ) nounwind
    10  tail call void @test63( %struct.S63* byval align 4 @g1s63 ) nounwind
|
2010-05-10-DAGCombinerBug.ll |
     7  %0 = load i16* %num2, align 2 ; <i16> [#uses=2]
     9  store i16 %1, i16* %num2, align 2
|
fmul-zero.ll |
     5  load <4 x float>* %0, align 1
     7  store <4 x float> %3, <4 x float>* %0, align 1
|
pr2182.ll |
    18  %tmp = load volatile i32* @x, align 4 ; <i32> [#uses=1]
    20  store volatile i32 %tmp1, i32* @x, align 4
    21  %tmp.1 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
    23  store volatile i32 %tmp1.1, i32* @x, align 4
    24  %tmp.2 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
    26  store volatile i32 %tmp1.2, i32* @x, align 4
    27  %tmp.3 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
    29  store volatile i32 %tmp1.3, i32* @x, align 4
|
sandybridge-loads.ll |
    11  %v0 = load <8 x float>* %a, align 16 ; <---- unaligned!
    12  %v1 = load <8 x float>* %b, align 32 ; <---- aligned!
    14  %v2 = load <8 x float>* %c, align 32 ; <---- aligned!
    18  store <8 x i32> %r, <8 x i32>* undef, align 32
    33  %v0 = load <8 x float>* %a, align 32
    34  %v1 = load <8 x float>* %b, align 32
    35  store <8 x float> %v0, <8 x float>* %b, align 32 ; <--- aligned
    36  store <8 x float> %v1, <8 x float>* %a, align 16 ; <--- unaligned
|
sse-align-4.ll |
     4  store <4 x float> %x, <4 x float>* %p, align 4
     8  store <2 x double> %x, <2 x double>* %p, align 8
|
sse-align-9.ll |
     4  %t = load <4 x float>* %p, align 4
     8  %t = load <2 x double>* %p, align 8
|
stack-align-memcpy.ll |
     1  ; RUN: llc < %s -force-align-stack -mtriple i386-apple-darwin -mcpu=i486 | FileCheck %s
     5  declare void @bar(i8* nocapture, %struct.foo* align 4 byval) nounwind
    11  %dynalloc = alloca i8, i32 %y, align 1
    12  call void @bar(i8* %dynalloc, %struct.foo* align 4 byval %x)
    24  call void @bar(i8* %z, %struct.foo* align 4 byval %x)
    25  %dynalloc = alloca i8, i32 %y, align 1
    36  call void @bar(i8* %z, %struct.foo* align 4 byval %x)
    37  %statalloc = alloca i8, i32 8, align 1
|
stride-nine-with-base-reg.ll |
     8  @B = external global [1000 x i8], align 32
     9  @A = external global [1000 x i8], align 32
    10  @P = external global [1000 x i8], align 32
    11  @Q = external global [1000 x i8], align 32
    21  %tmp3 = load i8* %tmp2, align 4
    24  store i8 %tmp4, i8* %tmp5, align 4
    28  store i8 17, i8* %tmp10, align 4
    30  store i8 19, i8* %tmp11, align 4
|
/external/llvm/test/CodeGen/XCore/ |
unaligned_store_combine.ll |
    10  %0 = load i64* %src, align 1
    11  store i64 %0, i64* %dst, align 1
|
/external/llvm/test/MC/ARM/ |
pool.s |
     5  .align 2
    15  @ CHECK: .align 2
|
/external/llvm/test/MC/AsmParser/ |
directive_align.s |
     4  # CHECK: .align 1
     6  .align 1
|
/external/llvm/test/MC/COFF/ |
bss_section.ll |
     5  @"\01?thingy@@3Ufoo@@B" = global %struct.foo zeroinitializer, align 4
     8  @thingy_linkonce = linkonce_odr global %struct.foo zeroinitializer, align 4
|
/external/llvm/test/Transforms/GVN/ |
2007-07-26-NonRedundant.ll |
    10  store i32 0, i32* @bsLive, align 4
    14  %tmp29 = load i32* @bsLive, align 4 ; <i32> [#uses=0]
|
/external/llvm/test/Transforms/GlobalOpt/ |
2008-01-29-VolatileGlobal.ll |
     2  @t0.1441 = internal global double 0x3FD5555555555555, align 8 ; <double*> [#uses=1]
     6  %tmp1 = load volatile double* @t0.1441, align 8 ; <double> [#uses=2]
|
tls.ll |
     7  @x = internal thread_local global [100 x i32] zeroinitializer, align 16
     8  @ip = internal global i32* null, align 8
    17  store i32* getelementptr inbounds ([100 x i32]* @x, i64 0, i64 1), i32** @ip, align 8
    24  store i32 0, i32* getelementptr inbounds ([100 x i32]* @x, i64 0, i64 1), align 4
    27  %0 = load i32** @ip, align 8
    29  %1 = load i32* %0, align 4
    41  store i32* getelementptr inbounds ([100 x i32]* @x, i64 0, i64 1), i32** @ip, align 8
    44  store i32 50, i32* getelementptr inbounds ([100 x i32]* @x, i64 0, i64 1), align 4
|
/external/llvm/test/Transforms/InstCombine/ |
2012-05-28-select-hang.ll |
     3  @c = common global i8 0, align 1
     4  @a = common global i8 0, align 1
     5  @b = common global i8 0, align 1
     9  %0 = load i8* @c, align 1
    13  store i8 %conv1, i8* @a, align 1
    18  store i8 %conv3, i8* @b, align 1
    19  %1 = load i8* @a, align 1
    34  store i8 %conv9, i8* @a, align 1
|
/external/llvm/test/Transforms/Mem2Reg/ |
atomic.ll |
     9  store atomic i32 %x, i32* %a seq_cst, align 4
    10  %r = load atomic i32* %a seq_cst, align 4
|
/external/llvm/test/Transforms/MemCpyOpt/ |
2011-06-02-CallSlotOverwritten.ll |
    15  %x = alloca %struct1, align 8
    16  %y = alloca %struct2, align 8
    21  store i32 0, i32* %gepn1, align 8
    23  store i32 0, i32* %gepn2, align 4
    27  %load = load i64* %bit1, align 8
    28  store i64 %load, i64* %bit2, align 8
    30  ; CHECK: %load = load i64* %bit1, align 8
    31  ; CHECK: store i64 %load, i64* %bit2, align 8
|