/external/llvm/test/CodeGen/MSP430/
  2009-10-10-OrImpDef.ll
    7: load volatile i8* %r, align 1    ; <i8>:0 [#uses=1]
    9: store volatile i8 %1, i8* %r, align 1
/external/llvm/test/CodeGen/Mips/
  const1.ll
    7: @i = common global i32 0, align 4
    8: @j = common global i32 0, align 4
    9: @k = common global i32 0, align 4
    10: @l = common global i32 0, align 4
    15: store i32 -559023410, i32* @i, align 4
    16: store i32 -559023410, i32* @j, align 4
    17: store i32 -87105875, i32* @k, align 4
    18: store i32 262991277, i32* @l, align 4
  f16abs.ll
    3: @y = global double -1.450000e+00, align 8
    4: @x = common global double 0.000000e+00, align 8
    6: @y1 = common global float 0.000000e+00, align 4
    7: @x1 = common global float 0.000000e+00, align 4
    14: %0 = load double* @y, align 8
    16: store double %call, double* @x, align 8
    19: %1 = load float* @y1, align 4
    21: store float %call2, float* @x1, align 4
  fp-spill-reload.ll
    8: %0 = load i32* %b, align 4
    10: %1 = load i32* %arrayidx.1, align 4
    13: %2 = load i32* %arrayidx.2, align 4
    16: %3 = load i32* %arrayidx.3, align 4
    19: %4 = load i32* %arrayidx.4, align 4
    22: %5 = load i32* %arrayidx.5, align 4
    25: %6 = load i32* %arrayidx.6, align 4
    28: %7 = load i32* %arrayidx.7, align 4
  fpnotneeded.ll
    5: @i = global i32 1, align 4
    6: @f = global float 1.000000e+00, align 4
    22: %0 = load i32* @i, align 4
    35: %i.addr = alloca i32, align 4
    36: %f.addr = alloca float, align 4
    37: store i32 %i, i32* %i.addr, align 4
    38: store float %f, float* %f.addr, align 4
    51: store float 2.000000e+00, float* @f, align 4
  swzero.ll
    10: store i32 0, i32* %x, align 1
    17: store i32 0, i32* %p, align 4
  tls16.ll
    3: @a = thread_local global i32 4, align 4
    7: %0 = load i32* @a, align 4
/external/llvm/test/CodeGen/NVPTX/
  aggr-param.ll
    8: ; CHECK: .param .align 4 .b8 bar_param_0[16]
    15: ; CHECK: .param .align 4 .b8 foo_param_0[20]
/external/llvm/test/CodeGen/PowerPC/
  2008-02-09-LocalRegAllocAssert.ll
    5: %tmp = load i64* null, align 8    ; <i64> [#uses=2]
    8: store i64 %min, i64* null, align 8
  2008-10-28-UnprocessedNode.ll
    5: %imag59 = load ppc_fp128* null, align 8    ; <ppc_fp128> [#uses=1]
    9: store ppc_fp128 %2, ppc_fp128* null, align 16
  hidden-vis-2.ll
    8: %0 = load i32* @x, align 4    ; <i32> [#uses=1]
    9: %1 = load i32* @y, align 4    ; <i32> [#uses=1]
/external/llvm/test/CodeGen/R600/
  large-constant-initializer.ll
    5: @gv = external unnamed_addr addrspace(2) constant [239 x i32], align 4
    8: %val = load i32 addrspace(2)* getelementptr ([239 x i32] addrspace(2)* @gv, i64 0, i64 239), align 4
  llvm.AMDGPU.cvt_f32_ubyte.ll
    11: %val = load i32 addrspace(1)* %in, align 4
    13: store float %cvt, float addrspace(1)* %out, align 4
    20: %val = load i32 addrspace(1)* %in, align 4
    22: store float %cvt, float addrspace(1)* %out, align 4
    29: %val = load i32 addrspace(1)* %in, align 4
    31: store float %cvt, float addrspace(1)* %out, align 4
    38: %val = load i32 addrspace(1)* %in, align 4
    40: store float %cvt, float addrspace(1)* %out, align 4
  v1i64-kernel-arg.ll
    7: store i64 %a, i64 addrspace(1)* %out, align 8
    14: store <1 x i64> %a, <1 x i64> addrspace(1)* %out, align 8
/external/llvm/test/CodeGen/SystemZ/
  unaligned-01.ll
    31: %val = load i16 *%src, align 1
    32: store i16 %val, i16 *%dst, align 1
    43: %val1 = load i32 *%src1, align 1
    44: %val2 = load i32 *%src2, align 2
    46: store i32 %sub, i32 *%dst, align 1
    57: %val1 = load i64 *%src1, align 1
    58: %val2 = load i64 *%src2, align 2
    60: store i64 %sub, i64 *%dst, align 4
/external/llvm/test/CodeGen/Thumb/
  2014-06-10-thumb1-ldst-opt-bug.ll
    11: %0 = load i32* %A, align 4
    13: %1 = load i32* %arrayidx1, align 4
/external/llvm/test/CodeGen/Thumb2/
  tail-call-r9.ll
    3: @foo = common global void ()* null, align 4
    10: %tmp = load void ()** @foo, align 4
/external/llvm/test/CodeGen/X86/
  2008-01-16-FPStackifierAssert.ll
    5: %tmp71 = load x86_fp80* null, align 16    ; <x86_fp80> [#uses=1]
    9: store double %tmp7374, double* null, align 8
    10: %tmp81 = load double* null, align 8    ; <double> [#uses=1]
    16: %tmp87 = load x86_fp80* null, align 16    ; <x86_fp80> [#uses=1]
    20: store double %tmp8990, double* null, align 8
    21: %tmp97 = load double* null, align 8    ; <double> [#uses=1]
    27: %tmp103 = load x86_fp80* null, align 16    ; <x86_fp80> [#uses=1]
    31: store double %tmp105106, double* null, align 8
  2008-05-22-FoldUnalignedLoad.ll
    5: %tmp2 = load <4 x float>* %x, align 1
    7: store <4 x float> %inv, <4 x float>* %x, align 1
  2008-10-06-MMXISelBug.ll
    8: %0 = load <2 x i32>* @tmp_V2i, align 8    ; <<2 x i32>> [#uses=1]
    10: store <2 x i32> %1, <2 x i32>* @tmp_V2i, align 8
  2009-05-28-DAGCombineCrash.ll
    8: %srcval16 = load i448* %P, align 8    ; <i448> [#uses=1]
    13: store i448 %ins, i448* %P, align 8
  2010-06-28-matched-g-constraint.ll
    7: %_r = alloca i32, align 4    ; <i32*> [#uses=2]
    9: %0 = load i32* %_r, align 4    ; <i32> [#uses=1]
  2011-06-03-x87chain.ll
    5: %tmp1 = load i64* %a, align 8
    10: store float %conv, float* %f, align 4
    14: store i64 %conv5, i64* %b, align 8
    21: store i64 0, i64* %b, align 8
    26: %tmp4 = load i64* %arrayidx, align 8
    29: store float %conv, float* %f, align 4
    38: %x.1.copyload = load i24* undef, align 1
    41: store float %div, float* undef, align 4
  2011-06-12-FastAllocSpill.ll
    24: %tmp5 = alloca i64, align 8
    25: %tmp6 = alloca void ()*, align 8
    26: %tmp7 = alloca %3, align 8
    27: store i64 0, i64* %tmp5, align 8
    34: store void ()* %tmp16, void ()** %tmp6, align 8
    35: %tmp17 = load void ()** %tmp6, align 8
    45: %tmp24 = load i64* %tmp5, align 8
    47: store i64 %tmp25, i64* %tmp5, align 8
  2012-01-11-split-cv.ll
    6: %b = load <18 x i16>* %bp, align 16
    8: store <18 x i16> %x, <18 x i16>* %ret, align 16