/external/llvm/test/CodeGen/Thumb2/

thumb2-ldr.ll
   7: %tmp = load i32* %v
  16: %tmp = load i32* %tmp2
  26: %tmp = load i32* %tmp2
  36: %tmp3 = load i32* %tmp2
  46: %tmp3 = load i32* %tmp2
  57: %tmp4 = load i32* %tmp3
  70: %tmp4 = load i32* %tmp3

thumb2-ldrb.ll
   7: %tmp = load i8* %v
  16: %tmp = load i8* %tmp2
  27: %tmp3 = load i8* %tmp2
  37: %tmp3 = load i8* %tmp2
  47: %tmp3 = load i8* %tmp2
  58: %tmp4 = load i8* %tmp3
  70: %tmp4 = load i8* %tmp3

thumb2-ldrh.ll
   7: %tmp = load i16* %v
  16: %tmp = load i16* %tmp2
  26: %tmp = load i16* %tmp2
  36: %tmp3 = load i16* %tmp2
  46: %tmp3 = load i16* %tmp2
  57: %tmp4 = load i16* %tmp3
  69: %tmp4 = load i16* %tmp3

/external/llvm/test/CodeGen/X86/

h-register-addressing-32.ll
   9: %t3 = load double* %t2, align 8
  16: %t3 = load float* %t2, align 8
  23: %t3 = load i16* %t2, align 8
  30: %t3 = load i8* %t2, align 8
  37: %t3 = load i8* %t2, align 8
  44: %t3 = load i8* %t2, align 8
  51: %t3 = load i8* %t2, align 8

h-register-addressing-64.ll
   9: %t3 = load double* %t2, align 8
  16: %t3 = load float* %t2, align 8
  23: %t3 = load i16* %t2, align 8
  30: %t3 = load i8* %t2, align 8
  37: %t3 = load i8* %t2, align 8
  44: %t3 = load i8* %t2, align 8
  51: %t3 = load i8* %t2, align 8

trunc-ext-ld-st.ll
   4: ; A single 16-bit load
  14: %T = load <2 x i8>* %A
  29: %T = load <2 x i16>* %A
  41: %T = load <2 x i32>* %A
  54: %T = load <4 x i8>* %A
  66: %T = load <4 x i16>* %A
  78: %T = load <8 x i8>* %A

widen_cast-1.ll
  19: %tmp = load i32* %i ; <i32> [#uses=1]
  24: %tmp1 = load i32* %i ; <i32> [#uses=1]
  25: %tmp2 = load <2 x i32>** %dst.addr ; <<2 x i32>*> [#uses=1]
  27: %tmp3 = load i32* %i ; <i32> [#uses=1]
  28: %tmp4 = load <4 x i16>** %src.addr ; <<4 x i16>*> [#uses=1]
  30: %tmp6 = load <4 x i16>* %arrayidx5 ; <<4 x i16>> [#uses=1]
  37: %tmp7 = load i32* %i ; <i32> [#uses=1]

widen_cast-2.ll
  21: %tmp = load i32* %i ; <i32> [#uses=1]
  26: %tmp1 = load i32* %i ; <i32> [#uses=1]
  27: %tmp2 = load <7 x i32>** %dst.addr ; <<2 x i32>*> [#uses=1]
  29: %tmp3 = load i32* %i ; <i32> [#uses=1]
  30: %tmp4 = load <14 x i16>** %src.addr ; <<4 x i16>*> [#uses=1]
  32: %tmp6 = load <14 x i16>* %arrayidx5 ; <<4 x i16>> [#uses=1]
  39: %tmp7 = load i32* %i ; <i32> [#uses=1]

SwizzleShuff.ll
   9: %A = load <4 x i8>* %pA
  10: %B = load <4 x i8>* %pB
  24: %A = load <4 x i32>* %pA
  25: %B = load <4 x i32>* %pB
  37: %A = load <4 x i8>* %pA
  39: %B = load <4 x i8>* %pB
  51: %A = load <4 x i32>* %pA
  52: %B = load <4 x i32>* %pB
  63: %A = load <4 x i32>* %pA
  64: %B = load <4 x i32>* %p ...

mmx-arith.ll
   8: %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
   9: %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
  15: %tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
  18: %tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
  21: %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
  27: %tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
  30: %tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
  33: %tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
  39: %tmp57 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
  44: %tmp63 = load x86_mmx* %B ; <x86_mmx> [#uses=1 ...

/external/llvm/test/Instrumentation/ThreadSanitizer/

read_from_global.ll
   9: %0 = load i32* @const_global, align 4
  19: %0 = load i32* @non_const_global, align 4
  32: %0 = load i32* %arrayidx, align 4
  44: %vtable = load void (%struct.Foo*)*** %0, align 8, !tbaa !3
  45: %1 = load void (%struct.Foo*)** %vtable, align 8
  52: ; CHECK: = load
  54: ; CHECK: = load

/external/llvm/test/Transforms/GVN/

2011-06-01-NonLocalMemdepMiscompile.ll
  25: ; CHECK: %tmp17.pre = load i8* %tmp16, align 1
  35: %tmp7 = load i8** %tmp, align 8
  36: %tmp8 = load i8* %tmp7, align 1
  41: %tmp11 = load i8** %tmp, align 8
  42: %tmp12 = load i8* %tmp11, align 1
  49: %tmp16 = load i8** %tmp, align 8
  50: %tmp17 = load i8* %tmp16, align 1

non-local-offset.ll
   5: ; GVN should ignore the store to p[1] to see that the load from p[0] is
  21: %t = load i32* %p
  29: ; GVN should ignore the store to p[1] to see that the first load from p[0] is
  30: ; fully redundant. However, the second load is larger, so it's not a simple
  38: ; CHECK: load i64* %pc
  49: %t = load i32* %p
  56: %t64 = load i64* %pc

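The matched comments describe what this test checks: a store to p[1] must not stop GVN from treating a later load of p[0] as fully redundant, since the two addresses cannot alias. A minimal C++ sketch of that pattern, with illustrative names that are not taken from the test itself:

    // The second read of p[0] is fully redundant with the first: the
    // intervening store writes p[1], which does not overlap p[0], so GVN
    // can reuse the value already loaded into 'a'.
    int reuse_across_store(int* p) {
      int a = p[0];  // first load of p[0]
      p[1] = 0;      // store to a neighboring, non-aliasing slot
      int b = p[0];  // redundant load; eligible for elimination
      return a + b;
    }

The second half of the test repeats the pattern with a wider i64 load of the same memory (the "load i64* %pc" match above), which is why its CHECK line still expects a load rather than a simple value reuse.
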
pre-single-pred.ll
   1: ; RUN: opt < %s -gvn -enable-load-pre -S | FileCheck %s
   2: ; This testcase assumed we'll PRE the load into %for.cond, but we don't actually
   3: ; verify that doing so is safe. If there didn't _happen_ to be a load in
  10: ; hoist the load. Doing the right thing for the wrong reasons is still a bug.
  26: ; CHECK-NEXT: %tmp3 = load i32* @p
  28: %tmp3 = load i32* @p ; <i32> [#uses=1]
  43: %tmp9 = load i32* @p ; <i32> [#uses=1]

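Load PRE, the transformation these comments discuss, hoists a load to an earlier block when the same load would otherwise execute repeatedly; the test's concern is that hoisting is only correct when the load is actually safe to execute on the new path. A rough C++ sketch of the kind of situation load PRE targets; the names are illustrative and this is not the test's own IR:

    extern int p;  // stands in for the global @p in the matches above

    // Each loop iteration reloads 'p'. When the load is safe to execute
    // before the loop, load PRE can hoist a single load into the
    // preheader and reuse that value on every iteration.
    int sum_p(int n) {
      int s = 0;
      for (int i = 0; i < n; ++i)
        s += p;  // repeated load of the global
      return s;
    }
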
/external/llvm/test/Transforms/GlobalOpt/

globalsra-unknown-index.ll
   3: ; RUN: grep load %t | count 6
  20: %a = load i32* %p
  22: %b = load i32* %q
  28: %a = load i32* %p
  30: %b = load i32* %q
  36: %a = load i32* %p
  38: %b = load i32* %q

/external/llvm/test/Transforms/InstCombine/

2011-05-28-swapmulsub.ll
  10: %tmp = load i32* %on_off.addr, align 4
  17: %tmp1 = load i32* %a, align 4
  29: %tmp = load i32* %q.addr, align 4
  30: %tmp1 = load i32* %on_off.addr, align 4
  37: %tmp2 = load i32* %a, align 4
  47: %tmp = load i32* %on_off.addr, align 4
  54: %tmp1 = load i32* %a, align 4

/frameworks/base/libs/hwui/

Matrix.h
  56: load(v);
  60: load(v);
  64: load(v);
  69: void load(const float* v);
  70: void load(const Matrix4& v);
  71: void load(const SkMatrix& v);
  86: load(u);

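The Matrix.h matches show an overload set of load() functions plus constructor-style call sites that forward to them. A minimal C++ sketch of how such an overload set might be declared and used; the class layout and the memcpy-based bodies below are assumptions for illustration, not code copied from the header:

    #include <cstring>

    class SkMatrix;  // Skia's matrix type, forward-declared here for the sketch

    class Matrix4 {
    public:
        float data[16];

        explicit Matrix4(const float* v) { load(v); }  // mirrors the load(v) call sites above
        Matrix4(const Matrix4& v) { load(v); }

        void load(const float* v) { std::memcpy(data, v, sizeof(data)); }
        void load(const Matrix4& v) { load(v.data); }
        void load(const SkMatrix& v);  // conversion from Skia's matrix; body omitted
    };

The overload set lets callers hand the matrix a raw float array, another Matrix4, or a Skia matrix through the same name, which is what the load(v) and load(u) call sites in the matches rely on.
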
/external/chromium/net/base/

load_states.h
  11: // These states correspond to the lengthy periods of time that a resource load
  14: // This is the default state. It corresponds to a resource load that has
  20: // This state corresponds to a resource load that is blocked waiting for
  27: // This state corresponds to a resource load that is blocked waiting for a
  37: // This state corresponds to a resource load that is blocked waiting for a
  43: // This state corresponds to a resource load that is blocked waiting for a
  48: // This state corresponds to a resource load that is blocked waiting for the
  52: // This state corresponds to a resource load that is blocked waiting to
  58: // This state corresponds to a resource load that is blocked waiting for the
  64: // This state corresponds to a resource load that is blocked waiting for ...

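Each matched comment introduces one value of the load-state enumeration this header defines; the search snippet cuts off before the enumerators themselves. A sketch of the general shape, where the enumerator names are illustrative guesses rather than values copied from the header:

    // One enumerator per phase that can block a resource load, roughly in
    // the order the comments above describe them (names are assumptions).
    enum LoadState {
      LOAD_STATE_IDLE,                  // default: nothing currently blocks the load
      LOAD_STATE_WAITING_FOR_CACHE,     // blocked waiting for the cache entry
      LOAD_STATE_RESOLVING_HOST,        // blocked waiting for DNS resolution
      LOAD_STATE_CONNECTING,            // blocked waiting for a connection
      LOAD_STATE_SENDING_REQUEST,       // blocked waiting to send the request
      LOAD_STATE_WAITING_FOR_RESPONSE,  // blocked waiting for the server's response
      LOAD_STATE_READING_RESPONSE,      // blocked waiting for the response body
    };
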
/external/clang/test/CodeGenObjCXX/

arc-move.mm
   5: // CHECK: = load i8**
   7: // CHECK: = load i8**
  37: // CHECK: load i8**
  39: // CHECK: load i8***
  40: // CHECK-NEXT: load i8**
  50: // Load the object
  51: // CHECK-NEXT: [[OBJ:%[a-zA-Z0-9]+]] = load i8** [[Y]]
  60: // CHECK-NEXT: [[OBJ:%[a-zA-Z0-9]+]] = load i8** [[X]]
  69: // CHECK-NEXT: [[T0:%.*]] = load i8** [[X]]
  72: // CHECK-NEXT: [[T0:%.*]] = load i8** [[Y] ...

/external/libvpx/vp8/encoder/arm/neon/

fastquantizeb_neon.asm
  31: vld1.16 {q0, q1}, [r0] ;load z
  32: vld1.16 {q10, q11}, [r1] ;load zbin
  48: ldr r0, [sp, #8] ;load round_ptr
  49: ldr r12, [sp, #12] ;load quant_ptr
  55: vld1.s16 {q6, q7}, [r0] ;load round_ptr [0-15]
  56: vld1.s16 {q8, q9}, [r12] ;load quant_ptr [0-15]
  61: ldr r0, [sp, #4] ;load rvsplus1_scan_order ptr
  66: vld1.16 {q0, q1}, [r0] ;load rvsplus1_scan_order
  76: ldr r12, [sp] ;load dequant_ptr
  84: vld1.s16 {q6, q7}, [r12] ;load dequant_ptr[i ...

/external/llvm/test/CodeGen/ARM/

fast-isel-pred.ll
   9: %tmp = load <4 x i32>* %X, align 16
  11: %0 = load i32* %retval
  26: %tmp = load <4 x i32>* %v.addr, align 16
  28: %tmp.i = load <4 x i32>* %__a.addr.i, align 16
  32: %tmp1 = load i8** %p.addr, align 4
  33: %tmp2 = load i32* %offset.addr, align 4
  34: %tmp3 = load <4 x float>** %constants.addr, align 4
  50: %tmp = load i64* %data, align 4
  51: %tmp1 = load i8** %p.addr, align 4
  52: %tmp2 = load i32* %offset.addr, align ...

vabd.ll
   6: %tmp1 = load <8 x i8>* %A
   7: %tmp2 = load <8 x i8>* %B
  15: %tmp1 = load <4 x i16>* %A
  16: %tmp2 = load <4 x i16>* %B
  24: %tmp1 = load <2 x i32>* %A
  25: %tmp2 = load <2 x i32>* %B
  33: %tmp1 = load <8 x i8>* %A
  34: %tmp2 = load <8 x i8>* %B
  42: %tmp1 = load <4 x i16>* %A
  43: %tmp2 = load <4 x i16>* % ...

fast-isel-static.ll
  10: %tmp = load float** %sum.addr, align 4
  11: %tmp1 = load float* %tmp
  12: %tmp2 = load float** %addend.addr, align 4
  13: %tmp3 = load float* %tmp2
  15: %tmp4 = load float** %sum.addr, align 4

vtbl.ll
  10: %tmp1 = load <8 x i8>* %A
  11: %tmp2 = load <8 x i8>* %B
  19: %tmp1 = load <8 x i8>* %A
  20: %tmp2 = load %struct.__neon_int8x8x2_t* %B
  30: %tmp1 = load <8 x i8>* %A
  31: %tmp2 = load %struct.__neon_int8x8x3_t* %B
  42: %tmp1 = load <8 x i8>* %A
  43: %tmp2 = load %struct.__neon_int8x8x4_t* %B
  55: %tmp1 = load <8 x i8>* %A
  56: %tmp2 = load <8 x i8>* % ...

/external/libvpx/vp8/encoder/arm/armv6/

vp8_variance_halfpixvar16x16_hv_armv6.asm
  36: ldr r4, [r0, #0] ; load source pixels a, row N
  37: ldr r6, [r0, #1] ; load source pixels b, row N
  38: ldr r5, [r9, #0] ; load source pixels c, row N+1
  39: ldr r7, [r9, #1] ; load source pixels d, row N+1
  52: ldr r5, [r2, #0] ; load 4 ref pixels
  74: ldr r4, [r0, #4] ; load source pixels a, row N
  75: ldr r6, [r0, #5] ; load source pixels b, row N
  76: ldr r5, [r9, #4] ; load source pixels c, row N+1
  80: ldr r7, [r9, #5] ; load source pixels d, row N+1
  93: ldr r5, [r2, #4] ; load 4 ref pixel ...