    Searched full:load (Results 451 - 475 of 12660)


  /external/llvm/test/CodeGen/X86/
widen_arith-6.ll 22 %tmp = load i32* %i ; <i32> [#uses=1]
23 %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
28 %tmp2 = load i32* %i ; <i32> [#uses=1]
29 %tmp3 = load <3 x float>** %dst.addr ; <<3 x float>*> [#uses=1]
31 %tmp4 = load i32* %i ; <i32> [#uses=1]
32 %tmp5 = load <3 x float>** %src.addr ; <<3 x float>*> [#uses=1]
34 %tmp7 = load <3 x float>* %arrayidx6 ; <<3 x float>> [#uses=1]
35 %tmp8 = load <3 x float>* %v ; <<3 x float>> [#uses=1]
42 %tmp9 = load i32* %i ; <i32> [#uses=1]
2008-02-22-LocalRegAllocBug.ll 16 %tmp = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
19 %tmp3 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
20 %tmp4 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
23 %tmp7 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
25 %tmp9 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
28 %tmp13 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
30 %tmp15 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
33 %tmp18 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
36 %tmp21 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
37 %tmp22 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
    [all...]
vararg_tailcall.ll 42 %tmp1 = load i8** @sel, align 8, !tbaa !0
55 %tmp2 = load i8** @sel3, align 8, !tbaa !0
56 %tmp3 = load i8** @sel4, align 8, !tbaa !0
57 %tmp4 = load i8** @sel5, align 8, !tbaa !0
58 %tmp5 = load i8** @sel6, align 8, !tbaa !0
71 %tmp2 = load i8** @sel3, align 8, !tbaa !0
72 %tmp3 = load i8** @sel4, align 8, !tbaa !0
73 %tmp4 = load i8** @sel5, align 8, !tbaa !0
74 %tmp5 = load i8** @sel6, align 8, !tbaa !0
75 %tmp6 = load i8** @sel7, align 8, !tbaa !0
    [all...]
block-placement.ll 23 %val1 = load i32* %gep1
33 %val2 = load i32* %gep2
43 %val3 = load i32* %gep3
53 %val4 = load i32* %gep4
63 %val5 = load i32* %gep5
109 %0 = load i32* %arrayidx
162 %0 = load i32* %arrayidx
193 %0 = load i32* %arrayidx
218 %0 = load i32* %arrayidx
248 %cond1 = load volatile i1* undef
    [all...]
remat-scalar-zero.ll 9 ; as a constant-pool load.
13 %tmp1 = load double* %x ; <double> [#uses=1]
15 %tmp5 = load double* %arrayidx4 ; <double> [#uses=1]
17 %tmp9 = load double* %arrayidx8 ; <double> [#uses=1]
19 %tmp13 = load double* %arrayidx12 ; <double> [#uses=1]
21 %tmp17 = load double* %arrayidx16 ; <double> [#uses=1]
23 %tmp21 = load double* %arrayidx20 ; <double> [#uses=1]
25 %tmp25 = load double* %arrayidx24 ; <double> [#uses=1]
27 %tmp29 = load double* %arrayidx28 ; <double> [#uses=1]
29 %tmp33 = load double* %arrayidx32 ; <double> [#uses=1]
    [all...]
mem-promote-integers.ll 8 %bb = load <1 x i8>* %b
19 %bb = load <1 x i16>* %b
30 %bb = load <1 x i32>* %b
41 %bb = load <1 x i64>* %b
52 %bb = load <1 x i128>* %b
63 %bb = load <1 x i256>* %b
74 %bb = load <1 x i512>* %b
85 %bb = load <2 x i8>* %b
96 %bb = load <2 x i16>* %b
107 %bb = load <2 x i32>* %b
    [all...]
  /external/llvm/test/Transforms/BBVectorize/
ld1.ll 6 %i0 = load double* %a, align 8
7 %i1 = load double* %b, align 8
9 %i2 = load double* %c, align 8
12 %i3 = load double* %arrayidx3, align 8
14 %i4 = load double* %arrayidx4, align 8
17 %i5 = load double* %arrayidx6, align 8
29 ; CHECK: %i0 = load <2 x double>* %i0.v.i0, align 8
30 ; CHECK: %i1 = load <2 x double>* %i1.v.i0, align 8
32 ; CHECK: %i2 = load <2 x double>* %i2.v.i0, align 8
  /external/llvm/test/Transforms/LICM/
atomics.ll 10 %val = load atomic i32* %y unordered, align 4
18 ; CHECK: load atomic
30 %val = load atomic i32* %y monotonic, align 4
37 ; CHECK: load atomic
50 %vala = load atomic i32* %y monotonic, align 4
51 %valb = load atomic i32* %x unordered, align 4
58 ; CHECK: load atomic i32* %x unordered
69 %vala = load atomic i32* %y monotonic, align 4
77 ; CHECK: load atomic i32* %y monotonic
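The atomics.ll matches above contrast memory orderings under LICM: an unordered atomic load from a loop-invariant address may be hoisted to the preheader, while monotonic and stronger loads stay in the loop. A minimal sketch of the hoistable pattern, in the same older typed-pointer load syntax used throughout these tests (function and label names are illustrative, not from the test file):

  define i32 @hoist_unordered(i32* %y) {
  entry:
    br label %loop
  loop:
    ; %y is loop-invariant and the load executes on every iteration;
    ; an unordered atomic load is safe for LICM to hoist into entry.
    %val = load atomic i32* %y unordered, align 4
    %cmp = icmp eq i32 %val, 0
    br i1 %cmp, label %loop, label %end
  end:
    ret i32 %val
  }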
  /external/llvm/test/Transforms/TailCallElim/
dont_reorder_load.ll 4 ; Several cases where tail call elimination should not move the load above the call.
11 ; This load can't be safely moved above the call because the load is from an extern_weak global.
24 %tmp9 = load i32* @extern_weak_global ; <i32> [#uses=1]
30 ; This load can't be safely moved above the call because function may write to the pointer.
43 %tmp9 = load i32* %a_arg ; <i32> [#uses=1]
48 ; This load can't be safely moved above the call because that would change the
49 ; order in which the load volatiles are performed.
61 %tmp9 = load volatile i32* %a_arg ; <i32> [#uses=1]
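Read together, these dont_reorder_load.ll matches enumerate three reasons a load cannot be moved above a call during tail call elimination: the load may trap (an extern_weak global can be null), the callee may write through the pointer, or the load is volatile and reordering would change the order of volatile accesses. A minimal sketch of the volatile case (illustrative names, same older load syntax):

  define i32 @no_reorder_volatile(i32* %a_arg, i32 %n) {
  entry:
    %done = icmp eq i32 %n, 0
    br i1 %done, label %base, label %recurse
  recurse:
    %n.next = sub i32 %n, 1
    %rec = call i32 @no_reorder_volatile(i32* %a_arg, i32 %n.next)
    ; The volatile load must stay after the call: hoisting it above the
    ; call would reorder the volatile accesses, so the tail recursion
    ; cannot be turned into a loop.
    %tmp9 = load volatile i32* %a_arg
    %sum = add i32 %rec, %tmp9
    ret i32 %sum
  base:
    ret i32 0
  }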
  /external/valgrind/main/coregrind/
link_tool_exe_linux.in 4 # statically and at an alternative load address.
8 # client's) coexisting in the same process. The alternative load
9 # address is needed because Valgrind itself will load the client at
11 # default load address. Hence we can't allow Valgrind itself (viz,
20 # the alternative load address
37 # notes section which ends up at the default load address and
53 # expect at least: alt-load-address gcc -o foo bar.o
59 # check for plausible-ish alt load address
60 die "Bogus alt-load address"
  /external/llvm/test/CodeGen/ARM/
vminmax.ll 6 %tmp1 = load <8 x i8>* %A
7 %tmp2 = load <8 x i8>* %B
15 %tmp1 = load <4 x i16>* %A
16 %tmp2 = load <4 x i16>* %B
24 %tmp1 = load <2 x i32>* %A
25 %tmp2 = load <2 x i32>* %B
33 %tmp1 = load <8 x i8>* %A
34 %tmp2 = load <8 x i8>* %B
42 %tmp1 = load <4 x i16>* %A
43 %tmp2 = load <4 x i16>* %B
    [all...]
vneg.ll 6 %tmp1 = load <8 x i8>* %A
14 %tmp1 = load <4 x i16>* %A
22 %tmp1 = load <2 x i32>* %A
30 %tmp1 = load <2 x float>* %A
38 %tmp1 = load <16 x i8>* %A
46 %tmp1 = load <8 x i16>* %A
54 %tmp1 = load <4 x i32>* %A
62 %tmp1 = load <4 x float>* %A
70 %tmp1 = load <8 x i8>* %A
78 %tmp1 = load <4 x i16>* %A
    [all...]
vcvt.ll 6 %tmp1 = load <2 x float>* %A
14 %tmp1 = load <2 x float>* %A
22 %tmp1 = load <2 x i32>* %A
30 %tmp1 = load <2 x i32>* %A
38 %tmp1 = load <4 x float>* %A
46 %tmp1 = load <4 x float>* %A
54 %tmp1 = load <4 x i32>* %A
62 %tmp1 = load <4 x i32>* %A
70 %tmp1 = load <2 x float>* %A
78 %tmp1 = load <2 x float>* %A
    [all...]
vfp.ll 5 %A = load float* %P ; <float> [#uses=1]
6 %B = load double* %D ; <double> [#uses=1]
18 %a = load float* %P ; <float> [#uses=1]
22 %A = load double* %D ; <double> [#uses=1]
31 %a = load float* %P ; <float> [#uses=2]
34 %A = load double* %D ; <double> [#uses=2]
42 %a = load float* %P ; <float> [#uses=1]
46 %A = load double* %D ; <double> [#uses=1]
55 %a1 = load float* %P1 ; <float> [#uses=1]
56 %a2 = load float* %P2 ; <float> [#uses=1]
    [all...]
2010-08-04-EHCrash.ll 15 %eh_select = load i32* %eh_selector ; <i32> [#uses=1]
17 %eh_value = load i8** %eh_exception ; <i8*> [#uses=1]
23 %tmp6 = load i8** %save_eptr.935, align 4 ; <i8*> [#uses=1]
25 %tmp7 = load i32* %save_filt.936, align 4 ; <i32> [#uses=1]
41 %eh_ptr13 = load i8** %eh_exception ; <i8*> [#uses=1]
50 %eh_ptr15 = load i8** %eh_exception
fast-isel-fold.ll 14 %1 = load i8* @a, align 1
26 %1 = load i16* @b, align 2
41 %1 = load i8* @a, align 1
53 %1 = load i16* @b, align 2
65 %1 = load i16* @b, align 2
77 %1 = load i8* @a, align 2
fast-isel-ldr-str-arm.ll 7 %0 = load i32* %add.ptr, align 4
16 %0 = load i32* %add.ptr, align 4
25 %0 = load i16* %add.ptr, align 4
34 %0 = load i16* %add.ptr, align 4
43 %0 = load i8* %add.ptr, align 4
52 %0 = load i8* %add.ptr, align 4
  /external/llvm/test/Transforms/IndVarSimplify/
avoid-i0.ll 37 %1 = load i8* %_si2_addr, align 1 ; <i8> [#uses=1]
39 %3 = load i32* %_si1_addr, align 4 ; <i32> [#uses=1]
41 %5 = load i8* %_si2_addr, align 1 ; <i8> [#uses=1]
44 %8 = load i32* %_si1_addr, align 4 ; <i32> [#uses=1]
50 %13 = load i32* %0, align 4 ; <i32> [#uses=1]
55 %retval1 = load i32* %retval ; <i32> [#uses=1]
69 %1 = load i32* %_ui1_addr, align 4 ; <i32> [#uses=1]
72 %3 = load i32* %0, align 4 ; <i32> [#uses=1]
77 %retval1 = load i32* %retval ; <i32> [#uses=1]
93 %0 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
    [all...]
  /external/llvm/test/CodeGen/PTX/
ld.ll 53 %x = load i16* %p
61 %x = load i32* %p
69 %x = load i64* %p
77 %x = load float* %p
85 %x = load double* %p
94 %x = load i16* %i
103 %x = load i32* %i
112 %x = load i64* %i
121 %x = load float* %i
130 %x = load double* %i
    [all...]
  /external/clang/test/CodeGen/
address-space-field1.c 8 // CHECK: [[t0:%.*]] = load %struct.S addrspace(2)** [[p2addr]], align 8
10 // CHECK: [[t2:%.*]] = load i32 addrspace(2)* [[t1]], align 4
11 // CHECK: [[t3:%.*]] = load %struct.S addrspace(1)** [[p1addr]], align 8
14 // CHECK: [[t5:%.*]] = load %struct.S addrspace(2)** [[p2addr]], align 8
16 // CHECK: [[t7:%.*]] = load i32 addrspace(2)* [[t6]], align 4
17 // CHECK: [[t8:%.*]] = load %struct.S addrspace(1)** [[p1addr]], align 8
trapv.c 9 // CHECK: [[T1:%.*]] = load i32* @uj
10 // CHECK-NEXT: [[T2:%.*]] = load i32* @uk
15 // CHECK: [[T1:%.*]] = load i32* @j
16 // CHECK-NEXT: [[T2:%.*]] = load i32* @k
30 // CHECK: [[T1:%.*]] = load i32* @i
43 // CHECK: [[T1:%.*]] = load i32* @i
  /external/clang/test/CodeGenCXX/
vla.cpp 28 // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4
30 // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4
35 // CHECK-NEXT: [[T0:%.*]] = load i8** [[ARRAY]], align 8
40 // CHECK-NEXT: [[T0:%.*]] = load i16** [[REF]]
47 // CHECK-NEXT: [[T0:%.*]] = load i16** [[REF]]
51 // CHECK-NEXT: [[T4:%.*]] = load i16* [[T3]]
  /external/compiler-rt/lib/i386/
ashrdi3.S 14 movd 12(%esp), %xmm2 // Load count
19 punpckldq %xmm1, %xmm0 // Load input
21 movq 4(%esp), %xmm0 // Load input
50 movl 12(%esp), %ecx // Load count
51 movl 8(%esp), %edx // Load high
52 movl 4(%esp), %eax // Load low
  /external/llvm/test/CodeGen/CellSPU/
loads.ll 9 %tmp1 = load <4 x float>* %a
17 %tmp1 = load <4 x float>* %arrayidx
32 %rv3 = load <4 x i32>* %rv1
38 %val = load <4 x float>* undef
50 %rv = load i32* %ptr, align 2
57 %rv = load <4 x i32>* null
  /external/llvm/test/CodeGen/PowerPC/
a2-fp-basic.ll 8 %a.real = load double* %a.realp
10 %a.imag = load double* %a.imagp
12 %b.real = load double* %b.realp
14 %b.imag = load double* %b.imagp
22 %c.real = load double* %c.realp
24 %c.imag = load double* %c.imagp
