Home | Sort by relevance | Sort by last modified time
    Searched full:load (Results 326 - 350 of 12660) sorted by null

<< 11 12 13 14 15 16 17 18 19 20 >>

  /external/llvm/test/CodeGen/ARM/
2011-09-09-OddVectorDivision.ll 14 %1 = load <3 x i16>* @x1
15 %2 = load <3 x i16>* @y1
18 %4 = load <4 x i16>* @x2
19 %5 = load <4 x i16>* @y2
2011-11-09-IllegalVectorFPIntConvert.ll 7 %tmp1 = load <2 x double>* %A
16 %tmp1 = load <2 x double>* %A
25 %tmp1 = load <2 x i32>* %A
34 %tmp1 = load <2 x i32>* %A
ldr_frame.ll 6 %tmp1 = load i32* %tmp
13 %tmp1 = load i8* %tmp
21 %tmp1 = load i32* %tmp
28 %tmp1 = load i8* %tmp
load.ll 10 %tmp = load i8* %p ; <i8> [#uses=1]
17 %tmp = load i8* %p ; <i8> [#uses=1]
24 %tmp = load i16* %p ; <i16> [#uses=1]
31 %tmp = load i16* %p ; <i16> [#uses=1]
vsra.ll 6 %tmp1 = load <8 x i8>* %A
7 %tmp2 = load <8 x i8>* %B
16 %tmp1 = load <4 x i16>* %A
17 %tmp2 = load <4 x i16>* %B
26 %tmp1 = load <2 x i32>* %A
27 %tmp2 = load <2 x i32>* %B
36 %tmp1 = load <1 x i64>* %A
37 %tmp2 = load <1 x i64>* %B
46 %tmp1 = load <16 x i8>* %A
47 %tmp2 = load <16 x i8>* %
    [all...]
  /external/llvm/test/CodeGen/Generic/
2009-03-29-SoftFloatVectorExtract.ll 7 %v = load <2 x double>* @m
APIntLoadStore.ll 516 %tmp = load i1* @i1_l ; <i1> [#uses=1]
522 %tmp = load i2* @i2_l ; <i2> [#uses=1]
528 %tmp = load i3* @i3_l ; <i3> [#uses=1]
534 %tmp = load i4* @i4_l ; <i4> [#uses=1]
540 %tmp = load i5* @i5_l ; <i5> [#uses=1]
546 %tmp = load i6* @i6_l ; <i6> [#uses=1]
552 %tmp = load i7* @i7_l ; <i7> [#uses=1]
558 %tmp = load i8* @i8_l ; <i8> [#uses=1]
564 %tmp = load i9* @i9_l ; <i9> [#uses=1]
570 %tmp = load i10* @i10_l ; <i10> [#uses=1
    [all...]
  /external/llvm/test/CodeGen/MSP430/
2009-09-18-AbsoluteAddr.ll 14 %tmp1 = load volatile i8* @"\010x0021" ; <i8> [#uses=1]
16 %tmp2 = load i8* %x.addr ; <i8> [#uses=1]
18 %tmp3 = load i8* %tmp ; <i8> [#uses=1]
20 %0 = load i8* %retval ; <i8> [#uses=1]
Inst16mi.ll 17 %1 = load i16* @foo
26 %1 = load i16* @foo
35 %1 = load i16* @foo
44 %1 = load i16* @foo
Inst8mi.ll 16 %1 = load i8* @foo
25 %1 = load i8* @foo
34 %1 = load i8* @foo
43 %1 = load i8* @foo
  /external/llvm/test/CodeGen/Mips/
mips64directive.ll 8 %0 = load i64* @gl, align 8
  /external/llvm/test/CodeGen/PowerPC/
2008-06-21-F128LoadStore.ll 7 %tmp = load ppc_fp128* @g
hidden-vis.ll 7 %0 = load i32* @x, align 4 ; <i32> [#uses=1]
lha.ll 4 %tmp.1 = load i16* %a ; <i16> [#uses=1]
load-constant-addr.ll 6 %tmp.i = load float* inttoptr (i32 186018016 to float*) ; <float> [#uses=1]
ppcf128-1.ll 15 %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
16 %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
19 %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
24 %retval5 = load ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
37 %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
38 %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
41 %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
46 %retval5 = load ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
59 %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
60 %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1
    [all...]
  /external/llvm/test/CodeGen/SPARC/
2009-08-28-PIC.ll 7 %0 = load i32* @foo, align 4 ; <i32> [#uses=1]
  /external/llvm/test/CodeGen/Thumb/
ldr_frame.ll 8 %tmp1 = load i32* %tmp
18 %tmp1 = load i8* %tmp
28 %tmp1 = load i32* %tmp
38 %tmp1 = load i8* %tmp
  /external/llvm/test/CodeGen/Thumb2/
thumb2-ldr_ext.ll 7 %tmp.u = load i8* %v.pntr.s0.u1
13 %tmp.u = load i16* %v.pntr.s0.u1
19 %tmp.s = load i8* %v.pntr.s1.u0
25 %tmp.s = load i16* null
  /external/llvm/test/CodeGen/X86/
2007-03-16-InlineAsm.ll 14 %tmp1 = load i32* %A_addr ; <i32> [#uses=1]
17 %tmp3 = load i32* %ret ; <i32> [#uses=1]
19 %tmp4 = load i32* %tmp ; <i32> [#uses=1]
24 %retval5 = load i32* %retval ; <i32> [#uses=1]
2008-10-16-VecUnaryOp.ll 4 %n = load <4 x i32>* %p
2009-06-05-VZextByteShort.ll 7 %x2 = load i32* %x1
15 %x2 = load i32* %x1
23 %x2 = load i32* %x1
31 %x2 = load i32* %x1
2010-02-04-SchedulerBug.ll 9 %0 = load %struct.b_t** null, align 4 ; <%struct.b_t*> [#uses=1]
11 %2 = load i64* %1, align 4 ; <i64> [#uses=1]
14 %5 = load i64** %4, align 4 ; <i64*> [#uses=0]
15 %6 = load i64* null, align 4 ; <i64> [#uses=1]
atom-sched.ll 23 %0 = load i32* @b, align 4
24 %1 = load i32* @c, align 4
27 %2 = load i32* @e, align 4
28 %3 = load i32* @f, align 4
avx-bitcast.ll 6 %a = load double* undef

Completed in 2274 milliseconds

<< 11 12 13 14 15 16 17 18 19 20 >>