    Searched full:load (Results 426 - 450 of 12660)


  /external/llvm/test/CodeGen/ARM/
vtrn.ll 7 %tmp1 = load <8 x i8>* %A
8 %tmp2 = load <8 x i8>* %B
19 %tmp1 = load <4 x i16>* %A
20 %tmp2 = load <4 x i16>* %B
31 %tmp1 = load <2 x i32>* %A
32 %tmp2 = load <2 x i32>* %B
43 %tmp1 = load <2 x float>* %A
44 %tmp2 = load <2 x float>* %B
55 %tmp1 = load <16 x i8>* %A
56 %tmp2 = load <16 x i8>* %
    [all...]
vext.ll 6 %tmp1 = load <8 x i8>* %A
7 %tmp2 = load <8 x i8>* %B
15 %tmp1 = load <8 x i8>* %A
16 %tmp2 = load <8 x i8>* %B
24 %tmp1 = load <16 x i8>* %A
25 %tmp2 = load <16 x i8>* %B
33 %tmp1 = load <16 x i8>* %A
34 %tmp2 = load <16 x i8>* %B
42 %tmp1 = load <4 x i16>* %A
43 %tmp2 = load <4 x i16>* %
    [all...]
vshl.ll 6 %tmp1 = load <8 x i8>* %A
7 %tmp2 = load <8 x i8>* %B
15 %tmp1 = load <4 x i16>* %A
16 %tmp2 = load <4 x i16>* %B
24 %tmp1 = load <2 x i32>* %A
25 %tmp2 = load <2 x i32>* %B
33 %tmp1 = load <1 x i64>* %A
34 %tmp2 = load <1 x i64>* %B
42 %tmp1 = load <8 x i8>* %A
43 %tmp2 = load <8 x i8>* %
    [all...]
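
Note: the three ARM files above (vtrn.ll, vext.ll, vshl.ll) follow the same pattern: each test function loads two small vectors with the pre-LLVM-3.7 typed-pointer form of load (the pointee type is written before the pointer operand), combines them, and FileCheck verifies that the expected NEON instruction is emitted. A minimal sketch of that shape, with a hypothetical function name and a vtrn-style shuffle mask; it illustrates the test form, not the exact contents of vtrn.ll:

    define <8 x i8> @vtrn_i8_sketch(<8 x i8>* %A, <8 x i8>* %B) nounwind {
      %tmp1 = load <8 x i8>* %A       ; old syntax: "load <ty>* <ptr>"
      %tmp2 = load <8 x i8>* %B
      ; the two shuffles form the two halves of a transpose (VTRN) of the inputs
      %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
      %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
      %tmp5 = add <8 x i8> %tmp3, %tmp4
      ret <8 x i8> %tmp5              ; FileCheck would look for "vtrn.8" in the output
    }
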
  /external/llvm/test/CodeGen/MSP430/
Inst16mr.ll 16 %1 = load i16* @foo
25 %1 = load i16* @foo
34 %1 = load i16* @foo
44 %2 = load i16* @foo
53 %1 = load i16* @foo
Inst16rm.ll 9 %1 = load i16* @foo
17 %1 = load i16* @foo
25 %1 = load i16* @foo
33 %1 = load i16* @foo
42 %1 = load i16* @foo
Inst8mr.ll 16 %1 = load i8* @foo
25 %1 = load i8* @foo
34 %1 = load i8* @foo
44 %2 = load i8* @foo
53 %1 = load i8* @foo
Inst8rm.ll 9 %1 = load i8* @foo
17 %1 = load i8* @foo
25 %1 = load i8* @foo
33 %1 = load i8* @foo
42 %1 = load i8* @foo
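
Note: the MSP430 InstNNmr/InstNNrm tests all load from a single global (@foo) and combine the value with a register argument; the point is to check that the backend folds the load (and, for the mr forms, the store) into one memory-operand instruction. A hedged sketch of one such function (hypothetical name, i16 variant):

    @foo = global i16 0, align 2

    define void @add_to_foo(i16 %a) nounwind {
      %1 = load i16* @foo         ; old typed-pointer load from the global
      %2 = add i16 %a, %1
      store i16 %2, i16* @foo     ; the test expects this to fold into one add to &foo
      ret void
    }
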
  /external/llvm/test/CodeGen/PowerPC/
return-val-i128.ll 10 %tmp1 = load float* %a_addr, align 4 ; <float> [#uses=1]
16 %tmp4 = load float* %a_addr, align 4 ; <float> [#uses=1]
23 %tmp9 = load float* %a_addr, align 4 ; <float> [#uses=1]
28 %tmp12 = load i128* %tmp, align 16 ; <i128> [#uses=1]
32 %retval13 = load i128* %retval ; <i128> [#uses=1]
  /external/llvm/test/CodeGen/Thumb/
ldr_ext.ll 10 %tmp.u = load i8* %t1
19 %tmp.u = load i16* %t1
31 %tmp.s = load i8* %t0
43 %tmp.s = load i16* %t0
54 %tmp.s = load i16* null
2009-08-12-ConstIslandAssert.ll 8 %1 = load i32* %data, align 4 ; <i32> [#uses=2]
9 %2 = load i32* undef, align 4 ; <i32> [#uses=2]
13 %3 = load i32* %0, align 4 ; <i32> [#uses=1]
15 %5 = load i32* null, align 4 ; <i32> [#uses=1]
18 %8 = load i32* %7, align 4 ; <i32> [#uses=1]
23 %13 = load i32* %12, align 4 ; <i32> [#uses=1]
26 %16 = load i32* %15, align 4 ; <i32> [#uses=1]
31 %21 = load i32* %20, align 4 ; <i32> [#uses=1]
36 %26 = load i32* %25, align 4 ; <i32> [#uses=1]
39 %29 = load i32* %28, align 4 ; <i32> [#uses=1
    [all...]
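
Note: ldr_ext.ll pairs each load with a zero- or sign-extension (hence the .u / .s suffixes above), checking that the Thumb backend selects an extending load such as ldrb or ldrsh rather than a load followed by a separate extend. A sketch of the two shapes, with hypothetical function names:

    define i32 @load_zext_i8(i8* %p) nounwind {
      %tmp.u = load i8* %p
      %r = zext i8 %tmp.u to i32     ; expected to map onto a zero-extending byte load
      ret i32 %r
    }

    define i32 @load_sext_i16(i16* %p) nounwind {
      %tmp.s = load i16* %p
      %r = sext i16 %tmp.s to i32    ; expected to map onto a sign-extending halfword load
      ret i32 %r
    }
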
  /external/llvm/test/CodeGen/X86/
packed_struct.ll 20 %tmp = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 1) ; <i32> [#uses=1]
21 %tmp3 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 2) ; <i32> [#uses=1]
22 %tmp6 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 3) ; <i32> [#uses=1]
30 %tmp = load i8* getelementptr ([4 x <{ i32, i8 }>]* @bara, i32 0, i32 0, i32 1) ; <i8> [#uses=1]
31 %tmp4 = load i8* getelementptr ([4 x <{ i32, i8 }>]* @bara, i32 0, i32 3, i32 1) ; <i8> [#uses=1]
masked-iv-unsafe.ll 17 %t1 = load double* %t0
22 %t4 = load double* %t3
26 %t7 = load double* %t6
45 %t1 = load double* %t0
50 %t4 = load double* %t3
54 %t7 = load double* %t6
74 %t1 = load double* %t0
80 %t4 = load double* %t3
84 %t7 = load double* %t6
104 %t1 = load double* %t
    [all...]
rd-mod-wr-eflags.ll 11 %0 = load i64* %refcnt, align 8, !tbaa !0
36 %0 = load i64* @c, align 8, !tbaa !0
50 %0 = load i64* @c, align 8, !tbaa !0
78 ; CHECK-NOT: load
79 %0 = load i64* %s64, align 8
89 ; CHECK-NOT: load
90 %1 = load i32* %s32, align 4
100 ; CHECK-NOT: load
101 %2 = load i16* %s16, align 2
111 ; CHECK-NOT: load
    [all...]
2010-02-19-TailCallRetAddrBug.ll 26 %r2 = load i32* %ptr1
28 %r3 = load i32* %ptr3
30 %r4 = load i32* %ptr5
32 %r5 = load i32* %ptr7
34 %r6 = load i32* %ptr9
36 %r7 = load i32* %ptr11
38 %r8 = load i32* %ptr13
40 %r9 = load i32* %ptr15
42 %r10 = load i32* %ptr17
  /external/llvm/test/ExecutionEngine/
test-loadstore.ll 5 %V = load i8* %P ; <i8> [#uses=1]
7 %V.upgrd.4 = load i16* %P.upgrd.1 ; <i16> [#uses=1]
9 %V.upgrd.5 = load i32* %P.upgrd.2 ; <i32> [#uses=1]
11 %V.upgrd.6 = load i64* %P.upgrd.3 ; <i64> [#uses=1]
20 %Y = load i32* %X ; <i32> [#uses=1]
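
Note: test-loadstore.ll is a JIT round-trip test; each integer width is stored and then loaded back through a pointer of the matching type. A rough sketch of that pattern (argument and function names are illustrative, not the test's actual ones):

    define i32 @roundtrip(i8* %P8, i16* %P16, i32* %P32) {
      store i8 7, i8* %P8
      %V8 = load i8* %P8             ; read back what was just stored
      store i16 300, i16* %P16
      %V16 = load i16* %P16
      store i32 123456, i32* %P32
      %V32 = load i32* %P32
      ret i32 %V32
    }
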
  /external/llvm/test/Feature/
packed_struct.ll 19 %tmp = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 1) ; <i32> [#uses=1]
20 %tmp3 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 2) ; <i32> [#uses=1]
21 %tmp6 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 3) ; <i32> [#uses=1]
29 %tmp = load i32* getelementptr([2 x <{ i32, i8 }>]* @bara, i32 0, i32 0, i32 0 ) ; <i32> [#uses=1]
30 %tmp4 = load i32* getelementptr ([2 x <{ i32, i8 }>]* @bara, i32 0, i32 1, i32 0) ; <i32> [#uses=1]
  /external/llvm/test/Integer/
packed_struct_bt.ll 19 %tmp = load i35* getelementptr (%struct.anon* @foos, i32 0, i32 1) ; <i35> [#uses=1]
20 %tmp3 = load i35* getelementptr (%struct.anon* @foos, i32 0, i32 2) ; <i35> [#uses=1]
21 %tmp6 = load i35* getelementptr (%struct.anon* @foos, i32 0, i32 3) ; <i35> [#uses=1]
29 %tmp = load i35* getelementptr([2 x <{ i35, i8 }>]* @bara, i32 0, i32 0, i32 0 ) ; <i35> [#uses=1]
30 %tmp4 = load i35* getelementptr ([2 x <{ i35, i8 }>]* @bara, i32 0, i32 1, i32 0) ; <i35> [#uses=1]
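
Note: the packed_struct tests (the X86, Feature, and Integer variants above) load fields through constant getelementptr expressions into a packed struct, i.e. a struct declared with <{ ... }> so it has no padding and its wider fields are deliberately misaligned. A sketch under an assumed field layout (the real %struct.anon may order or type its fields differently):

    %struct.anon = type <{ i8, i32, i32, i32 }>   ; packed: no padding, i32 fields unaligned
    @foos = external global %struct.anon

    define i32 @sum_foos_fields() nounwind {
      %tmp  = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 1)
      %tmp3 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 2)
      %tmp6 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 3)
      %s1 = add i32 %tmp, %tmp3
      %s2 = add i32 %s1, %tmp6
      ret i32 %s2
    }
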
  /external/clang/test/CodeGen/
packed-arrays.c 55 // CHECK: load i32* %{{.*}}, align 1
58 // CHECK: load i32* %{{.*}}, align 4
70 // CHECK: load i32* %{{.*}}, align 1
73 // CHECK: load i32* %{{.*}}, align 4
79 // CHECK: load i32* %{{.*}}, align 4
82 // CHECK: load i32* %{{.*}}, align 1
98 // CHECK: load i32* %{{.*}}, align 1
101 // CHECK: load i32* %{{.*}}, align 4
104 // CHECK: load i32* %{{.*}}, align 1
107 // CHECK: load i32* %{{.*}}, align
    [all...]
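
Note: the packed-arrays.c CHECK lines verify the alignment clang attaches to each load: accesses through packed arrays or structs get align 1, while accesses whose natural alignment is known get align 4. In IR terms the difference is only the alignment attribute on the load; a minimal sketch of the two forms being matched (hypothetical function, pre-3.7 syntax):

    define i32 @aligned_vs_packed(i32* %p, i32* %q) nounwind {
      %x = load i32* %p, align 1     ; what the "align 1" CHECK lines match (packed access)
      %y = load i32* %q, align 4     ; naturally aligned access
      %r = add i32 %x, %y
      ret i32 %r
    }
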
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
residu_asm_neon.s 51 SUB r8, r9, #4 @load the x[i-2] address
55 SUB r8, r9, #6 @load the x[i-3] address
59 SUB r8, r9, #8 @load the x[i-4] address
63 SUB r8, r9, #10 @load the x[i-5] address
67 SUB r8, r9, #12 @load the x[i-6] address
71 SUB r8, r9, #14 @load the x[i-7] address
75 SUB r8, r9, #16 @load the x[i-8] address
79 SUB r8, r9, #18 @load the x[i-9] address
83 SUB r8, r9, #20 @load the x[i-10] address
87 SUB r8, r9, #22 @load the x[i-11] addres
    [all...]
  /frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
syn_filt_opt.s 83 LDRSH r5, [r0] @ load a[0]
87 @ load all a[]
90 LDRSH r6, [r0, #2] @ load a[1]
91 LDRSH r7, [r0, #4] @ load a[2]
92 LDRSH r9, [r0, #6] @ load a[3]
93 LDRSH r11,[r0, #8] @ load a[4]
101 LDRSH r6, [r0, #10] @ load a[5]
102 LDRSH r7, [r0, #12] @ load a[6]
103 LDRSH r9, [r0, #14] @ load a[7]
104 LDRSH r11,[r0, #16] @ load a[8
    [all...]
  /external/llvm/test/Transforms/BBVectorize/
func-alias.ll 63 %iounit.8748_288 = load i32* @__main1_MOD_iounit, align 4
70 %D.75807_289 = load i8** getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 0), align 8
71 %j.8758_290 = load i32* @j.4580, align 4
73 %iave.8736_292 = load i32* @__main1_MOD_iave, align 4
75 %D.75808_294 = load i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 2, i32 0), align 8
77 %igrp.8737_296 = load i32* @__main1_MOD_igrp, align 4
79 %D.75810_298 = load i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 1, i32 0), align 8
83 %ityp.8750_302 = load i32* @__main1_MOD_ityp, align 4
85 %D.75814_304 = load i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 3, i32 0), align 8
88 %D.75817_307 = load i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 1), al (…)
    [all...]
  /external/chromium/chrome/browser/net/
sdch_dictionary_fetcher.h 5 // Support modularity by calling to load a new SDCH filter dictionary.
40 // chance we'll try to load the same URL multiple times when a pile of
67 // bandwidth from the actual page load. Create delayed tasks to spread out
73 // to load several distinct dictionaries (by telling a client to load a
77 // trying to load more than once). In addition, some dictionaries prove
79 // malformed?). As a protective element, Chromium will *only* load a
82 // The following set lists all the dictionary URLs that we've tried to load,
83 // so that we won't try to load from an URL more than once.
  /external/chromium/chrome/browser/policy/
asynchronous_policy_loader_unittest.cc 59 EXPECT_CALL(*delegate_, Load()).WillOnce(Return(template_dict));
71 EXPECT_CALL(*delegate_, Load()).WillOnce(
73 EXPECT_CALL(*delegate_, Load()).WillOnce(
91 ON_CALL(*delegate_, Load()).WillByDefault(CreateTestDictionary());
92 EXPECT_CALL(*delegate_, Load()).Times(1);
110 EXPECT_CALL(*delegate_, Load()).WillOnce(
112 EXPECT_CALL(*delegate_, Load()).WillOnce(
115 EXPECT_CALL(*delegate_, Load()).WillOnce(
118 EXPECT_CALL(*delegate_, Load()).WillOnce(
  /external/clang/test/CodeGenCXX/
lambda-expressions.cpp 18 // CHECK: load i32*
22 // CHECK: load i32*
31 // CHECK: load i32**
32 // CHECK: load i32*
44 // CHECK: load i32*
45 // CHECK: load i32*
59 // CHECK: load i32*
73 // CHECK-NEXT: load i32*
74 // CHECK-NEXT: load i32*
  /external/llvm/test/CodeGen/Mips/
fp-indexed-ls.ll 17 %0 = load float* %arrayidx, align 4
25 %0 = load double* %arrayidx, align 8
33 %0 = load float* %arrayidx1, align 1
40 %0 = load float* @gf, align 4
49 %0 = load double* @gd, align 8
58 %0 = load float* @gf, align 4
69 %0 = load double* %arrayidx1, align 1
77 %0 = load double* @gd, align 8
87 %0 = load float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1
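
Note: fp-indexed-ls.ll loads floats and doubles through addresses computed with getelementptr, including some align 1 (unaligned) accesses, to exercise the MIPS indexed floating-point load/store patterns. A sketch of one indexed access (hypothetical function name):

    define float @indexed_load(float* %base, i32 %i) nounwind {
      %arrayidx = getelementptr inbounds float* %base, i32 %i
      %0 = load float* %arrayidx, align 4   ; candidate for an indexed FP load on MIPS
      ret float %0
    }
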
