    Searched full:next (Results 1176 - 1200 of 50205)


  /external/clang/test/CoverageMapping/
return.c 6 int i = 0; // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:2 = 0
9 // CHECK-NEXT: func2
10 void func2() { // CHECK-NEXT: File 0, [[@LINE]]:14 -> {{[0-9]+}}:2 = #0
11 // CHECK-NEXT: File 0, [[@LINE+2]]:18 -> [[@LINE+2]]:24 = ((#0 + #1) - #2)
12 // CHECK-NEXT: File 0, [[@LINE+1]]:26 -> [[@LINE+1]]:29 = (#1 - #2)
13 for(int i = 0; i < 10; ++i) { // CHECK-NEXT: File 0, [[@LINE]]:31 -> {{[0-9]+}}:4 = #1
14 // CHECK-NEXT: File 0, [[@LINE+1]]:8 -> [[@LINE+1]]:13 = #1
15 if(i > 2) { // CHECK-NEXT: File 0, [[@LINE]]:15 -> [[@LINE+2]]:6 = #2
17 } // CHECK-NEXT: File 0, [[@LINE+2]]:5 -> {{[0-9]+}}:4 = (#1 - #2)
18 // CHECK-NEXT: File 0, [[@LINE+1]]:8 -> [[@LINE+1]]:14 = (#1 - #2
    [all...]
  /external/clang/test/Misc/
caret-diags-macros.c 44 // CHECK-NEXT: macro_args3(11);
45 // CHECK-NEXT: {{^ \^~}}
73 // CHECK-NEXT: macro_args2(22),
74 // CHECK-NEXT: {{^ \^~}}
89 // CHECK-NEXT: variadic_args3(1, 22, 3, 4);
90 // CHECK-NEXT: {{^ \^~}}
111 // CHECK-NEXT: variadic_pasting_args3a(1, 2, 3, 4);
112 // CHECK-NEXT: {{ \^~~~~~~~~~~~~~~~~~~~~~~}}
114 // CHECK-NEXT: #define variadic_pasting_args3a(x, y, ...) variadic_pasting_args2a(x, y, __VA_ARGS__)
115 // CHECK-NEXT: {{ \^~~~~~~~~~~~~~~~~~~~~~~}
    [all...]
  /external/llvm/test/CodeGen/X86/
dag-merge-fast-accesses.ll 11 ; FAST-NEXT: xorps %xmm0, %xmm0
12 ; FAST-NEXT: movups %xmm0, (%rdi)
13 ; FAST-NEXT: retq
17 ; SLOW-NEXT: movq $0, (%rdi)
18 ; SLOW-NEXT: movq $0, 8(%rdi)
19 ; SLOW-NEXT: retq
33 ; FAST-NEXT: movups %xmm0, (%rdi)
34 ; FAST-NEXT: retq
38 ; SLOW-NEXT: movlpd %xmm0, (%rdi)
39 ; SLOW-NEXT: movhpd %xmm0, 8(%rdi
    [all...]
extractelement-legalization-store-ordering.ll 11 ; CHECK-NEXT: pushl %ebx
12 ; CHECK-NEXT: pushl %edi
13 ; CHECK-NEXT: pushl %esi
14 ; CHECK-NEXT: movl 16(%esp), %eax
15 ; CHECK-NEXT: movl 24(%esp), %ecx
16 ; CHECK-NEXT: movl 20(%esp), %edx
17 ; CHECK-NEXT: paddd (%edx), %xmm0
18 ; CHECK-NEXT: movdqa %xmm0, (%edx)
19 ; CHECK-NEXT: shll $4, %ecx
20 ; CHECK-NEXT: movl (%ecx,%edx), %es
    [all...]
merge-consecutive-loads-128.ll 14 ; SSE-NEXT: movups 16(%rdi), %xmm0
15 ; SSE-NEXT: retq
19 ; AVX-NEXT: vmovups 16(%rdi), %xmm0
20 ; AVX-NEXT: retq
24 ; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
25 ; X32-SSE-NEXT: movups 16(%eax), %xmm0
26 ; X32-SSE-NEXT: retl
39 ; SSE-NEXT: movups 8(%rdi), %xmm0
40 ; SSE-NEXT: retq
44 ; AVX-NEXT: vmovups 8(%rdi), %xmm
    [all...]
vector-popcnt-512.ll 8 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
9 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
10 ; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm3
11 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
12 ; AVX512F-NEXT: vpshufb %ymm3, %ymm4, %ymm3
13 ; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm1
14 ; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
15 ; AVX512F-NEXT: vpshufb %ymm1, %ymm4, %ymm1
16 ; AVX512F-NEXT: vpaddb %ymm3, %ymm1, %ymm1
17 ; AVX512F-NEXT: vpxor %ymm3, %ymm3, %ymm
    [all...]
avx512-select.ll 7 ; CHECK-NEXT: vpxord %zmm1, %zmm1, %zmm1
8 ; CHECK-NEXT: cmpl $255, %edi
9 ; CHECK-NEXT: je LBB0_2
10 ; CHECK-NEXT: ## BB#1:
11 ; CHECK-NEXT: vmovaps %zmm0, %zmm1
12 ; CHECK-NEXT: LBB0_2:
13 ; CHECK-NEXT: vpxord %zmm1, %zmm0, %zmm0
14 ; CHECK-NEXT: retq
24 ; CHECK-NEXT: vpxord %zmm1, %zmm1, %zmm1
25 ; CHECK-NEXT: cmpl $255, %ed
    [all...]
palignr.ll 8 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,2,3,0]
9 ; CHECK-NEXT: retl
13 ; CHECK-YONAH-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,2,3,0]
14 ; CHECK-YONAH-NEXT: retl
22 ; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
23 ; CHECK-NEXT: movdqa %xmm1, %xmm0
24 ; CHECK-NEXT: retl
28 ; CHECK-YONAH-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
29 ; CHECK-YONAH-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
30 ; CHECK-YONAH-NEXT: ret
    [all...]
vec_shift6.ll 13 ; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
14 ; SSE-NEXT: retq
18 ; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
19 ; AVX2-NEXT: retq
23 ; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
24 ; AVX512-NEXT: retq
32 ; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
33 ; SSE-NEXT: retq
37 ; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
38 ; AVX2-NEXT: ret
    [all...]
vector-shuffle-128-v16.ll 14 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
15 ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
16 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
17 ; SSE2-NEXT: retq
21 ; SSSE3-NEXT: pxor %xmm1, %xmm1
22 ; SSSE3-NEXT: pshufb %xmm1, %xmm0
23 ; SSSE3-NEXT: retq
27 ; SSE41-NEXT: pxor %xmm1, %xmm1
28 ; SSE41-NEXT: pshufb %xmm1, %xmm0
29 ; SSE41-NEXT: ret
    [all...]
avx2-conversions.ll 7 ; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
8 ; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
9 ; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
10 ; CHECK-NEXT: vzeroupper
11 ; CHECK-NEXT: retq
19 ; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
20 ; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
21 ; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
22 ; CHECK-NEXT: vzeroupper
23 ; CHECK-NEXT: ret
    [all...]
win32-eh.ll 39 ; CHECK: movl %fs:0, %[[next:[^ ,]*]]
40 ; CHECK: movl %[[next]], -28(%ebp)
43 ; CHECK: movl -28(%ebp), %[[next:[^ ,]*]]
44 ; CHECK: movl %[[next]], %fs:0
50 ; CHECK-NEXT: .long -1
51 ; CHECK-NEXT: .long _catchall_filt
52 ; CHECK-NEXT: .long LBB1_2
78 ; CHECK: movl %fs:0, %[[next:[^ ,]*]]
79 ; CHECK: movl %[[next]], -28(%ebp)
82 ; CHECK: movl -28(%ebp), %[[next:[^ ,]*]
    [all...]
avx512-fma.ll 8 ; ALL-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0
9 ; ALL-NEXT: retq
18 ; ALL-NEXT: vfmsub213ps %zmm2, %zmm1, %zmm0
19 ; ALL-NEXT: retq
28 ; ALL-NEXT: vfnmadd213ps %zmm2, %zmm1, %zmm0
29 ; ALL-NEXT: retq
38 ; ALL-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0
39 ; ALL-NEXT: retq
52 ; ALL-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0
53 ; ALL-NEXT: ret
    [all...]
avx512vl-vbroadcast.ll 8 ; CHECK-NEXT: pushq %rax
9 ; CHECK-NEXT: .Ltmp0:
10 ; CHECK-NEXT: .cfi_def_cfa_offset 16
11 ; CHECK-NEXT: vaddss %xmm0, %xmm0, %xmm0
12 ; CHECK-NEXT: vmovss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Folded Spill
13 ; CHECK-NEXT: callq func_f32
14 ; CHECK-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %ymm0 # 4-byte Folded Reload
15 ; CHECK-NEXT: popq %rax
16 ; CHECK-NEXT: retq
27 ; CHECK-NEXT: pushq %ra
    [all...]
  /external/llvm/test/MC/ARM/
coff-relocations.s 18 @ CHECK-ENCODING-NEXT: b.w #0
25 @ CHECK-ENCODING-NEXT: blo.w #0
32 @ CHECK-ENCODING-NEXT: bl #0
41 @ CHECK-ENCODING-NEXT: movw r0, #0
42 @ CHECK-ENCODING-NEXT: movt r0, #0
43 @ CHECK-ENCODING-NEXT: blx r0
54 @ CHECK-ENCODING-NEXT: ldr r0, [pc, #4]
55 @ CHECK-ENCODING-NEXT: bx r0
56 @ CHECK-ENCODING-NEXT: trap
57 @ CHECK-ENCODING-NEXT: movs r0, r
    [all...]
  /external/llvm/test/Object/
invalid.test 13 SECTION-NEXT: Type: SHT_DYNSYM
14 SECTION-NEXT: Flags [
15 SECTION-NEXT: SHF_ALLOC
16 SECTION-NEXT: ]
17 SECTION-NEXT: Address:
18 SECTION-NEXT: Offset:
19 SECTION-NEXT: Size:
20 SECTION-NEXT: Link:
21 SECTION-NEXT: Info:
22 SECTION-NEXT: AddressAlignment
    [all...]
  /external/clang/test/OpenMP/
declare_reduction_codegen.cpp 15 // CHECK-NEXT: store i32 [[MUL]], i32*
16 // CHECK-NEXT: ret void
17 // CHECK-NEXT: }
20 // CHECK-LOAD-NEXT: store i32 [[MUL]], i32*
21 // CHECK-LOAD-NEXT: ret void
22 // CHECK-LOAD-NEXT: }
28 // CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
29 // CHECK-NEXT: store i8 [[TRUNC]], i8*
30 // CHECK-NEXT: ret void
31 // CHECK-NEXT:
    [all...]
  /external/llvm/test/Instrumentation/EfficiencySanitizer/
working_set_slow.ll 14 ; CHECK-NEXT: %tmp1 = load i8, i8* %a, align 1
15 ; CHECK-NEXT: ret i8 %tmp1
23 ; CHECK-NEXT: call void @__esan_aligned_load2(i8* %0)
24 ; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 2
25 ; CHECK-NEXT: ret i16 %tmp1
33 ; CHECK-NEXT: call void @__esan_aligned_load4(i8* %0)
34 ; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 4
35 ; CHECK-NEXT: ret i32 %tmp1
43 ; CHECK-NEXT: call void @__esan_aligned_load8(i8* %0)
44 ; CHECK-NEXT: %tmp1 = load i64, i64* %a, align
    [all...]
  /external/clang/test/CodeGen/
ppc64le-varargs-complex.c 12 // CHECK-NEXT: %[[VAR41:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR40]], i64 16
13 // CHECK-NEXT: store i8* %[[VAR41]], i8** %[[VAR100]]
14 // CHECK-NEXT: %[[VAR3:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR40]], i64 8
15 // CHECK-NEXT: %[[VAR4:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR40]] to i32*
16 // CHECK-NEXT: %[[VAR5:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR3]] to i32*
17 // CHECK-NEXT: %[[VAR6:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR4]], align 8
18 // CHECK-NEXT: %[[VAR7:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR5]], align 8
19 // CHECK-NEXT: %[[VAR8:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR0:[A-Za-z0-9.]+]], i32 0, i32 0
20 // CHECK-NEXT: %[[VAR9:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR0]], i32 0, i32 1
21 // CHECK-NEXT: store i32 %[[VAR6]], i32* %[[VAR8]
    [all...]
  /external/clang/test/CodeGenObjC/
blocks.m 48 // CHECK-NEXT: [[WEAKX:%.*]] = alloca [[WEAK_T:%.*]],
49 // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
50 // CHECK-NEXT: store [[TEST2]]*
53 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[WEAK_T]], [[WEAK_T]]* [[WEAKX]], i32 0, i32 0
54 // CHECK-NEXT: store i8* inttoptr (i32 1 to i8*), i8** [[T0]]
57 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[WEAK_T]], [[WEAK_T]]* [[WEAKX]], i32 0, i32 1
58 // CHECK-NEXT: store [[WEAK_T]]* [[WEAKX]], [[WEAK_T]]** [[T1]]
61 // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[WEAK_T]], [[WEAK_T]]* [[WEAKX]], i32 0, i32 2
62 // CHECK-NEXT: store i32 1375731712, i32* [[T2]]
65 // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [[WEAK_T]], [[WEAK_T]]* [[WEAKX]], i32 0, i32
    [all...]
  /external/libcxx/test/std/containers/associative/multiset/
lower_bound.pass.cpp 45 assert(r == next(m.begin(), 0));
47 assert(r == next(m.begin(), 0));
49 assert(r == next(m.begin(), 3));
51 assert(r == next(m.begin(), 3));
53 assert(r == next(m.begin(), 6));
55 assert(r == next(m.begin(), 6));
57 assert(r == next(m.begin(), 9));
75 assert(r == next(m.begin(), 0));
77 assert(r == next(m.begin(), 0));
79 assert(r == next(m.begin(), 3))
    [all...]
upper_bound.pass.cpp 45 assert(r == next(m.begin(), 0));
47 assert(r == next(m.begin(), 3));
49 assert(r == next(m.begin(), 3));
51 assert(r == next(m.begin(), 6));
53 assert(r == next(m.begin(), 6));
55 assert(r == next(m.begin(), 9));
57 assert(r == next(m.begin(), 9));
75 assert(r == next(m.begin(), 0));
77 assert(r == next(m.begin(), 3));
79 assert(r == next(m.begin(), 3))
    [all...]
  /prebuilts/ndk/r16/sources/cxx-stl/llvm-libc++/test/std/containers/associative/multiset/
lower_bound.pass.cpp 45 assert(r == next(m.begin(), 0));
47 assert(r == next(m.begin(), 0));
49 assert(r == next(m.begin(), 3));
51 assert(r == next(m.begin(), 3));
53 assert(r == next(m.begin(), 6));
55 assert(r == next(m.begin(), 6));
57 assert(r == next(m.begin(), 9));
75 assert(r == next(m.begin(), 0));
77 assert(r == next(m.begin(), 0));
79 assert(r == next(m.begin(), 3))
    [all...]
upper_bound.pass.cpp 45 assert(r == next(m.begin(), 0));
47 assert(r == next(m.begin(), 3));
49 assert(r == next(m.begin(), 3));
51 assert(r == next(m.begin(), 6));
53 assert(r == next(m.begin(), 6));
55 assert(r == next(m.begin(), 9));
57 assert(r == next(m.begin(), 9));
75 assert(r == next(m.begin(), 0));
77 assert(r == next(m.begin(), 3));
79 assert(r == next(m.begin(), 3))
    [all...]
  /external/llvm/test/CodeGen/PowerPC/
fma-assoc.ll 13 ; CHECK-NEXT: fmadd
14 ; CHECK-NEXT: blr
18 ; CHECK-VSX-NEXT: xsmaddadp
19 ; CHECK-VSX-NEXT: fmr
20 ; CHECK-VSX-NEXT: blr
32 ; CHECK-NEXT: fmadd
33 ; CHECK-NEXT: blr
37 ; CHECK-VSX-NEXT: xsmaddadp
38 ; CHECK-VSX-NEXT: fmr
39 ; CHECK-VSX-NEXT: bl
    [all...]
