/external/llvm/test/Assembler/
vbool-cmp.ll
    ; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
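This RUN line is the standard assembler round-trip test: the module is assembled, disassembled, reassembled, and disassembled again, and FileCheck verifies that the final text is stable. A minimal sketch of the pattern (the function body and CHECK line here are illustrative, not the file's actual contents):

    ; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
    ; CHECK: icmp ult <2 x i32>
    define <2 x i1> @test_vcmp(<2 x i32> %a, <2 x i32> %b) {
      %res = icmp ult <2 x i32> %a, %b
      ret <2 x i1> %res
    }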
/external/llvm/test/CodeGen/AArch64/
arm64-crc32.ll
    %val = call i32 @llvm.aarch64.crc32b(i32 %cur, i32 %bits)
    %val = call i32 @llvm.aarch64.crc32h(i32 %cur, i32 %bits)
    %val = call i32 @llvm.aarch64.crc32w(i32 %cur, i32 %next)
    %val = call i32 @llvm.aarch64.crc32x(i32 %cur, i64 %next)
    %val = call i32 @llvm.aarch64.crc32cb(i32 %cur, i32 %bits)
    %val = call i32 @llvm.aarch64.crc32ch(i32 %cur, i32 %bits)
    %val = call i32 @llvm.aarch64.crc32cw(i32 %cur, i32 %next)
    %val = call i32 @llvm.aarch64.crc32cx(i32 %cur, i64 %next)
    declare i32 @llvm.aarch64.crc32b(i32, i32)
    declare i32 @llvm.aarch64.crc32h(i32, i32)
    [all...]
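Each of these intrinsics maps to a single AArch64 CRC32 instruction; the b/h/w/x suffix gives the data width, and the c variants use the CRC32C (Castagnoli) polynomial. A minimal callable sketch (not the test's exact contents), assuming a byte is zero-extended into the i32 data operand:

    declare i32 @llvm.aarch64.crc32b(i32, i32)

    define i32 @crc32_one_byte(i32 %cur, i8 %b) {
      %bits = zext i8 %b to i32             ; the intrinsic takes the byte in an i32
      %val = call i32 @llvm.aarch64.crc32b(i32 %cur, i32 %bits)
      ret i32 %val
    }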
arm64-sqshl-uqshl-i64Contant.ll
    %1 = tail call i64 @llvm.aarch64.neon.sqshl.i64(i64 %a, i64 36)
    %1 = tail call i64 @llvm.aarch64.neon.uqshl.i64(i64 %a, i64 36)
    declare i64 @llvm.aarch64.neon.uqshl.i64(i64, i64)
    declare i64 @llvm.aarch64.neon.sqshl.i64(i64, i64)
arm64-stackpointer.ll
    %sp = call i64 @llvm.read_register.i64(metadata !0)
    call void @llvm.write_register.i64(metadata !0, i64 %val)
    declare i64 @llvm.read_register.i64(metadata) nounwind
    declare void @llvm.write_register.i64(metadata, i64) nounwind
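The metadata operand is how read_register/write_register name a physical register: a module-level metadata string such as "sp" is bound to !0 and handed to the intrinsic. A sketch of the whole pattern under that assumption, in the pre-3.6 metadata syntax the excerpts use:

    declare i64 @llvm.read_register.i64(metadata) nounwind
    declare void @llvm.write_register.i64(metadata, i64) nounwind

    define i64 @get_stack_pointer() nounwind {
      %sp = call i64 @llvm.read_register.i64(metadata !0)   ; reads the register named by !0
      ret i64 %sp
    }

    !0 = metadata !{metadata !"sp"}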
arm64-stacksave.ll
    %savedstack = call i8* @llvm.stacksave() nounwind
    call void @llvm.stackrestore(i8* %savedstack) nounwind
    declare i8* @llvm.stacksave() nounwind
    declare void @llvm.stackrestore(i8*) nounwind
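stacksave/stackrestore bracket dynamic allocas so their stack space is reclaimed when the region exits. An illustrative sketch (the function names are assumptions, not from the test):

    declare i8* @llvm.stacksave() nounwind
    declare void @llvm.stackrestore(i8*) nounwind
    declare void @use(i8*)

    define void @with_vla(i32 %n) nounwind {
      %saved = call i8* @llvm.stacksave()       ; capture the current stack pointer
      %buf = alloca i8, i32 %n                  ; dynamically sized allocation
      call void @use(i8* %buf)
      call void @llvm.stackrestore(i8* %saved)  ; release %buf's stack space
      ret void
    }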
/external/llvm/test/CodeGen/ARM/
fabs-neon.ll
    %foo = call <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
    declare <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
    %foo = call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
    declare <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
ldaex-stlex.ll
    %ldaexd = tail call %0 @llvm.arm.ldaexd(i8* %p)
    %stlexd = tail call i32 @llvm.arm.stlexd(i32 %tmp4, i32 %tmp7, i8* %ptr)
    declare %0 @llvm.arm.ldaexd(i8*) nounwind readonly
    declare i32 @llvm.arm.stlexd(i32, i32, i8*) nounwind
    %val = call i32 @llvm.arm.ldaex.p0i8(i8* %addr)
    %val = call i32 @llvm.arm.ldaex.p0i16(i16* %addr)
    %val = call i32 @llvm.arm.ldaex.p0i32(i32* %addr)
    declare i32 @llvm.arm.ldaex.p0i8(i8*) nounwind readonly
    declare i32 @llvm.arm.ldaex.p0i16(i16*) nounwind readonly
    declare i32 @llvm.arm.ldaex.p0i32(i32*) nounwind readonly
    [all...]
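ldaex/stlex are the load-acquire-exclusive and store-release-exclusive pair; the store returns 0 on success and nonzero if the exclusive reservation was lost, so real uses wrap them in a retry loop. A hedged sketch of that classic loop (an atomic swap; the function name is illustrative):

    declare i32 @llvm.arm.ldaex.p0i32(i32*) nounwind readonly
    declare i32 @llvm.arm.stlex.p0i32(i32, i32*) nounwind

    define i32 @atomic_swap(i32* %addr, i32 %new) nounwind {
    entry:
      br label %retry
    retry:
      %old = call i32 @llvm.arm.ldaex.p0i32(i32* %addr)
      %failed = call i32 @llvm.arm.stlex.p0i32(i32 %new, i32* %addr)
      %again = icmp ne i32 %failed, 0
      br i1 %again, label %retry, label %done
    done:
      ret i32 %old
    }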
stackpointer.ll
    %sp = call i32 @llvm.read_register.i32(metadata !0)
    call void @llvm.write_register.i32(metadata !0, i32 %val)
    declare i32 @llvm.read_register.i32(metadata) nounwind
    declare void @llvm.write_register.i32(metadata, i32) nounwind
/external/llvm/test/CodeGen/Generic/
ptr-annotate.ll
    @.str = private unnamed_addr constant [4 x i8] c"sth\00", section "llvm.metadata"
    @.str1 = private unnamed_addr constant [4 x i8] c"t.c\00", section "llvm.metadata"
    %0 = call i8* @llvm.ptr.annotation.p0i8(i8* %m, i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8]* @.str1, i32 0, i32 0), i32 2)
    declare i8* @llvm.ptr.annotation.p0i8(i8*, i8*, i8*, i32) #1
/external/llvm/test/CodeGen/PowerPC/
2010-05-03-retaddr1.ll
    declare i8* @llvm.frameaddress(i32) nounwind readnone
    %0 = tail call i8* @llvm.frameaddress(i32 1) ; <i8*> [#uses=1]
    declare i8* @llvm.returnaddress(i32) nounwind readnone
    %0 = tail call i8* @llvm.returnaddress(i32 1) ; <i8*> [#uses=1]
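Both intrinsics take a constant frame depth: 0 means the current frame, and nonzero depths walk up the stack (reliable only on targets that maintain a frame-pointer chain). A minimal sketch:

    declare i8* @llvm.returnaddress(i32) nounwind readnone

    define i8* @my_return_address() nounwind {
      %ra = call i8* @llvm.returnaddress(i32 0)   ; the depth argument must be a constant
      ret i8* %ra
    }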
mul-with-overflow.ll
    declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
    %res = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 3)
    declare {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)
    %res = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %x, i32 3)
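The with.overflow intrinsics return a {result, overflow-flag} pair that is taken apart with extractvalue. A sketch of how the pair is typically consumed, branching on the overflow bit (names are illustrative):

    declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32)

    define i32 @triple_or_minus1(i32 %x) {
      %res = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 3)
      %prod = extractvalue {i32, i1} %res, 0   ; low 32 bits of the product
      %ovf = extractvalue {i32, i1} %res, 1    ; true if the product wrapped
      br i1 %ovf, label %overflow, label %ok
    ok:
      ret i32 %prod
    overflow:
      ret i32 -1
    }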
/external/llvm/test/CodeGen/R600/
llvm.AMDGPU.mul.ll
    %r2 = call float @llvm.AMDGPU.mul(float %r0, float %r1)
    call void @llvm.R600.store.swizzle(<4 x float> %vec, i32 0, i32 0)
    declare float @llvm.AMDGPU.mul(float, float) readnone
    declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
llvm.SI.tid.ll
    %4 = call i32 @llvm.SI.tid()
    call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %5, float %5, float %5, float %5)
    declare i32 @llvm.SI.tid() readnone
    declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
/external/llvm/test/CodeGen/SystemZ/
bswap-01.ll
    declare i32 @llvm.bswap.i32(i32 %a)
    declare i64 @llvm.bswap.i64(i64 %a)
    %swapped = call i32 @llvm.bswap.i32(i32 %a)
    %swapped = call i64 @llvm.bswap.i64(i64 %a)
/external/llvm/test/CodeGen/X86/
2007-06-15-IntToMMX.ll
    %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp2, x86_mmx %tmp3) ; <x86_mmx> [#uses=1]
    tail call void @llvm.x86.mmx.emms()
    declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
    declare void @llvm.x86.mmx.emms()
2008-11-03-F80VAARG.ll
    declare void @llvm.va_start(i8*) nounwind
    declare void @llvm.va_copy(i8*, i8*) nounwind
    declare void @llvm.va_end(i8*) nounwind
    call void @llvm.va_start(i8* %v1)
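va_start is handed the va_list storage as an i8*; values are then pulled out with the va_arg instruction and the list is torn down with va_end. A minimal variadic sketch on a target whose va_list is a plain pointer (illustrative, and simpler than the x86_fp80 case this test exercises):

    declare void @llvm.va_start(i8*) nounwind
    declare void @llvm.va_end(i8*) nounwind

    define i32 @first_vararg(i32 %count, ...) nounwind {
      %ap = alloca i8*                        ; va_list storage
      %ap2 = bitcast i8** %ap to i8*
      call void @llvm.va_start(i8* %ap2)
      %val = va_arg i8** %ap, i32             ; fetch the first variadic argument
      call void @llvm.va_end(i8* %ap2)
      ret i32 %val
    }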
2011-08-23-Trampoline.ll
    call void @llvm.init.trampoline(i8* null, i8* bitcast (void (%struct.FRAME.gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets*, i32, i32)* @gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets__move.5177 to i8*), i8* null) ; <i8*> [#uses=0]
    %tramp22 = call i8* @llvm.adjust.trampoline(i8* null)
    declare void @llvm.init.trampoline(i8*, i8*, i8*) nounwind
    declare i8* @llvm.adjust.trampoline(i8*) nounwind
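init.trampoline writes target-specific code into a caller-provided buffer so that a static-chain ('nest') value is bound to a function, and adjust.trampoline turns that buffer into a callable address. A hedged sketch of the usual shape (buffer size, alignment, and all names here are illustrative; real code needs target-appropriate values):

    declare void @llvm.init.trampoline(i8*, i8*, i8*) nounwind
    declare i8* @llvm.adjust.trampoline(i8*) nounwind

    define i32 @impl(i8* nest %chain, i32 %x) {
      ret i32 %x                               ; a real body would use %chain
    }

    define i32 @make_and_call(i8* %chain, i32 %x) {
      %tramp = alloca [72 x i8], align 16      ; size/alignment are target-dependent
      %tp = getelementptr [72 x i8]* %tramp, i32 0, i32 0
      call void @llvm.init.trampoline(i8* %tp, i8* bitcast (i32 (i8*, i32)* @impl to i8*), i8* %chain)
      %code = call i8* @llvm.adjust.trampoline(i8* %tp)
      %fp = bitcast i8* %code to i32 (i32)*    ; callable without the nest argument
      %r = call i32 %fp(i32 %x)
      ret i32 %r
    }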
combine-sse2-intrinsics.ll
    %1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %A, i32 3)
    %2 = tail call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %1, <4 x i32> <i32 3, i32 0, i32 7, i32 0>)
    %3 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %2, i32 2)
    %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %A, i32 3)
    %2 = tail call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %1, <8 x i16> <i16 3, i16 0, i16 0, i16 0, i16 7, i16 0, i16 0, i16 0>)
    %3 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %2, i32 2)
    %1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %A, i32 0)
    %2 = tail call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %1, <4 x i32> <i32 0, i32 0, i32 7, i32 0>)
    %3 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %2, i32 0)
    %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %A, i32 0)
    [all...]
ret-addr.ll
    %0 = tail call i8* @llvm.returnaddress(i32 2) ; <i8*> [#uses=1]
    declare i8* @llvm.returnaddress(i32) nounwind readnone
    %0 = tail call i8* @llvm.returnaddress(i32 1) ; <i8*> [#uses=1]
    %0 = tail call i8* @llvm.returnaddress(i32 0) ; <i8*> [#uses=1]
sse42_64.ll
    declare i64 @llvm.x86.sse42.crc32.64.8(i64, i8) nounwind
    declare i64 @llvm.x86.sse42.crc32.64.64(i64, i64) nounwind
    %tmp = call i64 @llvm.x86.sse42.crc32.64.8(i64 %a, i8 %b)
    %tmp = call i64 @llvm.x86.sse42.crc32.64.64(i64 %a, i64 %b)
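crc32.64.8 folds one byte into a 64-bit accumulator (the SSE4.2 CRC32 instruction, which uses the CRC32C polynomial), so checksumming a buffer is a simple byte loop. An illustrative sketch assuming %len > 0 (names are not from the test):

    declare i64 @llvm.x86.sse42.crc32.64.8(i64, i8) nounwind

    define i64 @crc_buffer(i8* %p, i64 %len) nounwind {
    entry:
      br label %loop
    loop:
      %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
      %crc = phi i64 [ 0, %entry ], [ %crc.next, %loop ]
      %addr = getelementptr i8* %p, i64 %i
      %b = load i8* %addr
      %crc.next = call i64 @llvm.x86.sse42.crc32.64.8(i64 %crc, i8 %b)
      %i.next = add i64 %i, 1
      %done = icmp eq i64 %i.next, %len
      br i1 %done, label %exit, label %loop
    exit:
      ret i64 %crc.next
    }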
stdarg.ll
    call void @llvm.va_start(i8* %ap12)
    call void @llvm.va_end(i8* %ap12)
    declare void @llvm.va_start(i8*) nounwind
    declare void @llvm.va_end(i8*) nounwind
/external/llvm/test/CodeGen/XCore/
2011-08-01-DynamicAllocBug.ll
    declare i8* @llvm.stacksave() nounwind
    declare void @llvm.stackrestore(i8*) nounwind
    %0 = call i8* @llvm.stacksave()
    call void @llvm.stackrestore(i8* %0)
misc-intrinsics.ll
    declare i32 @llvm.xcore.bitrev(i32)
    declare i32 @llvm.xcore.crc32(i32, i32, i32)
    declare %0 @llvm.xcore.crc8(i32, i32, i32)
    declare i32 @llvm.xcore.zext(i32, i32)
    declare i32 @llvm.xcore.sext(i32, i32)
    declare i32 @llvm.xcore.geted()
    declare i32 @llvm.xcore.getet()
    %result = call i32 @llvm.xcore.bitrev(i32 %val)
    %result = call i32 @llvm.xcore.crc32(i32 %crc, i32 %data, i32 %poly)
    %result = call %0 @llvm.xcore.crc8(i32 %crc, i32 %data, i32 %poly)
    [all...]
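crc8 is the one intrinsic here with a multi-result return: %0 is a local struct type (assumed here to be { i32, i32 }, holding the updated CRC and the shifted data word), taken apart with extractvalue. A sketch under that assumption:

    %0 = type { i32, i32 }

    declare %0 @llvm.xcore.crc8(i32, i32, i32)

    define i32 @crc8_step(i32 %crc, i32 %data, i32 %poly) {
      %result = call %0 @llvm.xcore.crc8(i32 %crc, i32 %data, i32 %poly)
      %newcrc = extractvalue %0 %result, 0   ; updated CRC value
      ret i32 %newcrc
    }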
/external/llvm/test/DebugInfo/
2010-03-19-DbgDeclare.ll
    call void @llvm.dbg.declare(metadata !{i32* null}, metadata !1)
    !llvm.dbg.cu = !{!2}
    !llvm.module.flags = !{!5}
    declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
arm-relocs.test
    RUN: llvm-dwarfdump %p/Inputs/arm-relocs.elf-arm | FileCheck %s