/external/llvm/test/CodeGen/R600/
selectcc-icmp-select-float.ll
  8: define void @test(float addrspace(1)* %out, i32 addrspace(1)* %in) {
  10: %0 = load i32 addrspace(1)* %in
  13: store float %2, float addrspace(1)* %out

unsupported-cc.ll
  7: define void @slt(i32 addrspace(1)* %out, i32 %in) {
  11: store i32 %1, i32 addrspace(1)* %out
  17: define void @ult_i32(i32 addrspace(1)* %out, i32 %in) {
  21: store i32 %1, i32 addrspace(1)* %out
  27: define void @ult_float(float addrspace(1)* %out, float %in) {
  31: store float %1, float addrspace(1)* %out
  37: define void @olt(float addrspace(1)* %out, float %in) {
  41: store float %1, float addrspace(1)* %out
  47: define void @sle(i32 addrspace(1)* %out, i32 %in) {
  51: store i32 %1, i32 addrspace(1)* %ou [all...]

vec4-expand.ll
  9: define void @fp_to_sint(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
  10: %value = load <4 x float> addrspace(1) * %in
  12: store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  22: define void @fp_to_uint(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
  23: %value = load <4 x float> addrspace(1) * %in
  25: store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  35: define void @sint_to_fp(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) [all...]

add.v4i32.ll
  8: define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  9: %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
  10: %a = load <4 x i32> addrspace(1) * %in
  11: %b = load <4 x i32> addrspace(1) * %b_ptr
  13: store <4 x i32> %result, <4 x i32> addrspace(1)* %out
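The R600 entries above all share one shape: the kernel arguments are pointers into global memory (addrspace(1)); the test loads through %in, computes, and stores through %out. A minimal self-contained sketch of that pattern in the same pre-3.7 typed-pointer syntax (the function name and RUN line are illustrative, not copied from any file listed here; the real tests add FileCheck patterns):

```llvm
; RUN: llc < %s -march=r600 -mcpu=redwood
; Both pointers are global-memory (addrspace(1)) pointers, so the loads and
; the store all go to VRAM.
define void @vec_add(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr  = getelementptr <4 x i32> addrspace(1)* %in, i32 1
  %a      = load <4 x i32> addrspace(1)* %in
  %b      = load <4 x i32> addrspace(1)* %b_ptr
  %result = add <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}
```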
and.v4i32.ll
  8: define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  9: %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
  10: %a = load <4 x i32> addrspace(1) * %in
  11: %b = load <4 x i32> addrspace(1) * %b_ptr
  13: store <4 x i32> %result, <4 x i32> addrspace(1)* %out

sdiv.ll
  14: define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  15: %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
  16: %num = load i32 addrspace(1) * %in
  17: %den = load i32 addrspace(1) * %den_ptr
  19: store i32 %result, i32 addrspace(1)* %out

selectcc-opt.ll
  7: define void @test_a(i32 addrspace(1)* %out, float %in) {
  19: %7 = getelementptr i32 addrspace(1)* %out, i32 1
  20: store i32 0, i32 addrspace(1)* %7
  24: store i32 0, i32 addrspace(1)* %out
  34: define void @test_b(i32 addrspace(1)* %out, float %in) {
  46: %7 = getelementptr i32 addrspace(1)* %out, i32 1
  47: store i32 0, i32 addrspace(1)* %7
  51: store i32 0, i32 addrspace(1)* %out
  58: define void @test_c(float addrspace(1)* %out, i32 %in) {
  62: store float %1, float addrspace(1)* %ou [all...]

fdiv.v4f32.ll
  12: define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
  13: %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
  14: %a = load <4 x float> addrspace(1) * %in
  15: %b = load <4 x float> addrspace(1) * %b_ptr
  17: store <4 x float> %result, <4 x float> addrspace(1)* %out

set-dx10.ll
  9: define void @fcmp_une_select_fptosi(i32 addrspace(1)* %out, float %in) {
  15: store i32 %3, i32 addrspace(1)* %out
  21: define void @fcmp_une_select_i32(i32 addrspace(1)* %out, float %in) {
  25: store i32 %1, i32 addrspace(1)* %out
  31: define void @fcmp_ueq_select_fptosi(i32 addrspace(1)* %out, float %in) {
  37: store i32 %3, i32 addrspace(1)* %out
  43: define void @fcmp_ueq_select_i32(i32 addrspace(1)* %out, float %in) {
  47: store i32 %1, i32 addrspace(1)* %out
  53: define void @fcmp_ugt_select_fptosi(i32 addrspace(1)* %out, float %in) {
  59: store i32 %3, i32 addrspace(1)* %ou [all...]

literals.ll
  11: define void @i32_literal(i32 addrspace(1)* %out, i32 %in) {
  14: store i32 %0, i32 addrspace(1)* %out
  26: define void @float_literal(float addrspace(1)* %out, float %in) {
  29: store float %0, float addrspace(1)* %out

schedule-vs-if-nested-loop.ll
  24: %11 = load <4 x float> addrspace(9)* null
  27: %14 = load <4 x float> addrspace(9)* null
  30: %17 = load <4 x float> addrspace(9)* null
  33: %20 = load <4 x float> addrspace(9)* null
  36: %23 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
  40: %27 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1)
  44: %31 = load <4 x float> addrspace(9)* getelementptr ([1024 x <4 x float>] addrspace(9)* null, i64 0, i32 1 [all...]

predicates.ll
  9: define void @simple_if(i32 addrspace(1)* %out, i32 %in) {
  20: store i32 %2, i32 addrspace(1)* %out
  28: define void @simple_if_else(i32 addrspace(1)* %out, i32 %in) {
  43: store i32 %3, i32 addrspace(1)* %out
  52: define void @nested_if(i32 addrspace(1)* %out, i32 %in) {
  68: store i32 %4, i32 addrspace(1)* %out
  78: define void @nested_if_else(i32 addrspace(1)* %out, i32 %in) {
  98: store i32 %5, i32 addrspace(1)* %out

disconnected-predset-break-bug.ll
  11: define void @loop_ge(i32 addrspace(1)* nocapture %out, i32 %iterations) nounwind {
  20: %arrayidx = getelementptr inbounds i32 addrspace(1)* %out, i32 %ai.06
  21: store i32 %i.07, i32 addrspace(1)* %arrayidx, align 4

llvm.SI.fs.interp.constant.ll
  6: define void @main(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg) "ShaderType"="0" {
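The SI entry above uses a different address space: shader resource descriptors arrive as inreg pointers into the constant address space, addrspace(2), rather than global memory. A hedged sketch of that argument shape (the argument names, the trimmed argument list, the block label, and the dead load are invented for illustration; the real test's intrinsic calls are omitted):

```llvm
; Illustrative only: descriptor pointers live in the constant address space (2)
; and are marked inreg so they arrive in scalar registers; loads through them
; are typically uniform, constant-cache reads.
define void @main(<16 x i8> addrspace(2)* inreg %desc0, <32 x i8> addrspace(2)* inreg %samplers, i32 inreg %prim_mask) "ShaderType"="0" {
main_body:
  %d = load <16 x i8> addrspace(2)* %desc0, align 16
  ret void
}
```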
/external/llvm/test/CodeGen/X86/
movgs.ll
  7: %tmp = load i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1]
  21: define i64 @test2(void (i8*)* addrspace(256)* %tmp8) nounwind {
  23: %tmp9 = load void (i8*)* addrspace(256)* %tmp8, align 8
  39: define <2 x i64> @pmovsxwd_1(i64 addrspace(256)* %p) nounwind readonly {
  41: %0 = load i64 addrspace(256)* %p
  62: %tmp = load i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1 [all...]
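Here the address space is not a separate memory at all: the X86 backend treats addrspace(256) as %gs-relative addressing (and 257 as %fs-relative), which is what movgs.ll exercises. A minimal sketch in the same style (the function name and RUN line are invented):

```llvm
; RUN: llc < %s -mtriple=x86_64-linux-gnu
; Illustrative: a plain load through an addrspace(256) pointer is emitted as a
; %gs-prefixed access, roughly "movl %gs:(%rdi), %eax".
define i32 @read_gs(i32 addrspace(256)* %p) nounwind {
entry:
  %v = load i32 addrspace(256)* %p, align 4
  ret i32 %v
}
```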
atomic-dagsched.ll
  21: %4 = load i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8, !tbaa !0
  37: %8 = bitcast i8* %ptrtoarg4 to i32 addrspace(1)*
  38: %asr.iv911 = bitcast i8* %asr.iv9 to <8 x i32> addrspace(1)*
  39: %9 = load <8 x i32> addrspace(1)* %asr.iv911, align 4
  48: %10 = atomicrmw min i32 addrspace(1)* %8, i32 %extract8vector_func.i seq_cst
  49: %11 = atomicrmw min i32 addrspace(1)* %8, i32 %extract9vector_func.i seq_cst
  50: %12 = atomicrmw min i32 addrspace(1)* %8, i32 %extract10vector_func.i seq_cst
  51: %13 = atomicrmw min i32 addrspace(1)* %8, i32 %extract11vector_func.i seq_cst
  52: %14 = atomicrmw min i32 addrspace(1)* %8, i32 %extract12vector_func.i seq_cs [all...]
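Address spaces carry straight through to atomic instructions, as the atomicrmw lines above show on addrspace(1) pointers. A self-contained sketch of that one piece (function and value names are invented):

```llvm
define i32 @atomic_min(i32 addrspace(1)* %p, i32 %v) nounwind {
entry:
  ; Sequentially consistent read-modify-write min on a non-default address space.
  %old = atomicrmw min i32 addrspace(1)* %p, i32 %v seq_cst
  ret i32 %old
}
```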
scalar_widen_div.ll
  7: define void @vectorDiv (<2 x i32> addrspace(1)* %nsource, <2 x i32> addrspace(1)* %dsource, <2 x i32> addrspace(1)* %qdest) nounwind {
  13: %nsource.addr = alloca <2 x i32> addrspace(1)*, align 4
  14: %dsource.addr = alloca <2 x i32> addrspace(1)*, align 4
  15: %qdest.addr = alloca <2 x i32> addrspace(1)*, align 4
  17: store <2 x i32> addrspace(1)* %nsource, <2 x i32> addrspace(1)** %nsource.addr
  18: store <2 x i32> addrspace(1)* %dsource, <2 x i32> addrspace(1)** %dsource.add [all...]
/external/llvm/test/Transforms/InstCombine/
2009-01-16-PointerAddrSpace.ll
  1: ; RUN: opt < %s -instcombine -S | grep "store.*addrspace(1)"
  8: %Q = bitcast i32* %P to i32 addrspace(1)*
  9: store i32 0, i32 addrspace(1)* %Q, align 4
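This regression test guards against InstCombine dropping the address space when it folds a pointer cast into a store: the grep fails if the store ends up using the addrspace(0) pointer. A sketch of how the full function plausibly looks (the define wrapper is assumed from context; IR of this vintage still allowed a plain bitcast between address spaces, where current IR would use addrspacecast):

```llvm
; RUN: opt < %s -instcombine -S | grep "store.*addrspace(1)"
define void @test(i32* %P) {
entry:
  %Q = bitcast i32* %P to i32 addrspace(1)*
  store i32 0, i32 addrspace(1)* %Q, align 4
  ret void
}
```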
/external/llvm/test/CodeGen/XCore/
events.ll
  3: declare void @llvm.xcore.setv.p1i8(i8 addrspace(1)* %r, i8* %p)
  8: define i32 @f(i8 addrspace(1)* %r) nounwind {
  13: call void @llvm.xcore.setv.p1i8(i8 addrspace(1)* %r, i8* blockaddress(@f, %L1))
  14: call void @llvm.xcore.setv.p1i8(i8 addrspace(1)* %r, i8* blockaddress(@f, %L2))
  27: define i32 @g(i8 addrspace(1)* %r) nounwind {
  32: call void @llvm.xcore.setv.p1i8(i8 addrspace(1)* %r, i8* blockaddress(@f, %L1))
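On XCore, addrspace(1) models resource handles (channels, ports, timers) rather than a second memory, and llvm.xcore.setv installs an event vector on such a resource. A minimal call-site sketch using only the declaration quoted above (the wrapper function and its parameters are invented):

```llvm
declare void @llvm.xcore.setv.p1i8(i8 addrspace(1)*, i8*)

; Illustrative wrapper: %r is a resource handle (addrspace(1)); %vector is an
; ordinary code address for the event to branch to.
define void @install_vector(i8 addrspace(1)* %r, i8* %vector) nounwind {
entry:
  call void @llvm.xcore.setv.p1i8(i8 addrspace(1)* %r, i8* %vector)
  ret void
}
```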
/external/clang/test/CodeGenOpenCL/
local.cl
  4: // CHECK: @foo.i = internal addrspace(2)
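The CHECK line pins down how clang emits a function-scope __local variable for this test's target: it becomes an internal global in the target's local address space, numbered 2 here (other address maps use a different number, e.g. 3 on SPIR and NVPTX). A hedged sketch of the IR shape being matched; the element type, initializer, and alignment are assumptions, not copied from the test:

```llvm
; Roughly what the CHECK accepts: a __local int declared inside kernel foo()
; materialises as an internal global in the local address space.
@foo.i = internal addrspace(2) global i32 undef, align 4
```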
spir32_target.cl
  19: //CHECK: store i64 4, i64 addrspace(1)*
  21: //CHECK: store i64 8, i64 addrspace(1)*

spir64_target.cl
  18: //CHECK: store i64 8, i64 addrspace(1)*
  20: //CHECK: store i64 16, i64 addrspace(1)*
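These two tests check pointer sizes on the SPIR targets by storing sizeof-style values through a __global (addrspace(1)) i64 out-pointer: pointers are 4 bytes on spir and 8 bytes on spir64, hence the doubled constants in the CHECK lines. A hedged sketch of IR that would satisfy the first spir64 CHECK (the triple line, kernel name, and parameter are assumptions):

```llvm
target triple = "spir64-unknown-unknown"

; Invented kernel: on spir64, storing sizeof(void*) produces the "store i64 8"
; matched above; the destination is a __global pointer, i.e. addrspace(1).
define spir_kernel void @pointer_size(i64 addrspace(1)* %out) {
entry:
  store i64 8, i64 addrspace(1)* %out, align 8
  ret void
}
```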
/external/llvm/test/Transforms/ConstantMerge/
dont-merge.ll
  21: @T2b = internal addrspace(30) constant i32 224
  26: define void @test2(i32** %P1, i32 addrspace(30)** %P2) {
  28: store i32 addrspace(30)* @T2b, i32 addrspace(30)** %P2
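ConstantMerge folds identical internal constants together, but two constants with the same value in different address spaces are distinct objects and must stay separate; that is what test2 exercises. A sketch of the scenario (the addrspace(0) twin @T2a and its store are assumed from context, not quoted above):

```llvm
; Same value, different address spaces: ConstantMerge must keep both globals.
@T2a = internal constant i32 224
@T2b = internal addrspace(30) constant i32 224

define void @test2(i32** %P1, i32 addrspace(30)** %P2) {
  store i32* @T2a, i32** %P1
  store i32 addrspace(30)* @T2b, i32 addrspace(30)** %P2
  ret void
}
```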
/external/llvm/test/Transforms/CorrelatedValuePropagation/
non-null.ll
  80: declare void @llvm.memcpy.p1i8.p1i8.i32(i8 addrspace(1) *, i8 addrspace(1) *, i32, i32, i1)
  81: define void @test8(i8 addrspace(1) * %dest, i8 addrspace(1) * %src) {
  83: call void @llvm.memcpy.p1i8.p1i8.i32(i8 addrspace(1) * %dest, i8 addrspace(1) * %src, i32 1, i32 1, i1 false)
  86: %KEEP1 = icmp ne i8 addrspace(1) * %dest, null
  88: %KEEP2 = icmp ne i8 addrspace(1) * %src, null
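In address space 0 a memcpy call lets the pass conclude its operands are non-null (dereferencing null is undefined there), so later null checks fold away; the KEEP names in test8 signal that this inference must not fire for addrspace(1), where null can be a legitimate address. A self-contained sketch of that shape (the result plumbing at the end is invented so the compares have a use):

```llvm
declare void @llvm.memcpy.p1i8.p1i8.i32(i8 addrspace(1)*, i8 addrspace(1)*, i32, i32, i1)

define i1 @test8_sketch(i8 addrspace(1)* %dest, i8 addrspace(1)* %src) {
entry:
  ; memcpy over addrspace(1) pointers: no non-null fact may be derived from it.
  call void @llvm.memcpy.p1i8.p1i8.i32(i8 addrspace(1)* %dest, i8 addrspace(1)* %src, i32 1, i32 1, i1 false)
  %keep1 = icmp ne i8 addrspace(1)* %dest, null
  %keep2 = icmp ne i8 addrspace(1)* %src, null
  %both = and i1 %keep1, %keep2
  ret i1 %both
}
```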
/external/llvm/test/CodeGen/NVPTX/
annotations.ll
  7: @texture = internal addrspace(1) global i64 0, align 8
  9: @surface = internal addrspace(1) global i64 0, align 8
  54: !7 = metadata !{i64 addrspace(1)* @texture, metadata !"texture", i32 1}
  55: !8 = metadata !{i64 addrspace(1)* @surface, metadata !"surface", i32 1}
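For NVPTX, texture and surface references are modeled as i64 handles living in the global address space (addrspace(1)), and the backend learns their role from nvvm.annotations metadata rather than from the type. A sketch stitched together from the lines quoted above (the named-metadata line is an assumption consistent with this LLVM vintage's "metadata" keyword syntax):

```llvm
; i64 handles in global memory; the annotations mark them as a texture and a
; surface respectively.
@texture = internal addrspace(1) global i64 0, align 8
@surface = internal addrspace(1) global i64 0, align 8

!nvvm.annotations = !{!7, !8}
!7 = metadata !{i64 addrspace(1)* @texture, metadata !"texture", i32 1}
!8 = metadata !{i64 addrspace(1)* @surface, metadata !"surface", i32 1}
```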