/external/llvm/test/CodeGen/X86/

statepoint-forward.ll
  23  define i1 @test_load_forward(i32 addrspace(1)* addrspace(1)* %p) gc "statepoint-example" {
  25    %before = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %p
  26    %cmp1 = call i1 @f(i32 addrspace(1)* %before)
  28    %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @func, i32 0, i32 0, i32 0, i32 0, i32 addrspace(1)* addrspace(1)* %p)
  29    %pnew = call i32 addrspace(1)* addrspace(1)* @llvm.experimental.gc.relocate.p1p1i32(token %safepoint_token, i32 7, i32 7 [all...]

statepoint-invoke.ll
   5  declare void @"some_call"(i64 addrspace(1)*)
   6  declare i64 addrspace(1)* @"some_other_call"(i64 addrspace(1)*)
  10  define i64 addrspace(1)* @test_basic(i64 addrspace(1)* %obj,
  11                                       i64 addrspace(1)* %obj1)
  17    %0 = invoke token (i64, i32, void (i64 addrspace(1)*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidp1i64f(i64 0, i32 0, void (i64 addrspace(1)*)* @some_call, i32 1, i32 0, i64 addrspace(1)* %obj, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i64 addrspace(1)* %obj, i64 addrspace(1)* %obj1 [all...]

statepoint-vector-bad-spill.ll
   7  define <2 x i8 addrspace(1)*> @test0(i8 addrspace(1)* %el, <2 x i8 addrspace(1)*>* %vec_ptr) gc "statepoint-example" {
  11    %tok0 = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @do_safepoint, i32 0, i32 0, i32 0, i32 0, i8 addrspace(1)* %el)
  12    %el.relocated = call i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tok0, i32 7, i32 7)
  14    %obj.pre = load <2 x i8 addrspace(1)*>, <2 x i8 addrspace(1)*>* %vec_ptr
  15    %obj = insertelement <2 x i8 addrspace(1)*> %obj.pre, i8 addrspace(1)* %el.relocated, i32 0 ; No real objective here, except to use %el
  17    %tok1 = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @do_safepoint, i32 0, i32 0, i32 0, i32 0, <2 x i8 addrspace(1)*> %obj [all...]

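The three statepoint tests above share one pattern: a garbage-collected pointer (address space 1 under the statepoint-example GC strategy) that is live across a safepoint must not be reused directly; it has to be re-obtained from the matching gc.relocate. A minimal sketch of that pattern, with illustrative names (only the intrinsics and operand layout are taken from the tests):

  declare void @do_safepoint()
  declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
  declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32, i32)

  define i8 addrspace(1)* @sketch(i8 addrspace(1)* %obj) gc "statepoint-example" {
    ; %obj is passed as a gc argument, so the collector may move it at the safepoint
    %tok = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @do_safepoint, i32 0, i32 0, i32 0, i32 0, i8 addrspace(1)* %obj)
    ; after the safepoint, only the relocated value is valid
    %obj.relocated = call i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tok, i32 7, i32 7)
    ret i8 addrspace(1)* %obj.relocated
  }
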
/external/llvm/test/CodeGen/AMDGPU/

or.ll
  12  define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  13    %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  14    %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
  15    %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
  17    store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  31  define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  32    %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  33    %a = load <4 x i32>, <4 x i32> addrspace(1) * %i [all...]

promote-alloca-padding-size-estimate.ll
  26  @lds0 = internal unnamed_addr addrspace(3) global [32 x <4 x i32>] undef, align 16
  27  @lds2 = internal unnamed_addr addrspace(3) global [32 x i64] undef, align 8
  28  @lds1 = internal unnamed_addr addrspace(3) global [73 x i32] undef, align 4
  33  define void @promote_alloca_size_order_0(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in, i32 %idx) #0 {
  36    %tmp0 = load i32, i32 addrspace(1)* %in, align 4
  39    %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  40    %tmp1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
  45    store i32 %tmp2, i32 addrspace(1)* %out, align 4
  48    %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 [all...]

xor.ll
  13  define void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1) {
  14    %a = load <2 x i32>, <2 x i32> addrspace(1) * %in0
  15    %b = load <2 x i32>, <2 x i32> addrspace(1) * %in1
  17    store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  32  define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) {
  33    %a = load <4 x i32>, <4 x i32> addrspace(1) * %in [all...]

register-count-comments.ll
  12  define void @foo(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %abase, i32 addrspace(1)* %bbase) nounwind {
  15    %aptr = getelementptr i32, i32 addrspace(1)* %abase, i32 %tid
  16    %bptr = getelementptr i32, i32 addrspace(1)* %bbase, i32 %tid
  17    %outptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  18    %a = load i32, i32 addrspace(1)* %aptr, align 4
  19    %b = load i32, i32 addrspace(1)* %bptr, align 4
  21    store i32 %result, i32 addrspace(1)* %outptr, align 4
  27  define void @one_vgpr_used(i32 addrspace(1)* %out, i32 %x) nounwind [all...]

sub.ll
  11  define void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  12    %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
  13    %a = load i32, i32 addrspace(1)* %in
  14    %b = load i32, i32 addrspace(1)* %b_ptr
  16    store i32 %result, i32 addrspace(1)* %out
  28  define void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  29    %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  30    %a = load <2 x i32>, <2 x i32> addrspace(1) * %i [all...]

mad-sub.ll
  12  define void @mad_sub_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 {
  15    %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
  17    %gep1 = getelementptr float, float addrspace(1)* %ptr, i64 %add1
  19    %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
  20    %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
  21    %a = load volatile float, float addrspace(1)* %gep0, align 4
  22    %b = load volatile float, float addrspace(1)* %gep1, align 4
  23    %c = load volatile float, float addrspace(1)* %gep2, align 4
  26    store float %sub, float addrspace(1)* %outgep, align [all...]

fma-combine.ll
  16  define void @combine_to_fma_f64_0(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
  18    %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
  19    %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
  20    %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
  21    %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
  23    %a = load volatile double, double addrspace(1)* %gep.0
  24    %b = load volatile double, double addrspace(1)* %gep.1
  25    %c = load volatile double, double addrspace(1)* %gep.2
  29    store double %fma, double addrspace(1)* %gep.ou [all...]

large-work-group-promote-alloca.ll
   3  ; CHECK: @promote_alloca_size_63.stack = internal unnamed_addr addrspace(3) global [63 x [5 x i32]] undef, align 4
   5  define void @promote_alloca_size_63(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
   8    %0 = load i32, i32 addrspace(1)* %in, align 4
  11    %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
  12    %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
  17    store i32 %2, i32 addrspace(1)* %out, align 4
  20    %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
  21    store i32 %3, i32 addrspace(1)* %arrayidx13
  25  ; CHECK: @promote_alloca_size_256.stack = internal unnamed_addr addrspace(3) global [256 x [5 x i32]] undef, align [all...]

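Both promote-alloca tests (this one and promote-alloca-padding-size-estimate.ll above) drive the AMDGPU promote-alloca pass, which, as the CHECK lines show, replaces each work item's private stack array with a slice of a single shared addrspace(3) (LDS) array sized for the maximum work-group size. A rough before/after sketch, with an assumed kernel name and the per-work-item indexing elided:

  ; before: a private scratch array per work item
  define void @kern(i32 addrspace(1)* %out, i32 %idx) {
    %stack = alloca [5 x i32], align 4
    %slot = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %idx
    store i32 4, i32* %slot, align 4
    %val = load i32, i32* %slot, align 4
    store i32 %val, i32 addrspace(1)* %out, align 4
    ret void
  }

  ; after (sketch): one LDS array holding a [5 x i32] slice per work item,
  ; 63 slices when function attributes bound the work-group size at 63:
  ; @kern.stack = internal unnamed_addr addrspace(3) global [63 x [5 x i32]] undef, align 4
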
local-64.ll
   8  define void @local_i32_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
   9    %gep = getelementptr i32, i32 addrspace(3)* %in, i32 7
  10    %val = load i32, i32 addrspace(3)* %gep, align 4
  11    store i32 %val, i32 addrspace(1)* %out, align 4
  18  define void @local_i32_load_0_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
  19    %val = load i32, i32 addrspace(3)* %in, align 4
  20    store i32 %val, i32 addrspace(1)* %out, align 4
  28  define void @local_i8_load_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind [all...]

fmuladd.ll
  11  define void @fmuladd_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
  12                           float addrspace(1)* %in2, float addrspace(1)* %in3) {
  13    %r0 = load float, float addrspace(1)* %in1
  14    %r1 = load float, float addrspace(1)* %in2
  15    %r2 = load float, float addrspace(1)* %in3
  17    store float %r3, float addrspace(1)* %out
  24  define void @fmuladd_f64(double addrspace(1)* %out, double addrspace(1)* %in1 [all...]

array-ptr-calc-i64.ll
  10  define void @test_array_ptr_calc(i32 addrspace(1)* noalias %out, [1025 x i32] addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
  13    %a_ptr = getelementptr [1025 x i32], [1025 x i32] addrspace(1)* %inA, i32 %tid, i32 0
  14    %b_ptr = getelementptr i32, i32 addrspace(1)* %inB, i32 %tid
  15    %a = load i32, i32 addrspace(1)* %a_ptr
  16    %b = load i32, i32 addrspace(1)* %b_ptr
  18    store i32 %result, i32 addrspace(1)* %out

bitreverse.ll
  18  define void @s_brev_i16(i16 addrspace(1)* noalias %out, i16 %val) #0 {
  20    store i16 %brev, i16 addrspace(1)* %out
  26  define void @v_brev_i16(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %valptr) #0 {
  27    %val = load i16, i16 addrspace(1)* %valptr
  29    store i16 %brev, i16 addrspace(1)* %out
  39  define void @s_brev_i32(i32 addrspace(1)* noalias %out, i32 %val) #0 {
  41    store i32 %brev, i32 addrspace(1)* %out
  50  define void @v_brev_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) #0 [all...]

extract_vector_elt-f64.ll
   8  define void @extract_vector_elt_v3f64_2(double addrspace(1)* %out, <3 x double> addrspace(1)* %in) #0 {
   9    %ld = load volatile <3 x double>, <3 x double> addrspace(1)* %in
  11    store volatile double %elt, double addrspace(1)* %out
  16  define void @dyn_extract_vector_elt_v3f64(double addrspace(1)* %out, <3 x double> %foo, i32 %elt) #0 {
  18    store volatile double %dynelt, double addrspace(1)* %out
  23  define void @dyn_extract_vector_elt_v4f64(double addrspace(1)* %out, <4 x double> %foo, i32 %elt) #0 {
  25    store volatile double %dynelt, double addrspace(1)* %out

store.r600.ll
   9  define void @store_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  10    %1 = load <4 x i32>, <4 x i32> addrspace(1) * %in
  11    store <4 x i32> %1, <4 x i32> addrspace(1)* %out
  18  define void @store_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
  19    %1 = load <4 x float>, <4 x float> addrspace(1) * %in
  20    store <4 x float> %1, <4 x float> addrspace(1)* %out

trunc-bitcast-vector.ll
   7  define void @trunc_i64_bitcast_v2i32(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   8    %ld = load <2 x i32>, <2 x i32> addrspace(1)* %in
  11    store i32 %trunc, i32 addrspace(1)* %out
  18  define void @trunc_i96_bitcast_v3i32(i32 addrspace(1)* %out, <3 x i32> addrspace(1)* %in) {
  19    %ld = load <3 x i32>, <3 x i32> addrspace(1)* %in
  22    store i32 %trunc, i32 addrspace(1)* %out
  29  define void @trunc_i128_bitcast_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) [all...]

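trunc-bitcast-vector.ll feeds instruction selection a vector load whose result is bitcast to a wide integer and then truncated; the expected lowering is a load of just the low element. The input shape, reconstructed as an illustrative sketch (the snippet's elided middle lines are not quoted here):

  define void @sketch(i32 addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
    %ld = load <2 x i32>, <2 x i32> addrspace(1)* %in
    %bc = bitcast <2 x i32> %ld to i64
    %trunc = trunc i64 %bc to i32   ; should select to a load of element 0 only
    store i32 %trunc, i32 addrspace(1)* %out
    ret void
  }
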
/external/llvm/test/Transforms/RewriteStatepointsForGC/

base-pointers-6.ll
   7  define i64 addrspace(1)* @test(i64 addrspace(1)* %base_obj_x, i64 addrspace(1)* %base_obj_y, i1 %runtime_condition_x, i1 %runtime_condition_y) gc "statepoint-example" {
  15    %x_a = getelementptr i64, i64 addrspace(1)* %base_obj_x, i32 1
  19    %x_b = getelementptr i64, i64 addrspace(1)* %base_obj_x, i32 2
  23    %x = phi i64 addrspace(1)* [ %x_a, %bump_here_a ], [ %x_b, %bump_here_b ]
  27    %y = getelementptr i64, i64 addrspace(1)* %base_obj_y, i32 1
  32  ; CHECK: %merged_value.base = phi i64 addrspace(1)* [ %base_obj_x, %merge_here ], [ %base_obj_y, %there ]
  33  ; CHECK-NEXT: %merged_value = phi i64 addrspace(1)* [ %x, %merge_here ], [ %y, %there ]
  34    %merged_value = phi i64 addrspace(1)* [ %x, %merge_here ], [ %y, %there [all...]

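The CHECK lines make the point of base-pointers-6.ll explicit: when a phi merges derived pointers, RewriteStatepointsForGC must materialize a parallel phi over the corresponding base objects so that relocation can update both. In miniature, with illustrative names:

  define i64 addrspace(1)* @sketch(i64 addrspace(1)* %base_a, i64 addrspace(1)* %base_b, i1 %cond) gc "statepoint-example" {
  entry:
    %a = getelementptr i64, i64 addrspace(1)* %base_a, i32 1
    %b = getelementptr i64, i64 addrspace(1)* %base_b, i32 1
    br i1 %cond, label %left, label %right
  left:
    br label %merge
  right:
    br label %merge
  merge:
    ; the pass inserts a parallel base phi here:
    ;   %p.base = phi i64 addrspace(1)* [ %base_a, %left ], [ %base_b, %right ]
    %p = phi i64 addrspace(1)* [ %a, %left ], [ %b, %right ]
    ret i64 addrspace(1)* %p
  }
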
/external/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/

extended-index.ll
  11  define void @basic_merge_sext_index(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c) #0 {
  15    %a.idx.x = getelementptr inbounds float, float addrspace(1)* %a, i64 %sext.id.x
  16    %c.idx.x = getelementptr inbounds float, float addrspace(1)* %c, i64 %sext.id.x
  17    %a.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %a.idx.x, i64 1
  18    %c.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %c.idx.x, i64 1
  20    %ld.c = load float, float addrspace(1)* %c.idx.x, align 4
  21    %ld.c.idx.1 = load float, float addrspace(1)* %c.idx.x.1, align 4
  23    store float 0.0, float addrspace(1)* %a.idx.x, align [all...]

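extended-index.ll checks that the load/store vectorizer still recognizes adjacency when the +1 offset arrives through a sign- or zero-extended index. Stripped of the extension, the shape it wants to merge is simply two consecutive 4-byte accesses (an illustrative sketch, not the test's exact code):

  define void @sketch(float addrspace(1)* %p, float addrspace(1)* %q) {
    %p.1 = getelementptr inbounds float, float addrspace(1)* %p, i64 1
    ; consecutive 4-byte loads: candidates for a single <2 x float> load
    ; when alignment and aliasing allow
    %a = load float, float addrspace(1)* %p, align 4
    %b = load float, float addrspace(1)* %p.1, align 4
    %sum = fadd float %a, %b
    store float %sum, float addrspace(1)* %q, align 4
    ret void
  }
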
/external/clang/test/CodeGenOpenCL/

const-str-array-decay.cl
   9  // CHECK: i8 addrspace(3)* getelementptr inbounds ([20 x i8], [20 x i8] addrspace(3)*

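The single CHECK line in const-str-array-decay.cl verifies that a string literal decays to a pointer via a constant getelementptr into an addrspace(3) array. At the IR level the shape is roughly this sketch (the global name and string contents are made up; the test's literal is some 20-byte string):

  @str = internal unnamed_addr addrspace(3) constant [20 x i8] c"0123456789012345678\00"

  define i8 addrspace(3)* @decay() {
    ; array-to-pointer decay: a constant GEP to element 0
    ret i8 addrspace(3)* getelementptr inbounds ([20 x i8], [20 x i8] addrspace(3)* @str, i32 0, i32 0)
  }
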
/external/llvm/test/CodeGen/NVPTX/

addrspacecast-gvar.ll
   9  @g = addrspace(1) global i32 42
  10  @g2 = addrspace(1) global i32* addrspacecast (i32 addrspace(1)* @g to i32*)
  11  @g3 = addrspace(1) global i32 addrspace(1)* @g
  12  @g4 = constant {i32*, i32*} {i32* null, i32* addrspacecast (i32 addrspace(1)* @g to i32*)}
  13  @g5 = constant {i32*, i32*} {i32* null, i32* addrspacecast (i32 addrspace(1)* getelementptr (i32, i32 addrspace(1)* @g, i32 2) to i32*)}

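addrspacecast-gvar.ll is about addrspacecast as a constant expression: a global living in a specific address space (addrspace(1), NVPTX's global space) initializes a generic pointer at compile time, with no runtime cast. The core of it, as a trimmed, illustrative version of @g/@g2 above:

  @g = addrspace(1) global i32 42
  ; generic pointer to @g, produced by a constant addrspacecast
  @gp = global i32* addrspacecast (i32 addrspace(1)* @g to i32*)
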
global-ordering.ll
  11  @a2 = addrspace(1) global i8 addrspace(1)* @a
  12  @a = addrspace(1) global i8 2
  19  @b2 = addrspace(1) global [2 x i8 addrspace(1)*] [i8 addrspace(1)* @b, i8 addrspace(1)* @b]
  20  @b = addrspace(1) global i8 1

pr16278.ll
   4  @one_f = addrspace(4) global float 1.000000e+00, align 4
   8    %val = load float, float addrspace(4)* @one_f

weak-global.ll
   4  @g = common addrspace(1) global i32 zeroinitializer
   7    %val = load i32, i32 addrspace(1)* @g