/external/llvm/test/CodeGen/AMDGPU/

  fmed3.ll
    17: %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
    19: %a = load float, float addrspace(1)* %gep0
    35: %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
    37: %a = load float, float addrspace(1)* %gep0
    53: %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
    55: %a = load float, float addrspace(1)* %gep0
    69: %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
    71: %a = load float, float addrspace(1)* %gep0
    86: %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
    88: %a = load float, float addrspace(1)* %gep0
    [all...]

  max3.ll
    9: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    13: %a = load i32, i32 addrspace(1)* %gep0, align 4
    28: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    32: %a = load i32, i32 addrspace(1)* %gep0, align 4

  promote-alloca-no-opts.ll
    11: %gep0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
    13: store i32 0, i32* %gep0
    27: %gep0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
    29: store i32 0, i32* %gep0

  drop-mem-operand-move-smrd.ll
    16: %gep0 = getelementptr i32, i32 addrspace(1)* %gptr0, i32 %idx
    24: %gptr0.phi = phi i32 addrspace(1)* [ %gep0, %entry ], [ %gep0.inc, %for.body ]
    28: %val0 = load i32, i32 addrspace(1)* %gep0
    32: %gep0.inc = getelementptr i32, i32 addrspace(1)* %gptr0.phi, i32 4

  min3.ll
    9: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    13: %a = load i32, i32 addrspace(1)* %gep0, align 4
    28: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    32: %a = load i32, i32 addrspace(1)* %gep0, align 4
    49: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    60: %a = load i32, i32 addrspace(1)* %gep0, align 4
    83: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    94: %a = load i32, i32 addrspace(1)* %gep0, align 4

  mad-sub.ll
    15: %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
    21: %a = load volatile float, float addrspace(1)* %gep0, align 4
    39: %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
    45: %a = load volatile float, float addrspace(1)* %gep0, align 4
    60: %gep0 = getelementptr double, double addrspace(1)* %ptr, i64 %tid.ext
    66: %a = load volatile double, double addrspace(1)* %gep0, align 8
    84: %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
    90: %a = load volatile float, float addrspace(1)* %gep0, align 4
    109: %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
    115: %a = load volatile float, float addrspace(1)* %gep0, align
    [all...]

  simplify-demanded-bits-build-pair.ll
    25: %gep0 = getelementptr i64, i64* %alloca, i64 0
    29: store i64 24, i64* %gep0, align 8

  smed3.ll
    10: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    12: %a = load i32, i32 addrspace(1)* %gep0
    29: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    31: %a = load i32, i32 addrspace(1)* %gep0
    49: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    51: %a = load i32, i32 addrspace(1)* %gep0
    68: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    70: %a = load i32, i32 addrspace(1)* %gep0
    87: %gep0 = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
    89: %a = load i64, i64 addrspace(1)* %gep0
    [all...]

  private-memory-r600.ll
    199: %gep0 = getelementptr inbounds [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 0
    201: store i8 0, i8* %gep0
    213: %gep0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
    215: store i32 0, i32* %gep0
    226: %gep0 = getelementptr inbounds [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 0
    228: store i64 0, i64* %gep0
    241: %gep0 = getelementptr inbounds [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 0, i32 1
    243: store i32 0, i32* %gep0
    254: %gep0 = getelementptr inbounds [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 0, i32 1
    256: store i32 0, i32* %gep0
    [all...]

  amdgpu.private-memory.ll
    55: ; HSAOPT: [[GEP0:%[0-9]+]] = getelementptr inbounds i32, i32 addrspace(2)* [[CAST_DISPATCH_PTR]], i64 1
    56: ; HSAOPT: [[LDXY:%[0-9]+]] = load i32, i32 addrspace(2)* [[GEP0]], align 4, !invariant.load !0
    302: %gep0 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 0
    304: store i8 0, i8* %gep0
    316: %gep0 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
    318: store i32 0, i32* %gep0
    329: %gep0 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 0
    331: store i64 0, i64* %gep0
    344: %gep0 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 0, i32 1
    346: store i32 0, i32* %gep0
    [all...]

  umed3.ll
    10: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    12: %a = load i32, i32 addrspace(1)* %gep0
    29: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    31: %a = load i32, i32 addrspace(1)* %gep0
    49: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    51: %a = load i32, i32 addrspace(1)* %gep0
    68: %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
    70: %a = load i32, i32 addrspace(1)* %gep0
    87: %gep0 = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
    89: %a = load i64, i64 addrspace(1)* %gep0
    [all...]

  captured-frame-index.ll
    157: %gep0.tmp0 = getelementptr [4096 x i32], [4096 x i32]* %tmp0, i32 0, i32 0
    158: store volatile i32 0, i32* %gep0.tmp0
    161: %gep0.tmp1 = getelementptr [4096 x i32], [4096 x i32]* %tmp0, i32 0, i32 14
    162: store i32* %gep0.tmp1, i32* addrspace(1)* %ptr

  private-element-size.ll
    47: %gep0 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %alloca, i32 0, i32 0
    49: store <4 x i32> zeroinitializer, <4 x i32>* %gep0
    117: %gep0 = getelementptr inbounds [2 x <8 x i32>], [2 x <8 x i32>]* %alloca, i32 0, i32 0
    119: store <8 x i32> zeroinitializer, <8 x i32>* %gep0
    154: %gep0 = getelementptr inbounds [2 x i64], [2 x i64]* %alloca, i32 0, i32 0
    156: store i64 0, i64* %gep0
    190: %gep0 = getelementptr inbounds [2 x double], [2 x double]* %alloca, i32 0, i32 0
    192: store double 0.0, double* %gep0
    239: %gep0 = getelementptr inbounds [2 x <2 x i64>], [2 x <2 x i64>]* %alloca, i32 0, i32 0
    241: store <2 x i64> zeroinitializer, <2 x i64>* %gep0
    [all...]

  unaligned-load-store.ll
    545: %gep0 = getelementptr i32, i32 addrspace(2)* %p, i64 1
    547: %v1 = load i32, i32 addrspace(2)* %gep0, align 4
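Most of the AMDGPU matches above share one shape: %gep0 is a getelementptr into a global (addrspace(1)) buffer at index %tid, and the next use of %gep0 is a load through it. Below is a minimal LLVM IR sketch of that pattern; the function name, the output buffer, and taking %tid as a plain argument are illustrative assumptions, not taken from any one of the listed tests.

    ; Sketch only: the listed tests compute %tid themselves; here it is an argument.
    define void @gep0_load_pattern(float addrspace(1)* %aptr, float addrspace(1)* %out, i32 %tid) {
      ; address element %tid of the input buffer (the fmed3.ll / mad-sub.ll shape)
      %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
      ; load the element addressed through %gep0
      %a = load float, float addrspace(1)* %gep0, align 4
      store float %a, float addrspace(1)* %out, align 4
      ret void
    }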
/external/llvm/test/Transforms/SLPVectorizer/X86/

  phi_overalignedtype.ll
    25: %i2.gep0 = getelementptr inbounds double, double* %i2, i64 0
    26: %i2.0 = load double, double* %i2.gep0, align 16

  phi.ll
    228: %i2.gep0 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 0
    229: %i2.0 = load x86_fp80, x86_fp80* %i2.gep0, align 16
/external/llvm/unittests/IR/

  InstructionsTest.cpp
    327: GetElementPtrInst *Gep0 = GetElementPtrInst::Create(I32Ty, PtrVecA, C2xi32a);
    332: CastInst *BTC0 = new BitCastInst(Gep0, V2xi8PTy);
    342: EXPECT_NE(S0, Gep0);
    352: GetPointerBaseWithConstantOffset(Gep0, Offset, TD);
    358: GetElementPtrInst *GepII0 = GetElementPtrInst::Create(I32Ty, Gep0, C2xi32b);
    383: delete Gep0;
/external/llvm/test/Transforms/SROA/

  vector-promotion.ll
    270: %a.gep0 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 0
    271: %a.cast0 = bitcast i32* %a.gep0 to <2 x i32>*
    304: %a.gep0 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 0
    305: %a.cast0 = bitcast i32* %a.gep0 to <2 x i32>*
    337: %a.gep0 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 0
    338: %a.cast0 = bitcast float* %a.gep0 to i8*
    370: %a.gep0 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 0
    371: %a.cast0 = bitcast float* %a.gep0 to i8*
  basictest.ll
    [all...]
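In the vector-promotion.ll matches just above, %a.gep0 addresses element 0 of a vector alloca and is immediately bitcast to a pointer of a different type, the kind of mixed-type access the SROA vector-promotion tests exercise. Below is a small self-contained sketch of that shape, with an assumed alloca and store wrapped around the two excerpted lines.

    define void @gep0_bitcast_pattern() {
      %a = alloca <4 x i32>
      ; pointer to lane 0 of the vector (the line 270/304 shape)
      %a.gep0 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 0
      ; reinterpret the i32 element pointer as a <2 x i32> pointer (the line 271/305 shape)
      %a.cast0 = bitcast i32* %a.gep0 to <2 x i32>*
      store <2 x i32> zeroinitializer, <2 x i32>* %a.cast0
      ret void
    }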
/external/freetype/src/truetype/

  ttobjs.h
    94: FT_UShort gep0;    (member in struct: TT_GraphicsState_)

  ttinterp.c
    538: exec->GS.gep0 = 1;
    [all...]

  ttobjs.c
    [all...]
/external/llvm/test/Transforms/InstCombine/

  unpack-fca.ll
    64: ; CHECK-NEXT: [[GEP0:%[a-z0-9\.]+]] = getelementptr inbounds [2 x %B], [2 x %B]* %ab.ptr, i64 0, i64 0, i32 0
    66: ; CHECK-NEXT: store i8* [[EV0]], i8** [[GEP0]], align 8