/external/llvm/lib/Target/AMDGPU/ |
SILoadStoreOptimizer.cpp |
     67  unsigned EltSize);
     70  unsigned EltSize);
     75  unsigned EltSize);
     80  unsigned EltSize);
    159  unsigned EltSize){
    185  if (offsetsCanBeCombined(Offset0, Offset1, EltSize))
    195  unsigned EltSize) {
    210  unsigned NewOffset0 = Offset0 / EltSize;
    211  unsigned NewOffset1 = Offset1 / EltSize;
    212  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64
  [all...] |
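
The SILoadStoreOptimizer hits above show the core of the DS_READ2 merge: both byte offsets are divided by EltSize, and the opcode is picked by element width. A minimal stand-alone sketch of that idea, assuming an 8-bit limit on the encoded element offsets; the Ds2Opcode enum is a stand-in, not LLVM's opcode enum, and the in-tree pass performs additional checks (e.g. the stride-64 forms) omitted here:

    #include <cstdio>

    // Hypothetical stand-ins for the AMDGPU::DS_READ2_* opcodes.
    enum Ds2Opcode { DS_READ2_B32, DS_READ2_B64 };

    // Two DS loads can be paired only if both byte offsets are multiples of
    // the element size and the resulting element offsets fit the 8-bit
    // offset fields of ds_read2.
    static bool offsetsCanBeCombined(unsigned Offset0, unsigned Offset1,
                                     unsigned EltSize) {
      if (Offset0 == Offset1)
        return false;                      // same element, nothing to pair
      if (Offset0 % EltSize || Offset1 % EltSize)
        return false;                      // not element-aligned
      unsigned EltOffset0 = Offset0 / EltSize;
      unsigned EltOffset1 = Offset1 / EltSize;
      return EltOffset0 <= 255 && EltOffset1 <= 255;  // 8-bit offset operands
    }

    int main() {
      unsigned EltSize = 4;                 // b32 elements
      unsigned Offset0 = 16, Offset1 = 20;  // two adjacent dwords
      if (offsetsCanBeCombined(Offset0, Offset1, EltSize)) {
        Ds2Opcode Opc = (EltSize == 4) ? DS_READ2_B32 : DS_READ2_B64;
        std::printf("merge as %s, offset0=%u offset1=%u\n",
                    Opc == DS_READ2_B32 ? "ds_read2_b32" : "ds_read2_b64",
                    Offset0 / EltSize, Offset1 / EltSize);
      }
      return 0;
    }
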
/external/clang/lib/CodeGen/ |
CGBuilder.h |
    205  /// \param EltSize - the size of the type T in bytes
    206  Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize,
    212  Addr.getAlignment().alignmentAtOffset(Index * EltSize));
    221  /// \param EltSize - the size of the type T in bytes
    223  CharUnits EltSize,
    227  Addr.getAlignment().alignmentAtOffset(Index * EltSize));
    236  /// \param EltSize - the size of the type T in bytes
    237  Address CreateConstGEP(Address Addr, uint64_t Index, CharUnits EltSize,
    241  Addr.getAlignment().alignmentAtOffset(Index * EltSize));
|
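
The CGBuilder.h hits all reduce the alignment question to alignmentAtOffset(Index * EltSize): the alignment known for element Index is the largest power of two that divides both the base alignment and the byte offset. A small illustration of that arithmetic; the helper below is a sketch, not Clang's CharUnits implementation:

    #include <cstdint>
    #include <cstdio>

    // Largest power of two dividing both BaseAlign (a power of two) and
    // Offset, computed as the lowest set bit of (BaseAlign | Offset).
    static uint64_t alignmentAtOffset(uint64_t BaseAlign, uint64_t Offset) {
      if (Offset == 0)
        return BaseAlign;            // offset 0 keeps the full alignment
      return (BaseAlign | Offset) & (~(BaseAlign | Offset) + 1);
    }

    int main() {
      uint64_t BaseAlign = 16;       // e.g. a 16-byte aligned array
      uint64_t EltSize = 4;          // 4-byte elements
      for (uint64_t Index = 0; Index < 5; ++Index)
        std::printf("element %llu: align %llu\n",
                    (unsigned long long)Index,
                    (unsigned long long)alignmentAtOffset(BaseAlign, Index * EltSize));
      return 0;                      // prints 16, 4, 8, 4, 16
    }
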
SwiftCallingConv.cpp |
     75  auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
     77  addTypedData(eltType, begin + i * eltSize);
     83  auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
     85  addTypedData(eltLLVMType, begin, begin + eltSize);
     86  addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);
    239  auto eltSize = (end - begin) / numElts;
    240  assert(eltSize == getTypeStoreSize(CGM, eltTy));
    242  addLegalTypedData(eltTy, begin, begin + eltSize);
    243  begin += eltSize;
  [all...] |
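
In the SwiftCallingConv.cpp hits, a complex value contributes two eltSize-wide byte ranges and an array contributes one range per element at begin + i * eltSize. A toy version of that splitting; Range and splitIntoElements are invented names for the example, standing in for the addTypedData() calls:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Range { uint64_t begin, end; };

    // Element i occupies [begin + i*eltSize, begin + (i+1)*eltSize).
    static std::vector<Range> splitIntoElements(uint64_t begin, uint64_t eltSize,
                                                unsigned numElts) {
      std::vector<Range> ranges;
      for (unsigned i = 0; i != numElts; ++i)
        ranges.push_back({begin + i * eltSize, begin + (i + 1) * eltSize});
      return ranges;
    }

    int main() {
      // A _Complex double at offset 8: two 8-byte elements (real, imag).
      for (const Range &r : splitIntoElements(/*begin=*/8, /*eltSize=*/8, 2))
        std::printf("[%llu, %llu)\n",
                    (unsigned long long)r.begin, (unsigned long long)r.end);
      return 0;
    }
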
/external/mesa3d/src/gallium/auxiliary/draw/ |
draw_pt.c |
    121  } else if (draw->pt.eltSize != draw->pt.user.eltSize) {
    122  /* Flush draw state if eltSize changed.
    138  draw->pt.eltSize = draw->pt.user.eltSize;
    247  if (draw->pt.user.eltSize) {
    250  switch (draw->pt.user.eltSize) {
    389  if (draw->pt.user.eltSize) {
    394  switch (draw->pt.user.eltSize) {
    414  assert(0 && "bad eltSize in draw_arrays()")
  [all...] |
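
The draw_pt.c hits switch on eltSize wherever an index has to be fetched, and flush cached state when it changes; eltSize is the per-index size in bytes (1, 2, or 4, with 0 meaning a non-indexed draw). A stand-alone sketch of the fetch by element size; fetch_index is not the Mesa function, just an illustration of the switch:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static unsigned fetch_index(const void *elts, unsigned eltSize, unsigned i) {
      switch (eltSize) {
      case 1:
        return ((const uint8_t *)elts)[i];    // ubyte indices
      case 2:
        return ((const uint16_t *)elts)[i];   // ushort indices
      case 4:
        return ((const uint32_t *)elts)[i];   // uint indices
      default:
        assert(0 && "bad eltSize in fetch_index()");
        return 0;
      }
    }

    int main() {
      const uint16_t elts[] = { 0, 1, 2, 2, 1, 3 };
      for (unsigned i = 0; i < 6; ++i)
        std::printf("%u ", fetch_index(elts, 2, i));
      std::printf("\n");
      return 0;
    }
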
draw_private.h |
    159  unsigned eltSize;  /* saved eltSize for flushing */
    194  unsigned eltSize
|
draw_pt_vsplit.c |
    206  switch (vsplit->draw->pt.user.eltSize) {
|
/external/mesa3d/src/gallium/drivers/nouveau/codegen/ |
nv50_ir_build_util.cpp |
    471  uint32_t base, int len, int vecDim, int eltSize,
    479  this->eltSize = eltSize;
    486  baseSym->reg.size = eltSize;
    502  return up->getScratch(eltSize);
    520  return up->mkLoadv(typeOfSize(eltSize), static_cast<Symbol *>(sym), ptr);
    552  sym->reg.size = eltSize;
    553  sym->reg.type = typeOfSize(eltSize);
    554  sym->setAddress(baseSym, baseAddr + idx * eltSize);
|
nv50_ir_build_util.h |
    143  uint32_t base, int len, int vecDim, int eltSize,
    167  uint8_t eltSize; // in bytes
|
/external/llvm/test/MC/Disassembler/X86/ |
avx-512.txt |
     82  # TupleType = T1S, 64-bit eltsize
     86  # TupleType = T1S, 32-bit eltsize
     94  # TupleType = FV, broadcast, 64-bit eltsize
     98  # TupleType = FV, broadcast, 32-bit eltsize
|
/external/elfutils/libdw/ |
dwarf_aggregate_size.c |
     53  Dwarf_Word eltsize; local
     55  &eltsize) != 0)
    173  Dwarf_Word stride = eltsize;
|
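
dwarf_aggregate_size() uses the element type's size as the default array stride when no explicit byte stride is present, then multiplies by the element count of each dimension. A simplified model of that arithmetic; array_size is an invented helper, and the real code also handles bit strides and unknown bounds:

    #include <cstdint>
    #include <cstdio>

    // byte_stride == 0 means the DIE carries no DW_AT_byte_stride.
    static uint64_t array_size(uint64_t eltsize, uint64_t byte_stride,
                               const uint64_t *counts, unsigned ndims) {
      uint64_t stride = byte_stride ? byte_stride : eltsize;
      uint64_t size = stride;
      for (unsigned d = 0; d < ndims; ++d)
        size *= counts[d];
      return size;
    }

    int main() {
      const uint64_t counts[] = { 4, 3 };   // int a[4][3]
      std::printf("%llu\n", (unsigned long long)
                  array_size(/*eltsize=*/4, /*byte_stride=*/0, counts, 2)); // 48
      return 0;
    }
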
/frameworks/compile/libbcc/lib/ |
RSX86TranslateGEPPass.cpp |
     89  // Offset = Offset + Index * EltSize for index into an array or a vector
     90  llvm::Value *EltSize = llvm::ConstantInt::get(
     95  EltSize, "", GEP);
|
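
The RSX86TranslateGEPPass hit spells out the rule it materializes as IR: each array or vector index adds Index * EltSize bytes to the running offset. The same accumulation shown on plain integers, for a hypothetical float v[10][4] indexed as v[3][2]:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t Offset = 0;
      // First index: rows of float[4], so EltSize = 4 * sizeof(float) = 16.
      uint64_t i = 3;
      Offset = Offset + i * 16;
      // Second index: floats, EltSize = sizeof(float) = 4.
      uint64_t j = 2;
      Offset = Offset + j * 4;
      std::printf("byte offset of v[3][2] = %llu\n",
                  (unsigned long long)Offset);   // 56
      return 0;
    }
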
/external/swiftshader/third_party/LLVM/lib/Transforms/Scalar/ |
ScalarReplAggregates.cpp |
    363  unsigned EltSize = In->getPrimitiveSizeInBits()/8;
    364  if (EltSize == AllocaSize)
    370  if (Offset % EltSize == 0 && AllocaSize % EltSize == 0 &&
    371  (!VectorTy || EltSize == VectorTy->getElementType()
    375  VectorTy = VectorType::get(In, AllocaSize/EltSize);
    695  unsigned EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType());
    696  Elt = Offset/EltSize;
    697  assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    721  uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType())
  [all...] |
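
The ScalarReplAggregates hits show the divisibility test behind "treat this alloca as a vector": the access offset and the alloca size must both be multiples of the element size, and the element count is AllocaSize / EltSize. A reduced version of just that test; the in-tree pass checks much more, e.g. element type compatibility:

    #include <cstdint>
    #include <cstdio>

    static bool fitsAsVector(uint64_t AllocaSize, uint64_t Offset,
                             uint64_t EltSize, uint64_t *NumEltsOut) {
      if (EltSize == 0 || Offset % EltSize != 0 || AllocaSize % EltSize != 0)
        return false;
      *NumEltsOut = AllocaSize / EltSize;
      return true;
    }

    int main() {
      uint64_t NumElts = 0;
      // 32-byte alloca accessed as floats (4 bytes) at offset 8: <8 x float>.
      if (fitsAsVector(32, 8, 4, &NumElts))
        std::printf("treat as <%llu x elt>\n", (unsigned long long)NumElts);
      // Offset 6 is not element-aligned, so no vector form.
      if (!fitsAsVector(32, 6, 4, &NumElts))
        std::printf("offset 6: keep the scalar/memcpy lowering\n");
      return 0;
    }
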
/external/swiftshader/third_party/LLVM/include/llvm/Support/ |
Allocator.h |
    170  // Round EltSize up to the specified alignment.
    171  size_t EltSize = (sizeof(T)+Alignment-1)&(-Alignment);
    172  return static_cast<T*>(Allocate(Num * EltSize, Alignment));
|
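
Allocator.h rounds sizeof(T) up to the requested alignment before multiplying by the element count, so every element of the Num * EltSize block starts on an aligned boundary. The mask in line 171 relies on the identity -A == ~(A - 1) for a power-of-two A; the same round-up as a small helper:

    #include <cstddef>
    #include <cstdio>

    // Round Size up to the next multiple of a power-of-two Alignment.
    static size_t roundUpToAlignment(size_t Size, size_t Alignment) {
      return (Size + Alignment - 1) & ~(Alignment - 1);
    }

    struct Example { char c; };   // sizeof == 1, but we want 8-byte slots

    int main() {
      size_t Alignment = 8;
      size_t EltSize = roundUpToAlignment(sizeof(Example), Alignment);
      size_t Num = 5;
      std::printf("EltSize=%zu, total=%zu bytes\n", EltSize, Num * EltSize); // 8, 40
      return 0;
    }
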
/external/llvm/lib/Target/X86/Utils/ |
X86ShuffleDecode.cpp |
    515  unsigned EltSize = VT.getScalarSizeInBits();
    520  assert((EltSize == 32 || EltSize == 64) && "Unexpected element size");
    524  M = (EltSize == 64 ? ((M >> 1) & 0x1) : (M & 0x3));
    533  unsigned EltSize = VT.getScalarSizeInBits();
    538  assert((EltSize == 32 || EltSize == 64) && "Unexpected element size");
    560  if (EltSize == 64)
|
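
The X86ShuffleDecode.cpp hits decode a variable VPERMILP control: 64-bit elements use control bit 1, 32-bit elements use bits 1:0, and the selected index is local to the element's 128-bit lane. A compact sketch of that decode; decodeVPERMILP below is an illustration of the idea, not the exact in-tree routine:

    #include <cstdio>
    #include <vector>

    static std::vector<unsigned> decodeVPERMILP(const std::vector<unsigned> &Ctrl,
                                                unsigned EltSize) {
      unsigned EltsPerLane = 128 / EltSize;     // 4 for f32, 2 for f64
      std::vector<unsigned> Mask;
      for (unsigned i = 0, e = Ctrl.size(); i != e; ++i) {
        unsigned M = Ctrl[i];
        // Keep only the bits the hardware looks at.
        M = (EltSize == 64) ? ((M >> 1) & 0x1) : (M & 0x3);
        // Rebase the in-lane index onto the whole vector.
        unsigned LaneBase = (i / EltsPerLane) * EltsPerLane;
        Mask.push_back(LaneBase + M);
      }
      return Mask;
    }

    int main() {
      // v4f32 control {3,2,1,0} reverses the single 128-bit lane.
      for (unsigned M : decodeVPERMILP({3, 2, 1, 0}, 32))
        std::printf("%u ", M);
      std::printf("\n");                        // prints: 3 2 1 0
      return 0;
    }
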
/toolchain/binutils/binutils-2.27/bfd/ |
bfd-in.h |
    485  #define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
    487  bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
    488  #define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
    490  bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
    492  #define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
    494  bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
    495  #define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
    497  bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
  [all...] |
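
The bfd_read/bfd_write macros keep fread()-style (buf, eltsize, nitems) arguments but hand bfd_bread/bfd_bwrite a single byte count, ELTSIZE * NITEMS. The same collapsing demonstrated with stdio; my_read and read_bytes are invented names for this sketch:

    #include <cstddef>
    #include <cstdio>

    // Low-level reader that, like bfd_bread, only knows "how many bytes".
    static size_t read_bytes(void *buf, size_t nbytes, std::FILE *f) {
      return std::fread(buf, 1, nbytes, f);
    }

    #define my_read(BUF, ELTSIZE, NITEMS, F) \
      read_bytes((BUF), (ELTSIZE) * (NITEMS), (F))

    int main() {
      unsigned char hdr[16];
      if (std::FILE *f = std::fopen("/bin/sh", "rb")) {          // any readable file
        size_t got = my_read(hdr, sizeof(unsigned char), 16, f); // 1 * 16 bytes
        std::printf("read %zu bytes\n", got);
        std::fclose(f);
      }
      return 0;
    }
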
/external/swiftshader/third_party/LLVM/lib/Analysis/ |
ConstantFolding.cpp |
    285  uint64_t EltSize = TD.getTypeAllocSize(CS->getOperand(Index)->getType());
    287  if (ByteOffset < EltSize &&
    314  uint64_t EltSize = TD.getTypeAllocSize(CA->getType()->getElementType());
    315  uint64_t Index = ByteOffset / EltSize;
    316  uint64_t Offset = ByteOffset - Index * EltSize;
    321  if (EltSize >= BytesLeft)
    325  BytesLeft -= EltSize;
    326  CurPtr += EltSize;
    332  uint64_t EltSize = TD.getTypeAllocSize(CV->getType()->getElementType());
    333  uint64_t Index = ByteOffset / EltSize;
  [all...] |
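
The ConstantFolding.cpp hits (this older copy and the newer one further down) read raw bytes out of a constant array by locating the element containing ByteOffset (Index = ByteOffset / EltSize, intra-element Offset = ByteOffset - Index * EltSize) and then walking element by element until the requested byte count is satisfied. The same walk over a plain uint32_t array, assuming a little-endian host:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    static void readBytes(const std::vector<uint32_t> &Arr, uint64_t ByteOffset,
                          unsigned char *Out, uint64_t BytesLeft) {
      const uint64_t EltSize = sizeof(uint32_t);
      uint64_t Index = ByteOffset / EltSize;
      uint64_t Offset = ByteOffset - Index * EltSize;   // offset inside element
      while (BytesLeft && Index < Arr.size()) {
        uint64_t BytesWritten = std::min(EltSize - Offset, BytesLeft);
        std::memcpy(Out,
                    reinterpret_cast<const unsigned char *>(&Arr[Index]) + Offset,
                    BytesWritten);
        Out += BytesWritten;
        BytesLeft -= BytesWritten;
        Offset = 0;                                     // later elements start at 0
        ++Index;
      }
    }

    int main() {
      std::vector<uint32_t> Arr = {0x03020100u, 0x07060504u, 0x0b0a0908u};
      unsigned char Buf[6] = {0};
      readBytes(Arr, /*ByteOffset=*/2, Buf, sizeof(Buf));  // bytes 2..7
      for (unsigned char B : Buf)
        std::printf("%02x ", B);
      std::printf("\n");                                   // 02 03 04 05 06 07
      return 0;
    }
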
/external/clang/utils/ABITest/ |
TypeGen.py |
    128  eltSize = self.elementType.sizeof()
    129  assert not (self.size % eltSize)
    130  self.numElements = self.size // eltSize
|
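
TypeGen.py derives an array's element count from its total byte size: the size must divide evenly by the element size, and the count is the quotient. The same check, written in C++ to match the other sketches here:

    #include <cassert>
    #include <cstdio>

    static unsigned numElements(unsigned totalSize, unsigned eltSize) {
      assert(totalSize % eltSize == 0 && "array size not a multiple of element size");
      return totalSize / eltSize;
    }

    int main() {
      std::printf("%u\n", numElements(48, 4));   // 12 elements
      return 0;
    }
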
/external/llvm/lib/Target/X86/ |
X86InstrAVX512.td |
     61  int EltSize = EltVT.Size;
     87  !if (!eq (EltSize, 64), "v8i64", "v16i32"),
     93  // Note: For EltSize < 32, FloatVT is illegal and TableGen
     96  !if (!eq (!srl(EltSize,5),0),
     99  "v" # NumElts # "f" # EltSize,
    103  !if (!eq (!srl(EltSize,5),0),
    106  "v" # NumElts # "i" # EltSize,
    452  EVEX_CD8<From.EltSize, From.CD8TupleForm>;
    604  (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts #
    614  (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts
  [all...] |
/external/swiftshader/third_party/LLVM/lib/CodeGen/ |
Analysis.cpp |
     93  uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
     96  StartingOffset + i * EltSize);
|
/external/llvm/lib/Target/Hexagon/ |
HexagonISelLowering.cpp | [all...] |
/external/llvm/lib/CodeGen/SelectionDAG/ |
DAGCombiner.cpp | [all...] |
/external/swiftshader/third_party/LLVM/lib/Target/PowerPC/ |
PPCISelLowering.h |
    219  bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
    227  unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize);
|
/external/llvm/lib/Analysis/ |
ConstantFolding.cpp |
    339  uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());
    341  if (ByteOffset < EltSize &&
    370  uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    371  uint64_t Index = ByteOffset / EltSize;
    372  uint64_t Offset = ByteOffset - Index * EltSize;
    384  uint64_t BytesWritten = EltSize - Offset;
    385  assert(BytesWritten <= EltSize && "Not indexing into this element?");
  [all...] |
/external/llvm/lib/Target/PowerPC/ |
PPCISelLowering.h |
    295  /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
    429  bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
    443  unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);
  [all...] |
/external/llvm/lib/IR/ |
AutoUpgrade.cpp |
    380  unsigned EltSize = Idx->getScalarSizeInBits();
    382  if (EltSize == 64 && IdxSize == 128)
    384  else if (EltSize == 32 && IdxSize == 128)
    386  else if (EltSize == 64 && IdxSize == 256)
  [all...] |
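
AutoUpgrade.cpp picks the replacement vpermilvar variant from the index vector's element size (32 or 64 bits) and total width (128 or 256 bits). A stand-alone version of that dispatch; the returned strings are illustrative labels, not a statement of the exact intrinsic IDs the upgrade code selects:

    #include <cstdio>
    #include <string>

    static std::string pickVariant(unsigned EltSize, unsigned IdxSize) {
      if (EltSize == 64 && IdxSize == 128) return "vpermilvar.pd (128-bit)";
      if (EltSize == 32 && IdxSize == 128) return "vpermilvar.ps (128-bit)";
      if (EltSize == 64 && IdxSize == 256) return "vpermilvar.pd.256";
      return "vpermilvar.ps.256";       // remaining case: 32-bit elts, 256-bit
    }

    int main() {
      std::printf("%s\n", pickVariant(32, 256).c_str());   // vpermilvar.ps.256
      return 0;
    }
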