/external/clang/utils/TableGen/
  NeonEmitter.cpp
    138 unsigned Bitwidth, ElementBitwidth, NumVectors;
    144 Bitwidth(0), ElementBitwidth(0), NumVectors(0) {}
    149 NoManglingQ(false), Bitwidth(0), ElementBitwidth(0), NumVectors(0) {
    180 unsigned getNumElements() const { return Bitwidth / ElementBitwidth; }
    181 unsigned getSizeInBits() const { return Bitwidth; }
    197 Bitwidth = ElementBitwidth;
    205 assert_with_loc(Bitwidth != 128, "Can't get bigger than 128!");
    206 Bitwidth = 128;
    209 assert_with_loc(Bitwidth != 64, "Can't get smaller than 64!");
    210 Bitwidth = 64 [all...]
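The NeonEmitter matches show the Type width bookkeeping: the total vector width (Bitwidth, clamped to 64 or 128 bits for NEON by the assert_with_loc calls) divided by the element width gives the lane count. A minimal standalone sketch of that relationship, assuming nothing beyond the standard library (VectorShape and its method names are illustrative, not the real TableGen class):

    #include <cassert>

    // Sketch of the width bookkeeping: total vector width / element width
    // gives the number of lanes; widening and narrowing only toggle the
    // total width between the 64- and 128-bit NEON register sizes.
    struct VectorShape {
      unsigned Bitwidth;        // total width of the vector
      unsigned ElementBitwidth; // width of one lane

      unsigned getNumElements() const { return Bitwidth / ElementBitwidth; }
      unsigned getSizeInBits() const { return Bitwidth; }

      void doubleVectorSize() {
        assert(Bitwidth != 128 && "Can't get bigger than 128!");
        Bitwidth = 128;
      }
      void halveVectorSize() {
        assert(Bitwidth != 64 && "Can't get smaller than 64!");
        Bitwidth = 64;
      }
    };

    int main() {
      VectorShape S{64, 16};          // e.g. a 4 x i16 vector
      assert(S.getNumElements() == 4);
      S.doubleVectorSize();           // same lane width, now 8 lanes
      assert(S.getNumElements() == 8);
      return 0;
    }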
/external/llvm/lib/Analysis/
  ScalarEvolutionAliasAnalysis.cpp
    128 unsigned BitWidth = SE->getTypeSizeInBits(AS->getType());
    129 APInt ASizeInt(BitWidth, LocA.Size);
    130 APInt BSizeInt(BitWidth, LocB.Size);
  ConstantFolding.cpp
    232 unsigned BitWidth = TD.getPointerTypeSizeInBits(GV->getType());
    233 Offset = APInt(BitWidth, 0);
    251 unsigned BitWidth = TD.getPointerTypeSizeInBits(GEP->getType());
    252 APInt TmpOffset(BitWidth, 0);
    626 unsigned BitWidth = DL->getTypeSizeInBits(Op0->getType()->getScalarType());
    627 APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
    628 APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
    659 // PtrToInt may change the bitwidth so we have convert to the right siz [all...]
  ScalarEvolution.cpp
    693 // Suppose, W is the bitwidth of the return value. We must be prepared for [all...]
/external/llvm/lib/Target/AArch64/InstPrinter/
  AArch64InstPrinter.h
    87 template<int BitWidth>
    89 printAMIndexedWB(MI, OpNum, BitWidth / 8, O);
/frameworks/compile/slang/
  slang.h
    96 void createTarget(uint32_t BitWidth);
    173 void init(uint32_t BitWidth, clang::DiagnosticsEngine *DiagEngine,
  slang.cpp
    173 void Slang::createTarget(uint32_t BitWidth) {
    176 if (BitWidth == 64) {
    266 void Slang::init(uint32_t BitWidth, clang::DiagnosticsEngine *DiagEngine,
    277 createTarget(BitWidth);
/external/llvm/lib/Support/
  StringRef.cpp
    434 unsigned BitWidth = Log2Radix * Str.size();
    435 if (BitWidth < Result.getBitWidth())
    436 BitWidth = Result.getBitWidth(); // don't shrink the result
    437 else if (BitWidth > Result.getBitWidth())
    438 Result = Result.zext(BitWidth);
    443 RadixAP = APInt(BitWidth, Radix);
    444 CharAP = APInt(BitWidth, 0);
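These StringRef.cpp lines appear to be the APInt overload of getAsInteger: the working bit width is over-estimated as ceil(log2(Radix)) bits per character, never shrunk below the caller's result width, and digits are accumulated as Result = Result * Radix + Digit. A rough re-creation of that sizing, assuming LLVM's APInt and StringRef headers are available (parseUnsigned and its decimal-only digit handling are simplifications of mine, not the real API):

    #include "llvm/ADT/APInt.h"
    #include "llvm/ADT/StringRef.h"
    #include <cassert>

    using namespace llvm;

    // Sketch: ceil(log2(Radix)) bits per character is always enough room,
    // and the result is only ever widened, never shrunk.
    static APInt parseUnsigned(StringRef Str, unsigned Radix, unsigned MinBits) {
      unsigned Log2Radix = 0;
      while ((1u << Log2Radix) < Radix)
        ++Log2Radix;

      unsigned BitWidth = Log2Radix * (unsigned)Str.size();
      if (BitWidth < MinBits)
        BitWidth = MinBits; // don't shrink below the caller's requested width

      APInt Result(BitWidth, 0);
      APInt RadixAP(BitWidth, Radix);
      for (char C : Str) {
        assert(C >= '0' && C <= '9' && "sketch handles decimal digits only");
        Result = Result * RadixAP + APInt(BitWidth, C - '0');
      }
      return Result;
    }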
/external/llvm/lib/Transforms/InstCombine/
  InstCombineCasts.cpp
    374 uint32_t BitWidth = Ty->getScalarSizeInBits();
    375 if (BitWidth < OrigBitWidth) {
    376 APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth);
    389 uint32_t BitWidth = Ty->getScalarSizeInBits();
    390 if (CI->getLimitedValue(BitWidth) < BitWidth)
    400 uint32_t BitWidth = Ty->getScalarSizeInBits();
    402 APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
    403 CI->getLimitedValue(BitWidth) < BitWidth) { [all...]
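Both cast matches use the same guard before an expression is evaluated in a narrower type: build a mask of the high bits a trunc would discard with APInt::getHighBitsSet, require those bits to be known zero (MaskedValueIsZero in the surrounding code), and require any constant shift amount to satisfy CI->getLimitedValue(BitWidth) < BitWidth. A small sketch of the mask construction, assuming LLVM's APInt header (bitsLostByTrunc is a hypothetical helper name):

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    // The high bits that truncating from OrigBitWidth down to BitWidth would
    // throw away; if they are known zero, the operation can be done narrow.
    static APInt bitsLostByTrunc(unsigned OrigBitWidth, unsigned BitWidth) {
      // e.g. OrigBitWidth = 32, BitWidth = 8 -> 0xFFFFFF00
      return APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth - BitWidth);
    }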
  InstCombineShifts.cpp
    93 uint32_t BitWidth = Ty->getScalarSizeInBits();
    95 APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
    96 CI->getLimitedValue(BitWidth) < BitWidth) {
    746 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
    753 isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmt) { [all...]
  InstCombineAddSub.cpp
    880 int BitWidth = Op0KnownZero.getBitWidth();
    882 Op0KnownZeroTemp.clearBit(BitWidth - 1);
    883 int Op0ZeroPosition = BitWidth - Op0KnownZeroTemp.countLeadingZeros() - 1;
    885 int Op1OnePosition = BitWidth - Op1MaybeOne.countLeadingZeros() - 1;
    920 int BitWidth = IT->getBitWidth();
    921 APInt LHSKnownZero(BitWidth, 0);
    922 APInt LHSKnownOne(BitWidth, 0);
    925 APInt RHSKnownZero(BitWidth, 0);
    926 APInt RHSKnownOne(BitWidth, 0);
    931 if ((LHSKnownOne[BitWidth - 1] && RHSKnownZero[BitWidth - 1]) | [all...]
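The InstCombineAddSub matches around line 920 are a sign-bit argument: compute known-zero/known-one masks for both addends and, if their sign bits are provably opposite, the signed add cannot overflow. A hedged sketch of just that test, taking the known-bit masks as inputs in the form computeKnownBits would produce (the helper name is mine):

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    // Given known-zero / known-one masks for two addends, report whether the
    // operands are known to have opposite signs, in which case a signed add
    // cannot overflow.
    static bool knownOppositeSigns(const APInt &LHSKnownZero, const APInt &LHSKnownOne,
                                   const APInt &RHSKnownZero, const APInt &RHSKnownOne) {
      unsigned BitWidth = LHSKnownZero.getBitWidth();
      bool LHSNeg = LHSKnownOne[BitWidth - 1];  // sign bit known set
      bool LHSPos = LHSKnownZero[BitWidth - 1]; // sign bit known clear
      bool RHSNeg = RHSKnownOne[BitWidth - 1];
      bool RHSPos = RHSKnownZero[BitWidth - 1];
      return (LHSNeg && RHSPos) || (LHSPos && RHSNeg);
    }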
/external/llvm/lib/Transforms/Scalar/
  Reassociate.cpp
    331 /// CarmichaelShift - Returns k such that lambda(2^Bitwidth) = 2^k, where lambda
    332 /// is the Carmichael function. This means that x^(2^k) === 1 mod 2^Bitwidth for
    333 /// every odd x, i.e. x^(2^k) = 1 for every odd x in Bitwidth-bit arithmetic.
    334 /// Note that 0 <= k < Bitwidth, and if Bitwidth > 3 then x^(2^k) = 0 for every
    335 /// even x in Bitwidth-bit arithmetic.
    336 static unsigned CarmichaelShift(unsigned Bitwidth) {
    337 if (Bitwidth < 3)
    338 return Bitwidth - 1;
    339 return Bitwidth - 2 [all...]
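The CarmichaelShift comment states the number-theoretic fact the pass relies on: lambda(2^Bitwidth) = 2^(Bitwidth-2) for Bitwidth >= 3, so x^(2^(Bitwidth-2)) === 1 mod 2^Bitwidth for every odd x, which is what lets large repeated-multiplication exponents be reduced. A standalone numeric check of that property for Bitwidth = 8 (so k = 6), using plain uint8_t arithmetic for the mod 2^8 reduction:

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned Bitwidth = 8, k = Bitwidth - 2; // lambda(2^8) = 2^6
      for (unsigned x = 1; x < 256; x += 2) {        // every odd 8-bit value
        uint8_t v = static_cast<uint8_t>(x);
        for (unsigned i = 0; i < k; ++i)
          v = static_cast<uint8_t>(v * v);           // v = x^(2^k) mod 2^8
        assert(v == 1);
      }
      return 0;
    }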
/external/llvm/lib/CodeGen/SelectionDAG/
  SelectionDAG.cpp
    [all...]
/art/runtime/entrypoints/quick/
  quick_trampoline_entrypoints_test.cc
    98 // kPointerSize, which is wrong when the target bitwidth is not the same as the host's.
/external/llvm/utils/TableGen/
  FixedLenDecoderEmitter.cpp
    100 unsigned Indentation, unsigned BitWidth,
    331 unsigned BitWidth;
    341 BestIndex(FC.BestIndex), BitWidth(FC.BitWidth),
    350 Parent(nullptr), BestIndex(-1), BitWidth(BW), Emitter(E) {
    351 for (unsigned i = 0; i < BitWidth; ++i)
    364 Parent(&parent), BestIndex(-1), BitWidth(parent.BitWidth),
    369 unsigned getBitWidth() const { return BitWidth; }
    384 for (unsigned i = 0; i < BitWidth; ++i) [all...]
/external/clang/include/clang/StaticAnalyzer/Core/PathSensitive/
  BasicValueFactory.h
    79 const llvm::APSInt& getValue(uint64_t X, unsigned BitWidth, bool isUnsigned);
    102 /// but with the bitwidth and signedness of 'To'.
/external/clang/lib/StaticAnalyzer/Core/
  BasicValueFactory.cpp
    95 const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth,
    97 llvm::APSInt V(BitWidth, isUnsigned);
/external/clang/test/Analysis/
  null-deref-ps.c
    218 // for 'x' should have a bitwidth of 8.
    222 // x against 0 (with the same bitwidth).
/external/clang/include/clang/AST/
  TemplateBase.h
    84 // BitWidth > 64. The memory may be shared between multiple
    86 unsigned BitWidth : 31;
    288 if (Integer.BitWidth <= 64)
    289 return APSInt(APInt(Integer.BitWidth, Integer.VAL), Integer.IsUnsigned);
    291 unsigned NumWords = APInt::getNumWords(Integer.BitWidth);
    292 return APSInt(APInt(Integer.BitWidth, makeArrayRef(Integer.pVal, NumWords)),
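These TemplateBase.h lines show the two storage forms for an integral template argument: the value lives inline in a uint64_t when BitWidth <= 64, otherwise behind a pointer to the APInt's 64-bit words, and the accessor rebuilds an APSInt from whichever form is present. A sketch of that round trip, assuming LLVM's ADT headers (StoredInteger and its exact field layout are illustrative only):

    #include "llvm/ADT/APSInt.h"
    #include "llvm/ADT/ArrayRef.h"

    using namespace llvm;

    // Small values are stored inline; wider ones point at raw 64-bit words.
    struct StoredInteger {
      unsigned BitWidth;
      bool IsUnsigned;
      union {
        uint64_t VAL;         // used when BitWidth <= 64
        const uint64_t *pVal; // used when BitWidth > 64
      };

      APSInt toAPSInt() const {
        if (BitWidth <= 64)
          return APSInt(APInt(BitWidth, VAL), IsUnsigned);
        unsigned NumWords = APInt::getNumWords(BitWidth);
        return APSInt(APInt(BitWidth, makeArrayRef(pVal, NumWords)), IsUnsigned);
      }
    };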
/external/llvm/lib/ExecutionEngine/
  ExecutionEngine.cpp
    639 uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
    640 GV.IntVal = GV.IntVal.trunc(BitWidth);
    645 uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
    646 GV.IntVal = GV.IntVal.zext(BitWidth);
    651 uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
    652 GV.IntVal = GV.IntVal.sext(BitWidth);
    700 uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
    702 GV.IntVal = APIntOps::RoundFloatToAPInt(GV.FloatVal, BitWidth);
    704 GV.IntVal = APIntOps::RoundDoubleToAPInt(GV.DoubleVal, BitWidth);
    709 (void)apf.convertToInteger(&v, BitWidth, [all...]
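The ExecutionEngine matches fold Trunc/ZExt/SExt constant expressions by resizing the GenericValue's APInt to the destination integer type's width, and the FPToUI/FPToSI cases round a float or double into an APInt of that width. A small sketch of the integer-resizing part, assuming LLVM's APInt header (resizeToWidth is a hypothetical helper):

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    // Coerce an integer value to a destination width: truncate when
    // narrowing, zero- or sign-extend when widening, otherwise leave it alone.
    static APInt resizeToWidth(const APInt &V, uint32_t BitWidth, bool Signed) {
      if (BitWidth < V.getBitWidth())
        return V.trunc(BitWidth);
      if (BitWidth > V.getBitWidth())
        return Signed ? V.sext(BitWidth) : V.zext(BitWidth);
      return V;
    }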
/external/llvm/lib/IR/
  DataLayout.cpp
    411 uint32_t BitWidth, bool ABIInfo,
    418 Alignments[i].TypeBitWidth == BitWidth)
    425 // the BitWidth requested.
    426 if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 ||
    688 /// an integer type of the specified bitwidth.
    689 unsigned DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
    690 return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, nullptr);
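getAlignmentInfo scans the alignment table for an entry whose TypeBitWidth matches the requested width exactly, and otherwise remembers the smallest entry that is still wider. A standalone sketch of that selection rule (the struct, names, and fallback here are mine; the real lookup also handles vector types and other fallbacks):

    #include <cstdint>
    #include <vector>

    struct AlignEntry {
      uint32_t TypeBitWidth;
      unsigned ABIAlign; // in bytes
    };

    // Exact width match wins; otherwise prefer the smallest wider entry.
    static unsigned pickIntegerAlignment(const std::vector<AlignEntry> &Alignments,
                                         uint32_t BitWidth) {
      int BestMatchIdx = -1;
      for (int i = 0, e = (int)Alignments.size(); i != e; ++i) {
        if (Alignments[i].TypeBitWidth == BitWidth)
          return Alignments[i].ABIAlign;
        if (Alignments[i].TypeBitWidth > BitWidth &&
            (BestMatchIdx == -1 ||
             Alignments[i].TypeBitWidth < Alignments[BestMatchIdx].TypeBitWidth))
          BestMatchIdx = i;
      }
      // Sketch-only default when nothing wider exists: the width in bytes.
      return BestMatchIdx != -1 ? Alignments[BestMatchIdx].ABIAlign
                                : (BitWidth + 7) / 8;
    }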
  Type.cpp
    58 bool Type::isIntegerTy(unsigned Bitwidth) const {
    59 return isIntegerTy() && cast<IntegerType>(this)->getBitWidth() == Bitwidth;
    300 assert(NumBits >= MIN_INT_BITS && "bitwidth too small");
    301 assert(NumBits <= MAX_INT_BITS && "bitwidth too large");
    323 unsigned BitWidth = getBitWidth();
    324 return (BitWidth > 7) && isPowerOf2_32(BitWidth);
  ConstantRange.cpp
    32 ConstantRange::ConstantRange(uint32_t BitWidth, bool Full) {
    34 Lower = Upper = APInt::getMaxValue(BitWidth);
    36 Lower = Upper = APInt::getMinValue(BitWidth);
    137 /// its bitwidth, for example: i8 [120, 140).
    496 // Chop off the most significant bits that are past the destination bitwidth.
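The ConstantRange constructor encodes the two degenerate sets with equal endpoints: a full set is stored as [Max, Max) and an empty set as [Min, Min); every other range is a half-open interval that may wrap past the maximum, as in the i8 [120, 140) example cited at line 137. A sketch of just that encoding, assuming LLVM's APInt header (SimpleRange is illustrative, not the real class):

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    // Full and empty are the two Lower == Upper cases, distinguished by
    // which sentinel value is stored.
    struct SimpleRange {
      APInt Lower, Upper; // half-open [Lower, Upper), may wrap around

      static SimpleRange full(uint32_t BitWidth) {
        APInt Max = APInt::getMaxValue(BitWidth);
        return {Max, Max}; // [Max, Max) means "everything"
      }
      static SimpleRange empty(uint32_t BitWidth) {
        APInt Min = APInt::getMinValue(BitWidth);
        return {Min, Min}; // [Min, Min) means "nothing"
      }
      bool isFullSet() const { return Lower == Upper && Lower.isMaxValue(); }
      bool isEmptySet() const { return Lower == Upper && Lower.isMinValue(); }
    };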
/external/chromium_org/third_party/skia/src/images/
  SkImageDecoder_libico.cpp
    234 int bitWidth = w*bitCount;
    235 int test = bitWidth & 0x1F;
    237 int lineBitWidth = (bitWidth & 0xFFFFFFE0) + (0x20 & mask);
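The ICO decoder rounds each row's bit width (w * bitCount) up to the next multiple of 32, because the bitmap rows are padded to a 4-byte boundary: the mask keeps the aligned part and 0x20 is added back only when there was a remainder. A standalone check of the same rounding, written with ~0x1F rather than the decoder's exact constants:

    #include <cassert>

    // Row stride in bits: pad w * bitCount up to a 32-bit boundary.
    static int lineBitWidth(int w, int bitCount) {
      int bitWidth = w * bitCount;
      int mask = (bitWidth & 0x1F) ? ~0 : 0;     // any bits past a 32-bit boundary?
      return (bitWidth & ~0x1F) + (0x20 & mask); // round up to a multiple of 32
    }

    int main() {
      assert(lineBitWidth(16, 4) == 64);   // 64 bits is already aligned
      assert(lineBitWidth(15, 4) == 64);   // 60 bits rounds up to 64
      assert(lineBitWidth(10, 24) == 256); // 240 bits rounds up to 256
      return 0;
    }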
/external/llvm/lib/Transforms/Utils/
  SimplifyIndVar.cpp
    125 uint32_t BitWidth = cast<IntegerType>(UseInst->getType())->getBitWidth();
    126 if (D->getValue().uge(BitWidth))
    130 APInt::getOneBitSet(BitWidth, D->getZExtValue()));