/external/llvm/lib/ExecutionEngine/MCJIT/ |
MCJIT.cpp |
  557  unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
  558  if (BitWidth == 1)
  559  rv.IntVal = APInt(BitWidth, ((bool(*)())(intptr_t)FPtr)());
  560  else if (BitWidth <= 8)
  561  rv.IntVal = APInt(BitWidth, ((char(*)())(intptr_t)FPtr)());
  562  else if (BitWidth <= 16)
  563  rv.IntVal = APInt(BitWidth, ((short(*)())(intptr_t)FPtr)());
  564  else if (BitWidth <= 32)
  565  rv.IntVal = APInt(BitWidth, ((int(*)())(intptr_t)FPtr)());
  566  else if (BitWidth <= 64 [all...] |
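
Both this MCJIT hit and the legacy JIT.cpp hit further down box a native return value into an APInt by dispatching on the callee's integer bit width. A minimal standalone sketch of the same width-dispatch pattern, boxing into a plain uint64_t instead of llvm::APInt (callAndBox and the answer() test function are made up for illustration):

    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-in for a JIT'd entry point; in MCJIT the raw pointer
    // comes from the memory manager and RetTy from the IR function signature.
    static int answer() { return -42; }

    // Call the raw pointer through a function type whose return width matches
    // the declared BitWidth, then widen the result into a 64-bit box.
    uint64_t callAndBox(intptr_t FPtr, unsigned BitWidth) {
      if (BitWidth == 1)  return ((bool  (*)())FPtr)();
      if (BitWidth <= 8)  return (uint8_t) ((char  (*)())FPtr)();
      if (BitWidth <= 16) return (uint16_t)((short (*)())FPtr)();
      if (BitWidth <= 32) return (uint32_t)((int   (*)())FPtr)();
      return ((int64_t (*)())FPtr)();   // BitWidth <= 64
    }

    int main() {
      // A 32-bit result is zero-extended into the box: prints ffffffd6.
      std::cout << std::hex << callAndBox((intptr_t)&answer, 32) << "\n";
    }
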
/external/swiftshader/third_party/LLVM/lib/Transforms/InstCombine/ |
InstCombineCalls.cpp |
  359  uint32_t BitWidth = IT->getBitWidth();
  360  APInt KnownZero(BitWidth, 0);
  361  APInt KnownOne(BitWidth, 0);
  362  ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
  365  APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
  368  APInt(BitWidth, TrailingZeros)));
  378  uint32_t BitWidth = IT->getBitWidth();
  379  APInt KnownZero(BitWidth, 0);
  380  APInt KnownOne(BitWidth, 0);
  381  ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth), [all...] |
InstCombineCasts.cpp |
  364  uint32_t BitWidth = Ty->getScalarSizeInBits();
  365  if (BitWidth < OrigBitWidth) {
  366  APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth);
  379  uint32_t BitWidth = Ty->getScalarSizeInBits();
  380  if (CI->getLimitedValue(BitWidth) < BitWidth)
  390  uint32_t BitWidth = Ty->getScalarSizeInBits();
  392  APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
  393  CI->getLimitedValue(BitWidth) < BitWidth) { [all...] |
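
The shift case visible at lines 379-380 (and again in the newer copy of this file further down) depends on the identity that a left shift can be evaluated directly in the truncated type whenever the shift amount is smaller than the destination bit width. A quick exhaustive confirmation of that identity for i32 -> i8, independent of LLVM:

    #include <cassert>
    #include <cstdint>

    // trunc(x << c) == trunc(x) << c for every c < 8, the same condition the
    // CI->getLimitedValue(BitWidth) < BitWidth check enforces above.
    int main() {
      for (unsigned c = 0; c < 8; ++c)
        for (uint32_t x = 0; x <= 0xFFFF; ++x) {
          uint8_t wide   = (uint8_t)(x << c);
          uint8_t narrow = (uint8_t)((uint8_t)(x) << c);
          assert(wide == narrow && "narrowing a small shift changed the result");
        }
      return 0;
    }
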
/external/llvm/lib/IR/ |
ConstantRange.cpp |
  34   ConstantRange::ConstantRange(uint32_t BitWidth, bool Full) {
  36   Lower = Upper = APInt::getMaxValue(BitWidth);
  38   Lower = Upper = APInt::getMinValue(BitWidth);
  191  unsigned BitWidth = Other.getBitWidth();
  194  return ConstantRange(BitWidth, false);
  199  return ConstantRange(BitWidth);
  201  ConstantRange Result(BitWidth);
  205  SubsetIntersect(Result, ConstantRange(APInt::getNullValue(BitWidth),
  215  ConstantRange(APInt::getSignedMinValue(BitWidth),
  216  APInt::getSignedMinValue(BitWidth) - SignedMax)) [all...] |
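
The constructor at line 34 fixes ConstantRange's encoding: the full set is the degenerate interval [Max, Max), the empty set is [Min, Min), and every other pair is a half-open, possibly wrapping interval. A stripped-down 8-bit illustration of that encoding and its membership test (Range8 and its methods are illustrative, not LLVM API):

    #include <cstdint>
    #include <iostream>

    // Half-open, possibly wrapping interval [Lower, Upper) over uint8_t.
    // Full set:  Lower == Upper == 0xFF (max value), as in ConstantRange(BW, true).
    // Empty set: Lower == Upper == 0x00 (min value), as in ConstantRange(BW, false).
    struct Range8 {
      uint8_t Lower, Upper;
      static Range8 full()  { return {0xFF, 0xFF}; }
      static Range8 empty() { return {0x00, 0x00}; }

      bool isFull()  const { return Lower == Upper && Lower == 0xFF; }
      bool isEmpty() const { return Lower == Upper && Lower == 0x00; }

      bool contains(uint8_t V) const {
        if (isFull())  return true;
        if (isEmpty()) return false;
        if (Lower <= Upper)              // ordinary interval
          return Lower <= V && V < Upper;
        return V >= Lower || V < Upper;  // wrapping interval, e.g. [250, 5)
      }
    };

    int main() {
      Range8 Wrapped{250, 5};
      std::cout << Wrapped.contains(252) << Wrapped.contains(3)
                << Wrapped.contains(100) << "\n";                 // prints 110
      std::cout << Range8::full().contains(0)
                << Range8::empty().contains(0) << "\n";            // prints 10
    }
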
/external/swiftshader/third_party/LLVM/utils/TableGen/ |
FixedLenDecoderEmitter.cpp |
  239  unsigned BitWidth;
  249  BestIndex(FC.BestIndex), BitWidth(FC.BitWidth),
  258  Parent(NULL), BestIndex(-1), BitWidth(BW), Emitter(E) {
  259  for (unsigned i = 0; i < BitWidth; ++i)
  272  Parent(&parent), BestIndex(-1), BitWidth(parent.BitWidth),
  288  for (unsigned i = 0; i < BitWidth; ++i)
  389  assert(StartBit + NumBits - 1 < Owner->BitWidth);
  506  o.indent(Indentation) << "switch (fieldFromInstruction" << Owner->BitWidth [all...] |
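
The assertion at line 389 and the generated switch at line 506 both revolve around pulling a contiguous bit field out of a fixed-length instruction word. A simplified, standalone field extractor in the spirit of the generated fieldFromInstruction helpers (the name fieldFromInsn and the sample encoding are made up):

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Extract NumBits bits of Insn starting at StartBit (bit 0 = LSB),
    // guarded by the same StartBit + NumBits - 1 < BitWidth precondition.
    uint64_t fieldFromInsn(uint64_t Insn, unsigned StartBit, unsigned NumBits,
                           unsigned BitWidth = 32) {
      assert(NumBits >= 1 && NumBits <= 64 && "invalid field width");
      assert(StartBit + NumBits - 1 < BitWidth && "field exceeds instruction width");
      uint64_t Mask = NumBits == 64 ? ~0ULL : ((1ULL << NumBits) - 1);
      return (Insn >> StartBit) & Mask;
    }

    int main() {
      // Pull a 4-bit field out of bits [15:12] of an arbitrary 32-bit word.
      uint32_t Insn = 0xE59F1004;
      std::cout << std::hex << fieldFromInsn(Insn, 12, 4) << "\n";  // prints 1
    }
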
/external/llvm/lib/Transforms/InstCombine/ |
InstCombineAddSub.cpp |
  856  int BitWidth = Op0KnownZero.getBitWidth();
  858  Op0KnownZeroTemp.clearBit(BitWidth - 1);
  859  int Op0ZeroPosition = BitWidth - Op0KnownZeroTemp.countLeadingZeros() - 1;
  861  int Op1OnePosition = BitWidth - Op1MaybeOne.countLeadingZeros() - 1;
  896  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
  897  APInt LHSKnownZero(BitWidth, 0);
  898  APInt LHSKnownOne(BitWidth, 0);
  901  APInt RHSKnownZero(BitWidth, 0);
  902  APInt RHSKnownOne(BitWidth, 0);
  907  if ((LHSKnownOne[BitWidth - 1] && RHSKnownZero[BitWidth - 1]) | [all...] |
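
The sign-bit test at line 907 is one half of the classic rule that a signed add cannot overflow when the operands are known to have opposite signs, which the known-bits queries above try to establish. A brute-force confirmation of the underlying rule over i8, independent of InstCombine:

    #include <cassert>

    // If one i8 operand is negative and the other non-negative, the exact sum
    // lies between the two operands and therefore still fits in i8.
    int main() {
      for (int a = -128; a <= 127; ++a)
        for (int b = -128; b <= 127; ++b) {
          bool oppositeSigns = (a < 0) != (b < 0);
          bool fits = (a + b) >= -128 && (a + b) <= 127;
          if (oppositeSigns)
            assert(fits && "opposite-sign signed add overflowed?");
        }
      return 0;
    }
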
InstCombineCasts.cpp |
  368  uint32_t BitWidth = Ty->getScalarSizeInBits();
  369  if (BitWidth < OrigBitWidth) {
  370  APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth);
  383  uint32_t BitWidth = Ty->getScalarSizeInBits();
  384  if (CI->getLimitedValue(BitWidth) < BitWidth)
  394  uint32_t BitWidth = Ty->getScalarSizeInBits();
  396  APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth), 0, CxtI) &&
  397  CI->getLimitedValue(BitWidth) < BitWidth) { [all...] |
/external/llvm/lib/Target/X86/ |
X86ShuffleDecodeConstantPool.cpp |
  53   unsigned BitWidth = cast<IntegerType>(EltTy)->getBitWidth();
  54   if ((BitWidth % 8) != 0)
  57   int Scale = BitWidth / 8;
  247  unsigned BitWidth = cast<IntegerType>(VecEltTy)->getBitWidth();
  248  if ((BitWidth % 8) != 0)
  252  int Scale = BitWidth / 8;
|
/external/swiftshader/third_party/LLVM/lib/Support/ |
StringRef.cpp |
  377  unsigned BitWidth = Log2Radix * Str.size();
  378  if (BitWidth < Result.getBitWidth())
  379  BitWidth = Result.getBitWidth(); // don't shrink the result
  381  Result = Result.zext(BitWidth);
  386  RadixAP = APInt(BitWidth, Radix);
  387  CharAP = APInt(BitWidth, 0);
|
/external/swiftshader/third_party/LLVM/lib/Analysis/ |
ScalarEvolutionAliasAnalysis.cpp |
  128  unsigned BitWidth = SE->getTypeSizeInBits(AS->getType());
  129  APInt ASizeInt(BitWidth, LocA.Size);
  130  APInt BSizeInt(BitWidth, LocB.Size);
|
Lint.cpp |
  413  unsigned BitWidth = TD->getTypeSizeInBits(Ptr->getType());
  414  APInt Mask = APInt::getAllOnesValue(BitWidth),
  415  KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  417  Assert1(!(KnownOne & APInt::getLowBitsSet(BitWidth, Log2_32(Align))),
  473  unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
  474  APInt Mask = APInt::getAllOnesValue(BitWidth),
  475  KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
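
The Assert1 at line 417 reports a pointer as misaligned when any of its low log2(Align) bits is known to be one. The same test expressed over plain 64-bit masks (a sketch; the KnownOne mask is assumed to come from some known-bits analysis):

    #include <cstdint>
    #include <iostream>

    // A pointer cannot be Align-aligned (Align a power of two) if any of its
    // low log2(Align) bits is known to be one -- the same test Lint performs.
    bool definitelyMisaligned(uint64_t KnownOne, uint64_t Align) {
      uint64_t LowBits = Align - 1;        // mask of the low log2(Align) bits
      return (KnownOne & LowBits) != 0;
    }

    int main() {
      // Known-bits say bit 2 is one, so the address ends in binary ...1xx:
      // it cannot be 8-byte aligned, but nothing rules out 4-byte alignment.
      uint64_t KnownOne = 0x4;
      std::cout << definitelyMisaligned(KnownOne, 8) << " "
                << definitelyMisaligned(KnownOne, 4) << "\n";   // prints 1 0
    }
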
|
ScalarEvolution.cpp | 664 // Suppose, W is the bitwidth of the return value. We must be prepared for [all...] |
/external/clang/utils/TableGen/ |
NeonEmitter.cpp |
  141  unsigned Bitwidth, ElementBitwidth, NumVectors;
  147  NoManglingQ(false), Bitwidth(0), ElementBitwidth(0), NumVectors(0) {}
  152  ScalarForMangling(false), NoManglingQ(false), Bitwidth(0),
  185  unsigned getNumElements() const { return Bitwidth / ElementBitwidth; }
  186  unsigned getSizeInBits() const { return Bitwidth; }
  210  Bitwidth = ElementBitwidth;
  218  assert_with_loc(Bitwidth != 128, "Can't get bigger than 128!");
  219  Bitwidth = 128;
  222  assert_with_loc(Bitwidth != 64, "Can't get smaller than 64!");
  223  Bitwidth = 64 [all...] |
/external/llvm/test/Transforms/MergeFunc/ |
inttoptr-address-space.ll | 19 ; Check for pointer bitwidth equal assertion failure
|
/prebuilts/go/darwin-x86/src/cmd/compile/internal/ssa/ |
TODO | 12 - Add a value range propagation pass (for bounds elim & bitwidth reduction)
|
/prebuilts/go/linux-x86/src/cmd/compile/internal/ssa/ |
TODO | 12 - Add a value range propagation pass (for bounds elim & bitwidth reduction)
|
/external/swiftshader/third_party/LLVM/lib/ExecutionEngine/JIT/ |
JIT.cpp |
  451  unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
  452  if (BitWidth == 1)
  453  rv.IntVal = APInt(BitWidth, ((bool(*)())(intptr_t)FPtr)());
  454  else if (BitWidth <= 8)
  455  rv.IntVal = APInt(BitWidth, ((char(*)())(intptr_t)FPtr)());
  456  else if (BitWidth <= 16)
  457  rv.IntVal = APInt(BitWidth, ((short(*)())(intptr_t)FPtr)());
  458  else if (BitWidth <= 32)
  459  rv.IntVal = APInt(BitWidth, ((int(*)())(intptr_t)FPtr)());
  460  else if (BitWidth <= 64 [all...] |
/external/llvm/lib/CodeGen/SelectionDAG/ |
SelectionDAG.cpp | [all...] |
TargetLowering.cpp |
  378  unsigned BitWidth,
  398  unsigned DemandedSize = BitWidth - Demanded.countLeadingZeros();
  402  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
  434  unsigned BitWidth = DemandedMask.getBitWidth();
  435  assert(Op.getValueType().getScalarType().getSizeInBits() == BitWidth &&
  442  KnownZero = KnownOne = APInt(BitWidth, 0);
  454  NewMask = APInt::getAllOnesValue(BitWidth);
  511  if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
  545  if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
  570  if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl) [all...] |
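
Lines 398-402 are the heart of ShrinkDemandedOp: the index of the highest demanded bit fixes DemandedSize, and candidate types are walked through NextPowerOf2 until one covers it. The width computation in isolation, assuming candidate widths start at 8 bits (plain 64-bit masks, not the DAG code):

    #include <cstdint>
    #include <iostream>

    // Leading zeros of a 64-bit mask (0 maps to 64); GCC/Clang builtin.
    static unsigned clz64(uint64_t X) { return X ? __builtin_clzll(X) : 64; }

    // Smallest power-of-two width, at least 8 bits, that still covers every
    // bit set in Demanded -- the candidate "small VT" width tried first.
    unsigned shrunkWidth(uint64_t Demanded, unsigned BitWidth = 64) {
      unsigned DemandedSize = BitWidth - clz64(Demanded);
      unsigned SmallBits = 8;
      while (SmallBits < DemandedSize)
        SmallBits *= 2;                  // NextPowerOf2 over 8, 16, 32, ...
      return SmallBits;
    }

    int main() {
      std::cout << shrunkWidth(0x00FF) << " "         // bits 0..7  -> 8
                << shrunkWidth(0x1FFF) << " "         // bits 0..12 -> 16
                << shrunkWidth(0xFFFFFFFFull) << "\n"; // bits 0..31 -> 32
    }
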
/external/llvm/lib/Analysis/ |
ScalarEvolution.cpp | 939 // Suppose, W is the bitwidth of the return value. We must be prepared for [all...] |
/external/swiftshader/third_party/LLVM/lib/CodeGen/SelectionDAG/ |
TargetLowering.cpp | [all...] |
SelectionDAG.cpp |
  907  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  908  APInt Imm = APInt::getLowBitsSet(BitWidth, [all...] |
/external/llvm/lib/Support/ |
StringRef.cpp |
  473  unsigned BitWidth = Log2Radix * Str.size();
  474  if (BitWidth < Result.getBitWidth())
  475  BitWidth = Result.getBitWidth(); // don't shrink the result
  476  else if (BitWidth > Result.getBitWidth())
  477  Result = Result.zext(BitWidth);
  482  RadixAP = APInt(BitWidth, Radix);
  483  CharAP = APInt(BitWidth, 0);
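
The estimate at line 473 is the conservative bound that a d-digit radix-R string is always smaller than R^d <= 2^(ceil(log2 R) * d), so sizing the APInt at Log2Radix * Str.size() bits can never drop digits. A quick standalone check of that bound for a decimal string (requiredBits is a hypothetical helper, not StringRef's code):

    #include <cmath>
    #include <cstring>
    #include <iostream>

    // Bits needed so that any Digits-long string in the given radix fits:
    // value < Radix^Digits <= 2^(ceil(log2(Radix)) * Digits).
    unsigned requiredBits(unsigned Radix, unsigned Digits) {
      unsigned Log2Radix = (unsigned)std::ceil(std::log2((double)Radix));
      return Log2Radix * Digits;
    }

    int main() {
      // "18446744073709551615" is UINT64_MAX: 20 decimal digits -> 4 * 20 = 80
      // bits, comfortably above the 64 bits the value actually needs.
      const char *Str = "18446744073709551615";
      std::cout << requiredBits(10, (unsigned)std::strlen(Str)) << "\n"; // 80
    }
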
|
/external/swiftshader/third_party/LLVM/lib/Target/ |
TargetData.cpp |
  269  uint32_t BitWidth, bool ABIInfo,
  276  Alignments[i].TypeBitWidth == BitWidth)
  283  // the BitWidth requested.
  284  if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 ||
  501  /// an integer type of the specified bitwidth.
  502  unsigned TargetData::getABIIntegerTypeAlignment(unsigned BitWidth) const {
  503  return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, 0);
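
getAlignmentInfo first looks for an entry whose TypeBitWidth matches the request exactly and otherwise falls back to the closest larger integer entry, which is what getABIIntegerTypeAlignment relies on for odd widths. The selection logic in miniature (a sketch over a plain vector; the table contents below are made up):

    #include <iostream>
    #include <vector>

    struct IntAlign { unsigned TypeBitWidth; unsigned ABIAlign; };

    // Prefer an exact bit-width match; otherwise take the smallest entry that
    // is still wider than the requested width (the "best match" fallback).
    unsigned abiIntegerAlignment(const std::vector<IntAlign> &Table,
                                 unsigned BitWidth) {
      int BestMatchIdx = -1;
      for (size_t i = 0; i < Table.size(); ++i) {
        if (Table[i].TypeBitWidth == BitWidth)
          return Table[i].ABIAlign;                      // exact match wins
        if (Table[i].TypeBitWidth > BitWidth &&
            (BestMatchIdx == -1 ||
             Table[i].TypeBitWidth < Table[BestMatchIdx].TypeBitWidth))
          BestMatchIdx = (int)i;
      }
      return BestMatchIdx == -1 ? 1 : Table[BestMatchIdx].ABIAlign;
    }

    int main() {
      // Made-up data layout: i8, i16, i32, i64 entries (alignments in bytes).
      std::vector<IntAlign> Table = {{8, 1}, {16, 2}, {32, 4}, {64, 8}};
      std::cout << abiIntegerAlignment(Table, 32) << " "   // exact: 4
                << abiIntegerAlignment(Table, 36) << "\n"; // rounds up to i64: 8
    }
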
|
/external/llvm/lib/Transforms/Scalar/ |
Reassociate.cpp |
  266  /// Returns k such that lambda(2^Bitwidth) = 2^k, where lambda is the Carmichael
  267  /// function. This means that x^(2^k) === 1 mod 2^Bitwidth for
  268  /// every odd x, i.e. x^(2^k) = 1 for every odd x in Bitwidth-bit arithmetic.
  269  /// Note that 0 <= k < Bitwidth, and if Bitwidth > 3 then x^(2^k) = 0 for every
  270  /// even x in Bitwidth-bit arithmetic.
  271  static unsigned CarmichaelShift(unsigned Bitwidth) {
  272  if (Bitwidth < 3)
  273  return Bitwidth - 1;
  274  return Bitwidth - 2 [all...] |
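
The Carmichael property quoted in the comment is easy to confirm exhaustively for a small width: with Bitwidth = 8 the shift is k = 6, so x^64 === 1 (mod 256) for every odd x and x^64 === 0 (mod 256) for every even x. A brute-force check, independent of Reassociate:

    #include <cassert>
    #include <cstdint>

    // x^e mod 256, computed in uint8_t so the reduction mod 2^8 is implicit.
    static uint8_t powMod256(uint8_t x, unsigned e) {
      uint8_t r = 1;
      for (unsigned i = 0; i < e; ++i)
        r = (uint8_t)(r * x);
      return r;
    }

    int main() {
      const unsigned Bitwidth = 8, k = Bitwidth - 2;     // CarmichaelShift(8) == 6
      for (unsigned x = 1; x < 256; x += 2)              // every odd 8-bit value
        assert(powMod256((uint8_t)x, 1u << k) == 1);     // x^64 == 1 (mod 256)
      for (unsigned x = 0; x < 256; x += 2)              // every even 8-bit value
        assert(powMod256((uint8_t)x, 1u << k) == 0);     // x^64 == 0 (mod 256)
      return 0;
    }
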