Lines Matching defs:Size
1162 // source/dest is aligned and the copy size is large enough. We therefore want
1344 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1524 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
1611 // If parameter size extends outside the register area, "offset" value
1651 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1668 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1802 // Emit regular call when code size is the priority
1816 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1868 void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
1891 // Special case when NSAA != SP and the parameter size is greater than the size of
1896 if (NSAAOffset != 0 && Size > Excess) {
1905 // the end (first after last) register would be reg + param-size-in-regs,
1909 unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
1916 // size truncated here.
1918 // size in memory to zero.
1919 Size = std::max<int>(Size - Excess, 0);
2051 if (RVLocs1.size() != RVLocs2.size())
2053 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
2094 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2198 AFI->setReturnRegsCount(RVLocs.size());
2202 i != RVLocs.size();
2309 if (Copies.size() > 2)
2953 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2982 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3041 // truncate to the right size.
4488 // SplatBitSize is set to the smallest size that splats the vector, so a
4578 // caller would also need to check and handle the change in size.
4611 llvm_unreachable("unexpected size for isNEONModifiedImm");
4803 return VT == MVT::v8i8 && M.size() == 8;
4936 // Make sure the mask has the right size.
4937 if (NumElts != M.size())
5049 if (ValueCounts.size() != 1)
5051 if (!Value.getNode() && ValueCounts.size() > 0)
5054 if (ValueCounts.size() == 0)
5078 // size of the vector from which we get the value is different than the
5079 // size of the vector that we need to create. We will insert the element
5217 for (unsigned j = 0; j < SourceVecs.size(); ++j) {
5238 if (SourceVecs.size() > 2)
5246 for (unsigned i = 0; i < SourceVecs.size(); ++i) {
5680 /// from an integer type half its size.
5770 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
5776 // The vector originally had a size of OrigTy. It was then extended to ExtTy.
5779 assert(ExtTy.is128BitVector() && "Unexpected extension size");
5783 // Must extend size to at least 64 bits to be used as an operand for VMULL.
5789 /// SkipLoadExtensionForVMULL - return a load of the original vector size that
5792 /// reach a total size of 64 bits. We have to add the extension separately
5816 /// be used as an operand to a VMULL instruction. If the original vector size
5839 // Construct a new BUILD_VECTOR with elements truncated to half the size.
6529 LPadList.reserve(CallSiteNumToLPad.size());
6592 unsigned NumLPads = LPadList.size();
6603 .addImm(LPadList.size()));
6894 /// Return the load opcode for a given load size. If load size >= 8,
6913 /// Return the store opcode for a given store size. If store size >= 8,
6932 /// Emit a post-increment load operation with given size. The instructions
6964 /// Emit a post-increment store operation with given size. The instructions
6997 // This pseudo instruction has 3 operands: dst, src, size
6998 // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
7038 // Select the correct opcode and register class for unit size load/store
7772 // operands, where N is the size of the formed vector.
8590 // 2. The size of its operands is 32 bits (64 bits are not legal).
8703 // size vector operands into a single quad-register size vector. Do that
8827 // Find the size of memory referenced by the load/store.
8842 // If the increment is a constant, it must match the memory ref size.
8927 SDValue &StVal = Ops[Ops.size()-2];
9063 // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
9065 // The canonical VMOV for a zero vector uses a 32-bit element size.
9115 // Accumulated smaller vector elements must be a multiple of the store size.
9151 // Bitcast the original vector into a vector of store-size units
9904 EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
9915 if (Size >= 16 &&
9919 } else if (Size >= 8 &&
9927 // Lowering to i32/i16 if the size permits.
9928 if (Size >= 4)
9930 else if (Size >= 2)
10449 switch (AsmPieces.size()) {
10457 if (AsmPieces.size() == 3 &&
10474 if (Constraint.size() == 1) {
10487 } else if (Constraint.size() == 2) {
10536 if (Constraint.size() == 1) {
10804 SDValue Size = Op.getOperand(1);
10806 SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
11084 unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
11085 return (Size == 64) && !Subtarget->isMClass();
11096 unsigned Size = LI->getType()->getPrimitiveSizeInBits();
11097 return (Size == 64) && !Subtarget->isMClass();
11104 unsigned Size = AI->getType()->getPrimitiveSizeInBits();
11105 return (Size <= (Subtarget->isMClass() ? 32U : 64U))