Lines Matching refs:Splits
427 // If we've finished all splits, this is easy.
3020 /// This method recursively splits an aggregate op (load or store) into
3021 /// scalar or vector ops. It splits recursively until it hits a single value
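The comment at 3020-3021 describes splitting an aggregate load or store into one operation per leaf element, recursing until a single value type is reached. A minimal standalone sketch of that recursion (toy types and made-up names, not the LLVM implementation):

    // Toy model: an aggregate type is a list of element types; a leaf type
    // has a size. Splitting a load of an aggregate recurses into elements.
    #include <cstdio>
    #include <string>
    #include <vector>

    struct ToyType {
      std::string Name;              // e.g. "i32", "float", "struct"
      unsigned Size = 0;             // byte size for leaf (single value) types
      std::vector<ToyType> Elements; // non-empty for aggregates
      bool isAggregate() const { return !Elements.empty(); }
    };

    // Toy layout: elements laid out back to back, no padding.
    static unsigned sizeOf(const ToyType &Ty) {
      if (!Ty.isAggregate())
        return Ty.Size;
      unsigned S = 0;
      for (const ToyType &E : Ty.Elements)
        S += sizeOf(E);
      return S;
    }

    // Recursively "split" a load of Ty at byte Offset into loads of its leaf
    // element types, stopping once a single value type is reached.
    static void emitSplitLoads(const ToyType &Ty, unsigned Offset) {
      if (!Ty.isAggregate()) {
        std::printf("load %s at offset %u\n", Ty.Name.c_str(), Offset);
        return;
      }
      for (const ToyType &E : Ty.Elements) {
        emitSplitLoads(E, Offset);
        Offset += sizeOf(E);
      }
    }

    int main() {
      ToyType F32{"float", 4, {}};
      ToyType I32{"i32", 4, {}};
      ToyType Inner{"struct", 0, {F32, F32}};
      ToyType Outer{"struct", 0, {I32, Inner}};
      emitSplitLoads(Outer, 0); // i32 at 0, float at 4, float at 8
      return 0;
    }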
3345 // We need to accumulate the splits required of each load or store where we
3351 std::vector<uint64_t> Splits;
3425 assert(Offsets.Splits.empty() &&
3426 "Should not have splits the first time we see an instruction!");
3428 Offsets.Splits.push_back(P.endOffset() - S.beginOffset());
3441 assert(!Offsets.Splits.empty() &&
3442 "Cannot have an empty set of splits on the second partition!");
3443 assert(Offsets.Splits.back() ==
3450 Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset());
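Lines 3345-3450 build, for each candidate load or store, the list of partition end offsets relative to that instruction's own begin offset, with asserts checking that consecutive partitions line up. A simplified standalone sketch of that bookkeeping (SplitOffsetsSketch and notePartition are made-up names; the real record also keeps a pointer to the slice, per 3425-3450):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct SplitOffsetsSketch {
      uint64_t BeginOffset = 0;     // absolute begin of the load/store slice
      uint64_t EndOffset = 0;       // absolute end of the slice
      std::vector<uint64_t> Splits; // interior partition ends, relative to BeginOffset
    };

    // Record that partition [PBegin, PEnd) overlaps the slice. Mirrors the
    // asserts above: each later partition must begin exactly where the
    // previously recorded split ended. Only interior boundaries are kept;
    // the instruction's own size covers the tail (compare the
    // Splits.size() + 1 assertion at 3682-3683).
    static void notePartition(SplitOffsetsSketch &O, uint64_t PBegin, uint64_t PEnd) {
      if (!O.Splits.empty())
        assert(O.Splits.back() == PBegin - O.BeginOffset &&
               "Previous split does not end where this one begins!");
      if (PEnd < O.EndOffset)
        O.Splits.push_back(PEnd - O.BeginOffset);
    }

    int main() {
      // A 16-byte slice at [16, 32) crossed by partition boundaries at 20 and 24.
      SplitOffsetsSketch O;
      O.BeginOffset = 16;
      O.EndOffset = 32;
      notePartition(O, 16, 20);
      notePartition(O, 20, 24);
      notePartition(O, 24, 32);
      assert((O.Splits == std::vector<uint64_t>{4, 8}));
      return 0;
    }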
3455 // such loads and stores, we can only pre-split them if their splits exactly
3480 if (LoadOffsets.Splits == StoreOffsets.Splits)
3484 << " Mismatched splits for load and store:\n"
3489 // with mismatched relative splits. Just give up on them
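Because each Splits vector is stored relative to its own instruction's begin offset, the compatibility check at 3480 can be a plain vector comparison, and pairs with mismatched relative splits are dropped from the candidates (3484-3489). A tiny standalone illustration with assumed values:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<uint64_t> LoadSplits{4, 8};   // load split at +4 and +8
      std::vector<uint64_t> StoreSplits{4, 12}; // store split at +4 and +12
      if (LoadSplits == StoreSplits)
        std::printf("relative splits match; keep pre-splitting this pair\n");
      else
        std::printf("mismatched relative splits; give up on this load/store pair\n");
      return 0;
    }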
3533 // pre-split, and the specific splits needed for them. We actually do the
3562 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
3563 int Idx = 0, Size = Offsets.Splits.size();
3587 // See if we've handled all the splits.
3592 PartOffset = Offsets.Splits[Idx];
3594 PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset;
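The loop at 3562-3594 (and the analogous store loop at 3688-3735 below) walks the interior split points, turning them plus the instruction's total size into Splits.size() + 1 contiguous parts. A standalone sketch of just that part computation (computeParts and Part are made-up names mirroring the loop shape):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Part { uint64_t Offset, Size; };

    static std::vector<Part> computeParts(const std::vector<uint64_t> &Splits,
                                          uint64_t TotalSize) {
      std::vector<Part> Parts;
      uint64_t PartOffset = 0,
               PartSize = Splits.empty() ? TotalSize : Splits.front();
      int Idx = 0, Size = (int)Splits.size();
      for (;;) {
        Parts.push_back({PartOffset, PartSize});
        // See if we've handled all the splits; the final part reaches TotalSize.
        if (Idx >= Size)
          break;
        // Set up the next part.
        PartOffset = Splits[Idx];
        ++Idx;
        PartSize = (Idx < Size ? Splits[Idx] : TotalSize) - PartOffset;
      }
      return Parts;
    }

    int main() {
      // A 16-byte load with interior splits at +4 and +8 yields three parts:
      // (0,4), (4,4), (8,8).
      for (Part P : computeParts({4, 8}, 16))
        std::printf("part at offset %llu, size %llu\n",
                    (unsigned long long)P.Offset, (unsigned long long)P.Size);
      return 0;
    }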
3616 uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1];
3682 Splits.size() + 1 &&
3683 "Too few split loads for the number of splits in the store!");
3688 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
3689 int Idx = 0, Size = Offsets.Splits.size();
3728 // See if we've finished all the splits.
3733 PartOffset = Offsets.Splits[Idx];
3735 PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset;
3760 // should handle the merging, and this uncovers SSA splits which is more
3762 // split and removed eventually, and the splits will be merged by any