//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound, const DataLayout &TD){
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}
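
// A worked example of the arithmetic above (illustrative only; the actual
// offsets come from the target's DataLayout, here assumed to give i32 size 4
// and i16 size 2):
//
//   %f = getelementptr {i32, [8 x i16]}* %p, i32 0, i32 1, i32 3
//
// With Idx == 1, the indices contribute 0 (pointee index zero), 4 (the struct
// offset of field 1), and 3 * 2 == 6 (array index times element size), for a
// total Offset of 10 bytes.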

/// IsPointerOffset - Return true if Ptr1 is provably equal to Ptr2 plus a
/// constant offset, and return that constant offset.  For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40].  In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &TD) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();
  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && GEP2 == 0 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  if (GEP2 && GEP1 == 0 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After those, they may differ by a constant
  // offset, which determines their offset from each other.  We handle no other
  // cases at this point.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [3, 4).  The fourth store joins the
/// two ranges into [0, 4), which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi range that describes the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &TD) const;

};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
  // If we found at least 4 stores to merge or at least 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
    if (!isa<StoreInst>(TheStores[i]))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored.  If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}
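
// To make the final heuristic above concrete, assume a target whose pointer
// size is 4 bytes.  Three stores (two i8 and one i16) covering 4 contiguous
// bytes give NumPointerStores == 1 and NumByteStores == 0, and 3 > 1, so the
// memset is formed.  Three i32 stores covering 12 bytes give
// NumPointerStores == 3, and 3 > 3 fails, so they are left for the code
// generator to merge.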

namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  const DataLayout &TD;
public:
  MemsetRanges(const DataLayout &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);

};

} // end anon namespace


/// addRange - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
///
/// Do a linear search of the ranges to see if this can be joined and/or to
/// find the insertion point in the list.  We keep the ranges sorted for
/// simplicity here.  This is a linear search of a linked list, which is ugly;
/// however, the number of ranges is limited, so this won't get crazy slow.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start     = Start;
    R.End       = End;
    R.StartPtr  = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}
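
// Revisiting the four-store example from the MemsetRange comment with this
// routine: the stores to P+1 and P+0 take the "insert new range" and "extend
// start" paths to build [0, 2); P+3 inserts the disjoint range [3, 4); and
// P+2 then extends [0, 2) to [0, 3), at which point the trailing merge loop
// folds [3, 4) in, producing the single range [0, 4).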

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    TargetLibraryInfo *TLI;
    const DataLayout *TD;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = 0;
      TLI = 0;
      TD = 0;
    }

    bool runOnFunction(Function &F);

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetLibraryInfo>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                       uint64_t MSize);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)
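
// Given the registration above, the pass can be exercised in isolation, e.g.:
//
//   opt -memcpyopt -S input.ll -o output.ll
//
// or scheduled programmatically via createMemCpyOptPass() (see the sketch at
// the end of this file).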

/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  if (TD == 0) return 0;

  // Okay, so we now have a single store that is splatable.  Scan to find all
  // subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(*TD);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
                           Offset, *TD))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return 0;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = 0;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (SmallVectorImpl<Instruction *>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI) {
      MD->removeInstruction(*SI);
      (*SI)->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}
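
// For illustration, given a sequence like the following (names hypothetical;
// %P is assumed to point at four-plus writable bytes):
//
//   store i8 0, i8* %P
//   %P1 = getelementptr i8* %P, i64 1
//   store i8 0, i8* %P1
//   %P2 = getelementptr i8* %P, i64 2
//   store i8 0, i8* %P2
//   %P3 = getelementptr i8* %P, i64 3
//   store i8 0, i8* %P3
//
// the routine above collects the single range [0, 4) and rewrites it as
// roughly:
//
//   call void @llvm.memset.p0i8.i64(i8* %P, i8 0, i64 4, i32 1, i1 false)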

bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  if (TD == 0) return false;

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = 0;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
        AliasAnalysis::Location StoreLoc = AA.getLocation(SI);
        for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
             E = C; I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
            C = 0;
            break;
          }
        }
      }

      if (C) {
        unsigned storeAlign = SI->getAlignment();
        if (!storeAlign)
          storeAlign = TD->getABITypeAlignment(SI->getOperand(0)->getType());
        unsigned loadAlign = LI->getAlignment();
        if (!loadAlign)
          loadAlign = TD->getABITypeAlignment(LI->getType());

        bool changed = performCallSlotOptzn(LI,
                        SI->getPointerOperand()->stripPointerCasts(),
                        LI->getPointerOperand()->stripPointerCasts(),
                        TD->getTypeStoreSize(SI->getOperand(0)->getType()),
                        std::min(storeAlign, loadAlign), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, such as "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }

  return false;
}

bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }
  return false;
}
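
// The load/store flavor of call slot forwarding handled in processStore looks
// like this in IR (a sketch; @f, %T, %S, and %ty are hypothetical, and @f is
// assumed to fully initialize its argument):
//
//   call void @f(%ty* %T)
//   %val = load %ty* %T
//   store %ty %val, %ty* %S
//
// If the checks in performCallSlotOptzn (below) pass, the call is rewritten to
// write into %S directly and the load/store pair is deleted.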

/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  if (TD == 0) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check that it is at least as large as
    // srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    if (!StructTy->isSized()) {
      // The call may never return and hence the copy-instruction may never
      // be executed, and therefore it's not safe to say "the destination
      // has at least <cpyLen> bytes, as implied by the copy-instruction".
      return false;
    }

    uint64_t destSize = TD->getTypeAllocSize(StructTy);
    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = TD->getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  AliasAnalysis::ModRefResult MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != AliasAnalysis::NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                          CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Remove the memcpy from MemDep; the caller erases the instruction itself.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}
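
// In the memcpy form (see processMemCpy below), the same transformation turns
// a pattern like the following (a sketch; @f and %ty are hypothetical):
//
//   %tmp = alloca %ty
//   call void @f(%ty* sret %tmp)
//   ; ... memcpy of sizeof(%ty) bytes from %tmp to %dst ...
//
// into the call writing through %dst directly; the now-dead memcpy is then
// erased by the caller.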

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.  MSize is the size of M's copy.
///
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction.  Just
  // ignore the input and let someone else zap MDep.  This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the lengths of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}
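
// Concretely (a sketch; names and sizes illustrative):
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 64, i32 8, i1 false)
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %c, i8* %a, i64 64, i32 8, i1 false)
//
// The second call is rewritten to copy "%c <- %b" (with the min of the two
// alignments, here 8), leaving the first memcpy as a candidate for dead store
// elimination.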

/// processMemCpy - perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize statically-sized memcpy's that are non-volatile.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (CopySize == 0 || M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, CopySize,
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult DepInfo = MD->getDependency(M);
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  AliasAnalysis::Location SrcLoc = AliasAnalysis::getLocationForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
                                                         M, M->getParent());
  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
  }

  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}
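
// For example, when %dst and %src are known not to alias (say, two distinct
// noalias arguments), a call like
//
//   call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 false)
//
// is rewritten in place into the corresponding @llvm.memcpy.p0i8.p0i8.i64
// call simply by swapping the called intrinsic; the operands stay untouched.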

/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  if (TD == 0) return false;

  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (MDep == 0 || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be greater than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need.  If we fail, we bail out.
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, TD) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TD = getAnalysisIfAvailable<DataLayout>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  // If we don't have at least memset and memcpy, there is little point in doing
  // anything here.  These are required by a freestanding implementation, so if
  // even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = 0;
  return MadeChange;
}
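
// A minimal sketch of running this pass programmatically with the legacy pass
// manager of this LLVM vintage (M is a Module; the required analyses are
// scheduled automatically):
//
//   PassManager PM;
//   PM.add(createMemCpyOptPass());
//   PM.run(*M);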