//===-- LoopIdiomRecognize.cpp - Loop idiom recognition -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form.  In cases where this kicks in, it can be a significant
// performance win.
//
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop, ctlz, cttz
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set.  It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// We should enhance the memset/memcpy recognition to handle multiple stores in
// the loop.  This would handle things like:
//   void foo(_Complex float *P)
//     for (i) { __real__(*P) = 0;  __imag__(*P) = 0; }
//
// We should enhance this to handle negative strides through memory.
// Alternatively (and perhaps better) we could rely on an earlier pass to force
// forward iteration through memory, which is generally better for cache
// behavior.  Negative strides *do* happen for memset/memcpy loops.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
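//
// Illustrative examples (not from the original header) of loops the pass
// already converts, assuming a unit-stride induction variable and a
// computable trip count:
//   for (i = 0; i < n; ++i) p[i] = 0;       // -->  memset(p, 0, n)
//   for (i = 0; i < n; ++i) a[i] = b[i];    // -->  memcpy(a, b, n)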
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-idiom"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IRBuilder.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");

namespace {
  class LoopIdiomRecognize : public LoopPass {
    Loop *CurLoop;
    const TargetData *TD;
    DominatorTree *DT;
    ScalarEvolution *SE;
    TargetLibraryInfo *TLI;
  public:
    static char ID;
    explicit LoopIdiomRecognize() : LoopPass(ID) {
      initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
    }

    bool runOnLoop(Loop *L, LPPassManager &LPM);
    bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                        SmallVectorImpl<BasicBlock*> &ExitBlocks);

    bool processLoopStore(StoreInst *SI, const SCEV *BECount);
    bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

    bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                                 unsigned StoreAlignment,
                                 Value *SplatValue, Instruction *TheStore,
                                 const SCEVAddRecExpr *Ev,
                                 const SCEV *BECount);
    bool processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
                                    const SCEVAddRecExpr *StoreEv,
                                    const SCEVAddRecExpr *LoadEv,
                                    const SCEV *BECount);

    /// This transformation requires natural loop information & requires that
    /// loop preheaders be inserted into the CFG.
    ///
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<LoopInfo>();
      AU.addPreserved<LoopInfo>();
      AU.addRequiredID(LoopSimplifyID);
      AU.addPreservedID(LoopSimplifyID);
      AU.addRequiredID(LCSSAID);
      AU.addPreservedID(LCSSAID);
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addRequired<ScalarEvolution>();
      AU.addPreserved<ScalarEvolution>();
      AU.addPreserved<DominatorTree>();
      AU.addRequired<DominatorTree>();
      AU.addRequired<TargetLibraryInfo>();
    }
  };
}

char LoopIdiomRecognize::ID = 0;
INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
                    false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); }

/// deleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
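/// For example (illustrative), deleting the store of a recognized loop
/// typically leaves the GEP that computed its address dead, which in turn may
/// leave the arithmetic that only fed that GEP dead as well.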
///
static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE,
                                  const TargetLibraryInfo *TLI) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);

  // Before we touch this instruction, remove it from SE!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // SCEV.
    SE.forgetValue(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, 0);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, TLI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

  } while (!NowDeadInsts.empty());
}

/// deleteIfDeadInstruction - If the specified value is a dead instruction,
/// delete it and any recursively used instructions.
static void deleteIfDeadInstruction(Value *V, ScalarEvolution &SE,
                                    const TargetLibraryInfo *TLI) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (isInstructionTriviallyDead(I, TLI))
      deleteDeadInstruction(I, SE, TLI);
}

bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
  CurLoop = L;

  // Disable loop idiom recognition if the function's name is a common idiom.
  StringRef Name = L->getHeader()->getParent()->getName();
  if (Name == "memset" || Name == "memcpy")
    return false;

  // The trip count of the loop must be analyzable.
  SE = &getAnalysis<ScalarEvolution>();
  if (!SE->hasLoopInvariantBackedgeTakenCount(L))
    return false;
  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BECount)) return false;

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getValue()->getValue() == 0)
      return false;

  // We require target data for now.
  TD = getAnalysisIfAvailable<TargetData>();
  if (TD == 0) return false;

  DT = &getAnalysis<DominatorTree>();
  LoopInfo &LI = getAnalysis<LoopInfo>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  SmallVector<BasicBlock*, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  DEBUG(dbgs() << "loop-idiom Scanning: F["
               << L->getHeader()->getParent()->getName()
               << "] Loop %" << L->getHeader()->getName() << "\n");

  bool MadeChange = false;
  // Scan all the blocks in the loop that are not in subloops.
  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI) {
    // Ignore blocks in subloops.
    if (LI.getLoopFor(*BI) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(*BI, BECount, ExitBlocks);
  }
  return MadeChange;
}
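// Note (illustrative): the backedge-taken count is the number of times the
// backedge runs, so BECount == 0 above means the loop body executes exactly
// once; replacing such a "loop" with a one-element memset/memcpy would be a
// pessimization compared to simply peeling it.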

/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count.  This block is known to be in the
/// current loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                                     SmallVectorImpl<BasicBlock*> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop.  For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop.  Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
    Instruction *Inst = I++;
    // Look for store instructions, which may be optimized to memset/memcpy.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      WeakVH InstPtr(I);
      if (!processLoopStore(SI, BECount)) continue;
      MadeChange = true;

      // If processing the store invalidated our iterator, start over from the
      // top of the block.
      if (InstPtr == 0)
        I = BB->begin();
      continue;
    }

    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      WeakVH InstPtr(I);
      if (!processLoopMemSet(MSI, BECount)) continue;
      MadeChange = true;

      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (InstPtr == 0)
        I = BB->begin();
      continue;
    }
  }

  return MadeChange;
}


/// processLoopStore - See if this store can be promoted to a memset or memcpy.
bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
  if (!SI->isSimple()) return false;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Reject stores that are so large that they overflow an unsigned.
  uint64_t SizeInBits = TD->getTypeSizeInBits(StoredVal->getType());
  if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    return false;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store.  If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
    dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (StoreEv == 0 || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return false;

  // Check to see if the stride matches the size of the store.  If so, then we
  // know that every byte is touched in the loop.
  unsigned StoreSize = (unsigned)SizeInBits >> 3;
  const SCEVConstant *Stride = dyn_cast<SCEVConstant>(StoreEv->getOperand(1));

  if (Stride == 0 || StoreSize != Stride->getValue()->getValue()) {
    // TODO: Could also handle negative stride here someday, that will require
    // the validity check in mayLoopAccessLocation to be updated though.
    // Enable this to print exact negative strides.
    if (0 && Stride && StoreSize == -Stride->getValue()->getValue()) {
      dbgs() << "NEGATIVE STRIDE: " << *SI << "\n";
      dbgs() << "BB: " << *SI->getParent();
    }

    return false;
  }
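  // For reference (illustrative SCEV notation): an i32 store to p[i] in a
  // canonical loop has the pointer expression {p,+,4}<%loop>.  The 4-byte
  // stride equals the 4-byte store size, so consecutive iterations write
  // adjacent, non-overlapping locations and the whole range is covered.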

  // See if we can optimize just this store in isolation.
  if (processLoopStridedStore(StorePtr, StoreSize, SI->getAlignment(),
                              StoredVal, SI, StoreEv, BECount))
    return true;

  // If the stored value is a strided load in the same loop with the same
  // stride, this may be transformable into a memcpy.  This kicks in for stuff
  // like
  //   for (i) A[i] = B[i];
  if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
    const SCEVAddRecExpr *LoadEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getOperand(0)));
    if (LoadEv && LoadEv->getLoop() == CurLoop && LoadEv->isAffine() &&
        StoreEv->getOperand(1) == LoadEv->getOperand(1) && LI->isSimple())
      if (processLoopStoreOfLoopLoad(SI, StoreSize, StoreEv, LoadEv, BECount))
        return true;
  }
  //errs() << "UNHANDLED strided store: " << *StoreEv << " - " << *SI << "\n";

  return false;
}

/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::
processLoopMemSet(MemSetInst *MSI, const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength())) return false;

  // If we're not allowed to hack on memset, we fail.
  if (!TLI->has(LibFunc::memset))
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store.  If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (Ev == 0 || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset.  If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *Stride = dyn_cast<SCEVConstant>(Ev->getOperand(1));

  // TODO: Could also handle negative stride here someday, that will require
  // the validity check in mayLoopAccessLocation to be updated though.
  if (Stride == 0 || MSI->getLength() != Stride->getValue())
    return false;

  return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
                                 MSI->getAlignment(), MSI->getValue(),
                                 MSI, Ev, BECount);
}
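// Illustrative example of the memset widening above: a loop that executes
// memset(&P[i*16], 0, 16) each iteration gives the destination a pointer SCEV
// of {P,+,16}, and the per-iteration length (16) equals the stride, so the
// regions tile a contiguous range that one larger memset in the preheader can
// cover.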

/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access.  The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
                                  Loop *L, const SCEV *BECount,
                                  unsigned StoreSize, AliasAnalysis &AA,
                                  Instruction *IgnoredStore) {
  // Get the location that may be stored across the loop.  Since the access is
  // strided positively through memory, we say that the modified location
  // starts at the pointer and has infinite size.
  uint64_t AccessSize = AliasAnalysis::UnknownSize;

  // If the loop iterates a fixed number of times, we can refine the access
  // size to be exactly the size of the memset, which is (BECount+1)*StoreSize.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    AccessSize = (BECst->getValue()->getZExtValue()+1)*StoreSize;

  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store.  A store to &A[i] over 100 iterations will always
  // return MayAlias with a store to &A[100]; we need StoreLoc to be "A" with
  // size 100, which will then no-alias a store to &A[100].
  AliasAnalysis::Location StoreLoc(Ptr, AccessSize);

  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (BasicBlock::iterator I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I)
      if (&*I != IgnoredStore &&
          (AA.getModRefInfo(I, StoreLoc) & Access))
        return true;

  return false;
}

/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
/// be passed in.  Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
static Constant *getMemSetPatternValue(Value *V, const TargetData &TD) {
  // If the value isn't a constant, we can't promote it to being in a constant
  // array.  We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (C == 0) return 0;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = TD.getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size-1)))
    return 0;

  // Don't care enough about darwin/ppc to implement this.
  if (TD.isBigEndian())
    return 0;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16) return 0;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16) return C;

  // Otherwise, we'll use an array of the constants.
  unsigned ArraySize = 16/Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant*>(ArraySize, C));
}
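// Worked example for getMemSetPatternValue (illustrative): a store of the i32
// constant 0x01020304 has Size == 4 bytes, so the function returns a
// ConstantArray of 16/4 == 4 copies of the value -- the 16-byte pattern that
// memset_pattern16 expects (little-endian targets only, per the check above).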

/// processLoopStridedStore - We see a strided store of some value.  If we can
/// transform this into a memset or memset_pattern in the loop preheader, do so.
bool LoopIdiomRecognize::
processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                        unsigned StoreAlignment, Value *StoredVal,
                        Instruction *TheStore, const SCEVAddRecExpr *Ev,
                        const SCEV *BECount) {

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored.  A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  Value *SplatValue = isBytewiseValue(StoredVal);
  Constant *PatternValue = 0;

  // If we're allowed to form a memset, and the stored value would be
  // acceptable for memset, use it.
  if (SplatValue && TLI->has(LibFunc::memset) &&
      // Verify that the stored value is loop invariant.  If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // Keep and use SplatValue.
    PatternValue = 0;
  } else if (TLI->has(LibFunc::memset_pattern16) &&
             (PatternValue = getMemSetPatternValue(StoredVal, *TD))) {
    // It looks like we can use PatternValue!
    SplatValue = 0;
  } else {
    // Otherwise, this isn't an idiom we can transform.  For example, we can't
    // do anything with a 3-byte store.
    return false;
  }

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header.  This allows us to insert code for them in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, "loop-idiom");

  // Okay, we have a strided store "p[i]" of a splattable value.  We can turn
  // this into a memset in the loop preheader now if we want.  However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location.  Check for any overlap by generating the
  // base pointer and checking the region.
  unsigned AddrSpace = cast<PointerType>(DestPtr->getType())->getAddressSpace();
  Value *BasePtr =
    Expander.expandCodeFor(Ev->getStart(), Builder.getInt8PtrTy(AddrSpace),
                           Preheader->getTerminator());

  if (mayLoopAccessLocation(BasePtr, AliasAnalysis::ModRef,
                            CurLoop, BECount,
                            StoreSize, getAnalysis<AliasAnalysis>(), TheStore)){
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    deleteIfDeadInstruction(BasePtr, *SE, TLI);
    return false;
  }

  // Okay, everything looks good, insert the memset.

  // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
  // pointer size if it isn't already.
  Type *IntPtr = TD->getIntPtrType(DestPtr->getContext());
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
                                         SCEV::FlagNUW);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);

  Value *NumBytes =
    Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  CallInst *NewCall;
  if (SplatValue)
    NewCall = Builder.CreateMemSet(BasePtr, SplatValue, NumBytes,
                                   StoreAlignment);
  else {
    Module *M = TheStore->getParent()->getParent()->getParent();
    Value *MSP = M->getOrInsertFunction("memset_pattern16",
                                        Builder.getVoidTy(),
                                        Builder.getInt8PtrTy(),
                                        Builder.getInt8PtrTy(), IntPtr,
                                        (void*)0);

    // Otherwise we should form a memset_pattern16.  PatternValue is known to
    // be a constant array of 16 bytes.  Plop the value into a mergable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::InternalLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(true); // Ok to merge these.
    GV->setAlignment(16);
    Value *PatternPtr = ConstantExpr::getBitCast(GV, Builder.getInt8PtrTy());
    NewCall = Builder.CreateCall3(MSP, BasePtr, PatternPtr, NumBytes);
  }

  DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
               << "    from store to: " << *Ev << " at: " << *TheStore << "\n");
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  // Okay, the memset has been formed.  Zap the original store and anything
  // that feeds into it.
  deleteDeadInstruction(TheStore, *SE, TLI);
  ++NumMemSet;
  return true;
}
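// Worked example (illustrative) of the size computation shared by the
// transform above and the one below: a loop of i32 stores that runs 100
// iterations has a backedge-taken count of 99, so
// NumBytes == (99 + 1) * 4 == 400 bytes.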

/// processLoopStoreOfLoopLoad - We see a strided store whose value is a
/// same-strided load.
bool LoopIdiomRecognize::
processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
                           const SCEVAddRecExpr *StoreEv,
                           const SCEVAddRecExpr *LoadEv,
                           const SCEV *BECount) {
  // If we're not allowed to form memcpy, we fail.
  if (!TLI->has(LibFunc::memcpy))
    return false;

  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header.  This allows us to insert code for them in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, "loop-idiom");

  // Okay, we have a strided store "p[i]" of a loaded value.  We can turn
  // this into a memcpy in the loop preheader now if we want.  However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write the memory region we're storing to.  This includes the load that
  // feeds the stores.  Check for an alias by generating the base address and
  // checking everything.
  Value *StoreBasePtr =
    Expander.expandCodeFor(StoreEv->getStart(),
                           Builder.getInt8PtrTy(SI->getPointerAddressSpace()),
                           Preheader->getTerminator());

  if (mayLoopAccessLocation(StoreBasePtr, AliasAnalysis::ModRef,
                            CurLoop, BECount, StoreSize,
                            getAnalysis<AliasAnalysis>(), SI)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    deleteIfDeadInstruction(StoreBasePtr, *SE, TLI);
    return false;
  }

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  Value *LoadBasePtr =
    Expander.expandCodeFor(LoadEv->getStart(),
                           Builder.getInt8PtrTy(LI->getPointerAddressSpace()),
                           Preheader->getTerminator());

  if (mayLoopAccessLocation(LoadBasePtr, AliasAnalysis::Mod, CurLoop, BECount,
                            StoreSize, getAnalysis<AliasAnalysis>(), SI)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    deleteIfDeadInstruction(LoadBasePtr, *SE, TLI);
    deleteIfDeadInstruction(StoreBasePtr, *SE, TLI);
    return false;
  }

  // Okay, everything is safe, we can transform this!

  // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
  // pointer size if it isn't already.
  Type *IntPtr = TD->getIntPtrType(SI->getContext());
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
                                         SCEV::FlagNUW);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);

  Value *NumBytes =
    Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  CallInst *NewCall =
    Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
                         std::min(SI->getAlignment(), LI->getAlignment()));
  NewCall->setDebugLoc(SI->getDebugLoc());

  DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
               << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
               << "    from store ptr=" << *StoreEv << " at: " << *SI << "\n");

  // Okay, the memcpy has been formed.  Zap the original store and anything
  // that feeds into it.
  deleteDeadInstruction(SI, *SE, TLI);
  ++NumMemCpy;
  return true;
}
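// Illustrative usage note (not part of the original file): the pass is
// registered under the name "loop-idiom" above, so it can be exercised on a
// .ll file with, e.g.:
//   opt -loop-idiom -S input.ll
// and, in builds with assertions enabled, traced via -debug-only=loop-idiom
// (matching the DEBUG_TYPE at the top of this file).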