//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, fix it.
          if (BasicBlock::iterator(CI) != IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
            NewCI->takeName(CI);
            CI->replaceAllUsesWith(NewCI);
            CI->setOperand(0, UndefValue::get(V->getType()));
            rememberInstruction(NewCI);
            return NewCI;
          }
          rememberInstruction(CI);
          return CI;
        }
  }

  // Create a new cast.
  Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
  rememberInstruction(I);
  return I;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast && V->getType() == Ty)
    return V;

  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
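  // For example, a ptrtoint of an inttoptr of a same-width integer (or vice
  // versa) can simply reuse the original operand instead of a new cast.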
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP)) ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS,
                                                          "tmp"));
  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
  rememberInstruction(BO);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const TargetData *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                                                   FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                                                  FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With TargetData, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                                                   FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without TargetData, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    // FIXME: can use A->getNoWrapFlags(FlagNW)
    S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         // FIXME: A->getNoWrapFlags(FlagNW)
                                         SCEV::FlagAnyWrap));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
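///
/// For example (an illustrative sketch), rather than lowering a pointer-typed
/// expression such as {%p,+,4} to ptrtoint/add/inttoptr instructions, the
/// goal is to emit something like "getelementptr i32* %p, i64 %i"; the exact
/// indices chosen depend on the pointee type and on TargetData.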
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of
            // operands we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
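    // For example (illustrative, assuming a typical layout): given the type
    // "{ i32, [10 x i8] }" and a constant offset of 6, the offset falls in
    // the second field, so a field index of 1 is emitted and the remaining
    // offset of 2 is left for the next level.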
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With TargetData, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without TargetData, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, &CRHS, 1);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    // Restore the original insert point.
    if (SaveInsertBB)
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices.begin(),
                                 GepIndices.end(),
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEV *F) {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(F);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant
/// for SCEV expansion. If they are nested, this is the most nested. If they
/// are neighboring, pick the later one.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
  return 0;
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (isNonConstantNegative(LHS.second)) {
      if (!isNonConstantNegative(RHS.second))
        return false;
    } else if (isNonConstantNegative(RHS.second))
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies
  // on to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not an instruction, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (isNonConstantNegative(Op)) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          // FIXME: A->getNoWrapFlags(FlagNW)
                                          SCEV::FlagAnyWrap));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I)
    if (SE.isSCEVable(PN->getType()) &&
        (SE.getEffectiveSCEVType(PN->getType()) ==
         SE.getEffectiveSCEVType(Normalized->getType())) &&
        SE.getSCEV(PN) == Normalized)
      if (BasicBlock *LatchBlock = L->getLoopLatch()) {
        Instruction *IncV =
          cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

        // Determine if this is a well-behaved chain of instructions leading
        // back to the PHI. It probably will be, if we're scanning an inner
        // loop already visited by LSR for example, but it wouldn't have
        // to be.
        do {
          if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
              (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV))) {
            IncV = 0;
            break;
          }
          // If any of the operands don't dominate the insert position, bail.
          // Addrec operands are always loop-invariant, so this can only happen
          // if there are instructions which haven't been hoisted.
          if (L == IVIncInsertLoop) {
            for (User::op_iterator OI = IncV->op_begin()+1,
                 OE = IncV->op_end(); OI != OE; ++OI)
              if (Instruction *OInst = dyn_cast<Instruction>(OI))
                if (!SE.DT->dominates(OInst, IVIncInsertPos)) {
                  IncV = 0;
                  break;
                }
          }
          if (!IncV)
            break;
          // Advance to the next instruction.
          IncV = dyn_cast<Instruction>(IncV->getOperand(0));
          if (!IncV)
            break;
          if (IncV->mayHaveSideEffects()) {
            IncV = 0;
            break;
          }
        } while (IncV != PN);

        if (IncV) {
          // Ok, the add recurrence looks usable.
          // Remember this PHI, even in post-inc mode.
          InsertedValues.insert(PN);
          // Remember the increment.
          IncV = cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
          rememberInstruction(IncV);
          if (L == IVIncInsertLoop)
            do {
              if (SE.DT->dominates(IncV, IVIncInsertPos))
                break;
              // Make sure the increment is where we want it. But don't move it
              // down past a potential existing post-inc user.
              IncV->moveBefore(IVIncInsertPos);
              IVIncInsertPos = IncV;
              IncV = cast<Instruction>(IncV->getOperand(0));
            } while (IncV != PN);
          return PN;
        }
      }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Insert instructions right before the
  // terminator corresponding to the back-edge. Do this before creating the PHI
  // so that PHI reuse code doesn't see an incomplete PHI. If the stride is
  // negative, insert a sub instead of an add for the increment (unless it's a
  // constant, because subtracts of constants are canonicalized to adds).
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  bool isPointer = ExpandTy->isPointerTy();
  bool isNegative = !isPointer && isNonConstantNegative(Step);
  if (isNegative)
    Step = SE.getNegativeSCEV(Step);
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI. If IVIncInsertLoop is
    // non-null and equal to the addrec's loop, insert the instructions
    // at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV;
    // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
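    // (When the step is a constant, the GEP's implicit scaling by the element
    // size costs nothing; for a non-constant step, the code below switches to
    // an i1* GEP so that the byte-sized element type avoids materializing a
    // multiply inside the loop.)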
    if (isPointer) {
      PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
      // If the step isn't constant, don't use an implicitly scaled GEP,
      // because that would require a multiply inside the loop.
      if (!isa<ConstantInt>(StepV))
        GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                    GEPPtrTy->getAddressSpace());
      const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
      IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
      if (IncV->getType() != PN->getType()) {
        IncV = Builder.CreateBitCast(IncV, PN->getType(), "tmp");
        rememberInstruction(IncV);
      }
    } else {
      IncV = isNegative ?
        Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
        Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
      rememberInstruction(IncV);
    }
    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
                       SCEV::FlagAnyWrap));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop(),
                                            // FIXME: Normalized
                                            // ->getNoWrapFlags(FlagNW)
                                            SCEV::FlagAnyWrap));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       // FIXME: S->getNoWrapFlags(FlagNW)
                                       SCEV::FlagAnyWrap));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    // FIXME: can use S->getNoWrapFlags()
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
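    // (Wrapping the pre-expanded values in SCEVUnknown keeps getAddExpr from
    // recombining them back into the original {X,+,F} addrec, which would
    // send this code path into infinite recursion.)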
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty, "tmp");
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty, "tmp");
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty, "tmp");
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS, "tmp");
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS, "tmp");
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *I) {
  BasicBlock::iterator IP = I;
  while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
    ++IP;
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstNonPHI();
      while (isInsertedInstruction(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt))
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  if (PostIncLoops.empty())
    InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  restoreInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);

  // If we just claimed an existing instruction and that instruction had
  // been the insert point, adjust the insert point forward so that
  // subsequently inserted code will be dominated.
  if (Builder.GetInsertPoint() == I) {
    BasicBlock::iterator It = cast<Instruction>(I);
    do { ++It; } while (isInsertedInstruction(It) ||
                        isa<DbgInfoIntrinsic>(It));
    Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
  }
}

void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
  // If we acquired more instructions since the old insert point was saved,
  // advance past them.
  while (isInsertedInstruction(I) || isa<DbgInfoIntrinsic>(I)) ++I;

  Builder.SetInsertPoint(BB, I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
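  // This is the canonical induction variable itself: it starts at zero and
  // advances by one on each iteration of L.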
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return V;
}