      1 //===- InstructionCombining.cpp - Combine multiple instructions -----------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
      10 // InstructionCombining - Combine instructions to form fewer, simpler
     11 // instructions.  This pass does not modify the CFG.  This pass is where
     12 // algebraic simplification happens.
     13 //
     14 // This pass combines things like:
     15 //    %Y = add i32 %X, 1
     16 //    %Z = add i32 %Y, 1
     17 // into:
     18 //    %Z = add i32 %X, 2
     19 //
     20 // This is a simple worklist driven algorithm.
     21 //
     22 // This pass guarantees that the following canonicalizations are performed on
     23 // the program:
     24 //    1. If a binary operator has a constant operand, it is moved to the RHS
     25 //    2. Bitwise operators with constant operands are always grouped so that
     26 //       shifts are performed first, then or's, then and's, then xor's.
     27 //    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
     28 //    4. All cmp instructions on boolean values are replaced with logical ops
     29 //    5. add X, X is represented as (X*2) => (X << 1)
     30 //    6. Multiplies with a power-of-two constant argument are transformed into
     31 //       shifts.
     32 //   ... etc.
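         //    For example, (6) turns "%Y = mul i32 %X, 8" into "%Y = shl i32 %X, 3".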
     33 //
     34 //===----------------------------------------------------------------------===//
     35 
     36 #define DEBUG_TYPE "instcombine"
     37 #include "llvm/Transforms/Scalar.h"
     38 #include "InstCombine.h"
     39 #include "llvm-c/Initialization.h"
     40 #include "llvm/ADT/SmallPtrSet.h"
     41 #include "llvm/ADT/Statistic.h"
     42 #include "llvm/ADT/StringSwitch.h"
     43 #include "llvm/Analysis/ConstantFolding.h"
     44 #include "llvm/Analysis/InstructionSimplify.h"
     45 #include "llvm/Analysis/MemoryBuiltins.h"
     46 #include "llvm/IR/DataLayout.h"
     47 #include "llvm/IR/IntrinsicInst.h"
     48 #include "llvm/Support/CFG.h"
     49 #include "llvm/Support/CommandLine.h"
     50 #include "llvm/Support/Debug.h"
     51 #include "llvm/Support/GetElementPtrTypeIterator.h"
     52 #include "llvm/Support/PatternMatch.h"
     53 #include "llvm/Support/ValueHandle.h"
     54 #include "llvm/Target/TargetLibraryInfo.h"
     55 #include "llvm/Transforms/Utils/Local.h"
     56 #include <algorithm>
     57 #include <climits>
     58 using namespace llvm;
     59 using namespace llvm::PatternMatch;
     60 
     61 STATISTIC(NumCombined , "Number of insts combined");
     62 STATISTIC(NumConstProp, "Number of constant folds");
     63 STATISTIC(NumDeadInst , "Number of dead inst eliminated");
     64 STATISTIC(NumSunkInst , "Number of instructions sunk");
     65 STATISTIC(NumExpand,    "Number of expansions");
     66 STATISTIC(NumFactor   , "Number of factorizations");
     67 STATISTIC(NumReassoc  , "Number of reassociations");
     68 
     69 static cl::opt<bool> UnsafeFPShrink("enable-double-float-shrink", cl::Hidden,
     70                                    cl::init(false),
     71                                    cl::desc("Enable unsafe double to float "
     72                                             "shrinking for math lib calls"));
     73 
     74 // Initialization Routines
     75 void llvm::initializeInstCombine(PassRegistry &Registry) {
     76   initializeInstCombinerPass(Registry);
     77 }
     78 
     79 void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
     80   initializeInstCombine(*unwrap(R));
     81 }
     82 
     83 char InstCombiner::ID = 0;
     84 INITIALIZE_PASS_BEGIN(InstCombiner, "instcombine",
     85                 "Combine redundant instructions", false, false)
     86 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
     87 INITIALIZE_PASS_END(InstCombiner, "instcombine",
     88                 "Combine redundant instructions", false, false)
     89 
     90 void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
     91   AU.setPreservesCFG();
     92   AU.addRequired<TargetLibraryInfo>();
     93 }
     94 
     95 
     96 Value *InstCombiner::EmitGEPOffset(User *GEP) {
     97   return llvm::EmitGEPOffset(Builder, *getDataLayout(), GEP);
     98 }
     99 
    100 /// ShouldChangeType - Return true if it is desirable to convert a computation
    101 /// from 'From' to 'To'.  We don't want to convert from a legal to an illegal
     102 /// type, for example, or from a smaller to a larger illegal type.
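         /// For example, with a data layout where only i32 and i64 are legal,
         /// converting i160 -> i64 is desirable, but i64 -> i160 or i64 -> i128 is not.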
    103 bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
    104   assert(From->isIntegerTy() && To->isIntegerTy());
    105 
    106   // If we don't have TD, we don't know if the source/dest are legal.
    107   if (!TD) return false;
    108 
    109   unsigned FromWidth = From->getPrimitiveSizeInBits();
    110   unsigned ToWidth = To->getPrimitiveSizeInBits();
    111   bool FromLegal = TD->isLegalInteger(FromWidth);
    112   bool ToLegal = TD->isLegalInteger(ToWidth);
    113 
    114   // If this is a legal integer from type, and the result would be an illegal
    115   // type, don't do the transformation.
    116   if (FromLegal && !ToLegal)
    117     return false;
    118 
    119   // Otherwise, if both are illegal, do not increase the size of the result. We
    120   // do allow things like i160 -> i64, but not i64 -> i160.
    121   if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    122     return false;
    123 
    124   return true;
    125 }
    126 
     127 // Return true if No Signed Wrap should be maintained for I.
     128 // The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
     129 // where both B and C are ConstantInts, results in a constant that does
    130 // not overflow. This function only handles the Add and Sub opcodes. For
    131 // all other opcodes, the function conservatively returns false.
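         // For example, if I is an "add nsw i8" with B = 100 and C = 50, then 100 + 50
         // overflows i8, so the function returns false and the caller drops the nsw flag.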
    132 static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
    133   OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
    134   if (!OBO || !OBO->hasNoSignedWrap()) {
    135     return false;
    136   }
    137 
     138   // We reason about Add and Sub only.
    139   Instruction::BinaryOps Opcode = I.getOpcode();
    140   if (Opcode != Instruction::Add &&
    141       Opcode != Instruction::Sub) {
    142     return false;
    143   }
    144 
    145   ConstantInt *CB = dyn_cast<ConstantInt>(B);
    146   ConstantInt *CC = dyn_cast<ConstantInt>(C);
    147 
    148   if (!CB || !CC) {
    149     return false;
    150   }
    151 
    152   const APInt &BVal = CB->getValue();
    153   const APInt &CVal = CC->getValue();
    154   bool Overflow = false;
    155 
    156   if (Opcode == Instruction::Add) {
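           // Only the overflow flag matters here; the arithmetic results themselves are
           // discarded.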
    157     BVal.sadd_ov(CVal, Overflow);
    158   } else {
    159     BVal.ssub_ov(CVal, Overflow);
    160   }
    161 
    162   return !Overflow;
    163 }
    164 
     165 /// Conservatively clears subclassOptionalData after a reassociation or
     166 /// commutation. Fast-math flags are the exception: when applicable they are
     167 /// preserved, since these transformations do not invalidate them.
    168 static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
    169   FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
    170   if (!FPMO) {
    171     I.clearSubclassOptionalData();
    172     return;
    173   }
    174 
    175   FastMathFlags FMF = I.getFastMathFlags();
    176   I.clearSubclassOptionalData();
    177   I.setFastMathFlags(FMF);
    178 }
    179 
    180 /// SimplifyAssociativeOrCommutative - This performs a few simplifications for
    181 /// operators which are associative or commutative:
    182 //
    183 //  Commutative operators:
    184 //
    185 //  1. Order operands such that they are listed from right (least complex) to
    186 //     left (most complex).  This puts constants before unary operators before
    187 //     binary operators.
    188 //
    189 //  Associative operators:
    190 //
    191 //  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
    192 //  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
    193 //
    194 //  Associative and commutative operators:
    195 //
    196 //  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
    197 //  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
    198 //  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
    199 //     if C1 and C2 are constants.
    200 //
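         //  For example, transform 2 turns "(X + 3) + 5" into "X + 8", since the inner
         //  operation "3 + 5" simplifies to the constant 8.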
    201 bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
    202   Instruction::BinaryOps Opcode = I.getOpcode();
    203   bool Changed = false;
    204 
    205   do {
    206     // Order operands such that they are listed from right (least complex) to
    207     // left (most complex).  This puts constants before unary operators before
    208     // binary operators.
    209     if (I.isCommutative() && getComplexity(I.getOperand(0)) <
    210         getComplexity(I.getOperand(1)))
    211       Changed = !I.swapOperands();
    212 
    213     BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    214     BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
    215 
    216     if (I.isAssociative()) {
    217       // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
    218       if (Op0 && Op0->getOpcode() == Opcode) {
    219         Value *A = Op0->getOperand(0);
    220         Value *B = Op0->getOperand(1);
    221         Value *C = I.getOperand(1);
    222 
    223         // Does "B op C" simplify?
    224         if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) {
    225           // It simplifies to V.  Form "A op V".
    226           I.setOperand(0, A);
    227           I.setOperand(1, V);
    228           // Conservatively clear the optional flags, since they may not be
    229           // preserved by the reassociation.
    230           if (MaintainNoSignedWrap(I, B, C) &&
    231               (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
    232             // Note: this is only valid because SimplifyBinOp doesn't look at
    233             // the operands to Op0.
    234             I.clearSubclassOptionalData();
    235             I.setHasNoSignedWrap(true);
    236           } else {
    237             ClearSubclassDataAfterReassociation(I);
    238           }
    239 
    240           Changed = true;
    241           ++NumReassoc;
    242           continue;
    243         }
    244       }
    245 
    246       // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
    247       if (Op1 && Op1->getOpcode() == Opcode) {
    248         Value *A = I.getOperand(0);
    249         Value *B = Op1->getOperand(0);
    250         Value *C = Op1->getOperand(1);
    251 
    252         // Does "A op B" simplify?
    253         if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) {
    254           // It simplifies to V.  Form "V op C".
    255           I.setOperand(0, V);
    256           I.setOperand(1, C);
    257           // Conservatively clear the optional flags, since they may not be
    258           // preserved by the reassociation.
    259           ClearSubclassDataAfterReassociation(I);
    260           Changed = true;
    261           ++NumReassoc;
    262           continue;
    263         }
    264       }
    265     }
    266 
    267     if (I.isAssociative() && I.isCommutative()) {
    268       // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
    269       if (Op0 && Op0->getOpcode() == Opcode) {
    270         Value *A = Op0->getOperand(0);
    271         Value *B = Op0->getOperand(1);
    272         Value *C = I.getOperand(1);
    273 
    274         // Does "C op A" simplify?
    275         if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
    276           // It simplifies to V.  Form "V op B".
    277           I.setOperand(0, V);
    278           I.setOperand(1, B);
    279           // Conservatively clear the optional flags, since they may not be
    280           // preserved by the reassociation.
    281           ClearSubclassDataAfterReassociation(I);
    282           Changed = true;
    283           ++NumReassoc;
    284           continue;
    285         }
    286       }
    287 
    288       // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
    289       if (Op1 && Op1->getOpcode() == Opcode) {
    290         Value *A = I.getOperand(0);
    291         Value *B = Op1->getOperand(0);
    292         Value *C = Op1->getOperand(1);
    293 
    294         // Does "C op A" simplify?
    295         if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
    296           // It simplifies to V.  Form "B op V".
    297           I.setOperand(0, B);
    298           I.setOperand(1, V);
    299           // Conservatively clear the optional flags, since they may not be
    300           // preserved by the reassociation.
    301           ClearSubclassDataAfterReassociation(I);
    302           Changed = true;
    303           ++NumReassoc;
    304           continue;
    305         }
    306       }
    307 
    308       // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
    309       // if C1 and C2 are constants.
    310       if (Op0 && Op1 &&
    311           Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
    312           isa<Constant>(Op0->getOperand(1)) &&
    313           isa<Constant>(Op1->getOperand(1)) &&
    314           Op0->hasOneUse() && Op1->hasOneUse()) {
    315         Value *A = Op0->getOperand(0);
    316         Constant *C1 = cast<Constant>(Op0->getOperand(1));
    317         Value *B = Op1->getOperand(0);
    318         Constant *C2 = cast<Constant>(Op1->getOperand(1));
    319 
    320         Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
    321         BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
    322         InsertNewInstWith(New, I);
    323         New->takeName(Op1);
    324         I.setOperand(0, New);
    325         I.setOperand(1, Folded);
    326         // Conservatively clear the optional flags, since they may not be
    327         // preserved by the reassociation.
    328         ClearSubclassDataAfterReassociation(I);
    329 
    330         Changed = true;
    331         continue;
    332       }
    333     }
    334 
    335     // No further simplifications.
    336     return Changed;
    337   } while (1);
    338 }
    339 
    340 /// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to
    341 /// "(X LOp Y) ROp (X LOp Z)".
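         /// For example, And distributes over Or: "X & (Y | Z)" == "(X & Y) | (X & Z)".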
    342 static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
    343                                      Instruction::BinaryOps ROp) {
    344   switch (LOp) {
    345   default:
    346     return false;
    347 
    348   case Instruction::And:
    349     // And distributes over Or and Xor.
    350     switch (ROp) {
    351     default:
    352       return false;
    353     case Instruction::Or:
    354     case Instruction::Xor:
    355       return true;
    356     }
    357 
    358   case Instruction::Mul:
    359     // Multiplication distributes over addition and subtraction.
    360     switch (ROp) {
    361     default:
    362       return false;
    363     case Instruction::Add:
    364     case Instruction::Sub:
    365       return true;
    366     }
    367 
    368   case Instruction::Or:
    369     // Or distributes over And.
    370     switch (ROp) {
    371     default:
    372       return false;
    373     case Instruction::And:
    374       return true;
    375     }
    376   }
    377 }
    378 
    379 /// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to
    380 /// "(X ROp Z) LOp (Y ROp Z)".
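         /// For example, Mul distributes over Add from the right:
         /// "(X + Y) * Z" == "(X * Z) + (Y * Z)".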
    381 static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
    382                                      Instruction::BinaryOps ROp) {
    383   if (Instruction::isCommutative(ROp))
    384     return LeftDistributesOverRight(ROp, LOp);
    385   // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
    386   // but this requires knowing that the addition does not overflow and other
    387   // such subtleties.
    388   return false;
    389 }
    390 
    391 /// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
    392 /// which some other binary operation distributes over either by factorizing
    393 /// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this
    394 /// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is
    395 /// a win).  Returns the simplified value, or null if it didn't simplify.
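         ///
         /// For example, factorization can rewrite
         ///    %t0 = mul i32 %A, %B
         ///    %t1 = mul i32 %A, %C
         ///    %r  = add i32 %t0, %t1
         /// into
         ///    %s  = add i32 %B, %C
         ///    %r  = mul i32 %A, %s
         /// when %t0 and %t1 have no other uses.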
    396 Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
    397   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
    398   BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
    399   BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
    400   Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op
    401 
    402   // Factorization.
    403   if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) {
    404     // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
    405     // a common term.
    406     Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
    407     Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
    408     Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
    409 
    410     // Does "X op' Y" always equal "Y op' X"?
    411     bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
    412 
    413     // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
    414     if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
    415       // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    416       // commutative case, "(A op' B) op (C op' A)"?
    417       if (A == C || (InnerCommutative && A == D)) {
    418         if (A != C)
    419           std::swap(C, D);
    420         // Consider forming "A op' (B op D)".
    421         // If "B op D" simplifies then it can be formed with no cost.
    422         Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD);
    423         // If "B op D" doesn't simplify then only go on if both of the existing
    424         // operations "A op' B" and "C op' D" will be zapped as no longer used.
    425         if (!V && Op0->hasOneUse() && Op1->hasOneUse())
    426           V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName());
    427         if (V) {
    428           ++NumFactor;
    429           V = Builder->CreateBinOp(InnerOpcode, A, V);
    430           V->takeName(&I);
    431           return V;
    432         }
    433       }
    434 
    435     // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
    436     if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
    437       // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    438       // commutative case, "(A op' B) op (B op' D)"?
    439       if (B == D || (InnerCommutative && B == C)) {
    440         if (B != D)
    441           std::swap(C, D);
    442         // Consider forming "(A op C) op' B".
    443         // If "A op C" simplifies then it can be formed with no cost.
    444         Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD);
    445         // If "A op C" doesn't simplify then only go on if both of the existing
    446         // operations "A op' B" and "C op' D" will be zapped as no longer used.
    447         if (!V && Op0->hasOneUse() && Op1->hasOneUse())
    448           V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName());
    449         if (V) {
    450           ++NumFactor;
    451           V = Builder->CreateBinOp(InnerOpcode, V, B);
    452           V->takeName(&I);
    453           return V;
    454         }
    455       }
    456   }
    457 
    458   // Expansion.
    459   if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    460     // The instruction has the form "(A op' B) op C".  See if expanding it out
    461     // to "(A op C) op' (B op C)" results in simplifications.
    462     Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    463     Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
    464 
    465     // Do "A op C" and "B op C" both simplify?
    466     if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD))
    467       if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) {
    468         // They do! Return "L op' R".
    469         ++NumExpand;
    470         // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
    471         if ((L == A && R == B) ||
    472             (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
    473           return Op0;
    474         // Otherwise return "L op' R" if it simplifies.
    475         if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
    476           return V;
    477         // Otherwise, create a new instruction.
    478         C = Builder->CreateBinOp(InnerOpcode, L, R);
    479         C->takeName(&I);
    480         return C;
    481       }
    482   }
    483 
    484   if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    485     // The instruction has the form "A op (B op' C)".  See if expanding it out
    486     // to "(A op B) op' (A op C)" results in simplifications.
    487     Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    488     Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
    489 
    490     // Do "A op B" and "A op C" both simplify?
    491     if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD))
    492       if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) {
    493         // They do! Return "L op' R".
    494         ++NumExpand;
    495         // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
    496         if ((L == B && R == C) ||
    497             (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
    498           return Op1;
    499         // Otherwise return "L op' R" if it simplifies.
    500         if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
    501           return V;
    502         // Otherwise, create a new instruction.
    503         A = Builder->CreateBinOp(InnerOpcode, L, R);
    504         A->takeName(&I);
    505         return A;
    506       }
    507   }
    508 
    509   return 0;
    510 }
    511 
    512 // dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
    513 // if the LHS is a constant zero (which is the 'negate' form).
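         // For example, given "%n = sub i32 0, %X" this returns %X; given the constant
         // i32 -5 it returns the constant i32 5.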
    514 //
    515 Value *InstCombiner::dyn_castNegVal(Value *V) const {
    516   if (BinaryOperator::isNeg(V))
    517     return BinaryOperator::getNegArgument(V);
    518 
    519   // Constants can be considered to be negated values if they can be folded.
    520   if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    521     return ConstantExpr::getNeg(C);
    522 
    523   if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    524     if (C->getType()->getElementType()->isIntegerTy())
    525       return ConstantExpr::getNeg(C);
    526 
    527   return 0;
    528 }
    529 
    530 // dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
    531 // instruction if the LHS is a constant negative zero (which is the 'negate'
    532 // form).
    533 //
    534 Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
    535   if (BinaryOperator::isFNeg(V, IgnoreZeroSign))
    536     return BinaryOperator::getFNegArgument(V);
    537 
    538   // Constants can be considered to be negated values if they can be folded.
    539   if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    540     return ConstantExpr::getFNeg(C);
    541 
    542   if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    543     if (C->getType()->getElementType()->isFloatingPointTy())
    544       return ConstantExpr::getFNeg(C);
    545 
    546   return 0;
    547 }
    548 
    549 static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
    550                                              InstCombiner *IC) {
    551   if (CastInst *CI = dyn_cast<CastInst>(&I)) {
    552     return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
    553   }
    554 
    555   // Figure out if the constant is the left or the right argument.
    556   bool ConstIsRHS = isa<Constant>(I.getOperand(1));
    557   Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
    558 
    559   if (Constant *SOC = dyn_cast<Constant>(SO)) {
    560     if (ConstIsRHS)
    561       return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    562     return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
    563   }
    564 
    565   Value *Op0 = SO, *Op1 = ConstOperand;
    566   if (!ConstIsRHS)
    567     std::swap(Op0, Op1);
    568 
    569   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    570     return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
    571                                     SO->getName()+".op");
    572   if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    573     return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
    574                                    SO->getName()+".cmp");
    575   if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
     576     return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
    577                                    SO->getName()+".cmp");
    578   llvm_unreachable("Unknown binary instruction type!");
    579 }
    580 
    581 // FoldOpIntoSelect - Given an instruction with a select as one operand and a
    582 // constant as the other operand, try to fold the binary operator into the
    583 // select arguments.  This also works for Cast instructions, which obviously do
    584 // not have a second operand.
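         //
         // For example, adding the constant 16 to "select i1 %c, i32 4, i32 8" can
         // produce "select i1 %c, i32 20, i32 24" when the select has no other uses.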
    585 Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
    586   // Don't modify shared select instructions
    587   if (!SI->hasOneUse()) return 0;
    588   Value *TV = SI->getOperand(1);
    589   Value *FV = SI->getOperand(2);
    590 
    591   if (isa<Constant>(TV) || isa<Constant>(FV)) {
    592     // Bool selects with constant operands can be folded to logical ops.
    593     if (SI->getType()->isIntegerTy(1)) return 0;
    594 
    595     // If it's a bitcast involving vectors, make sure it has the same number of
    596     // elements on both sides.
    597     if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
    598       VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
    599       VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
    600 
    601       // Verify that either both or neither are vectors.
    602       if ((SrcTy == NULL) != (DestTy == NULL)) return 0;
    603       // If vectors, verify that they have the same number of elements.
    604       if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
    605         return 0;
    606     }
    607 
    608     Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    609     Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);
    610 
    611     return SelectInst::Create(SI->getCondition(),
    612                               SelectTrueVal, SelectFalseVal);
    613   }
    614   return 0;
    615 }
    616 
    617 
    618 /// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
    619 /// has a PHI node as operand #0, see if we can fold the instruction into the
    620 /// PHI (which is only possible if all operands to the PHI are constants).
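         ///
         /// For example, "%z = zext i1 %p to i32" with %p = phi i1 [ true, %bb0 ],
         /// [ false, %bb1 ] can be folded into a new phi of the folded constants:
         ///   %z = phi i32 [ 1, %bb0 ], [ 0, %bb1 ]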
    621 ///
    622 Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
    623   PHINode *PN = cast<PHINode>(I.getOperand(0));
    624   unsigned NumPHIValues = PN->getNumIncomingValues();
    625   if (NumPHIValues == 0)
    626     return 0;
    627 
    628   // We normally only transform phis with a single use.  However, if a PHI has
    629   // multiple uses and they are all the same operation, we can fold *all* of the
    630   // uses into the PHI.
    631   if (!PN->hasOneUse()) {
    632     // Walk the use list for the instruction, comparing them to I.
    633     for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
    634          UI != E; ++UI) {
    635       Instruction *User = cast<Instruction>(*UI);
    636       if (User != &I && !I.isIdenticalTo(User))
    637         return 0;
    638     }
    639     // Otherwise, we can replace *all* users with the new PHI we form.
    640   }
    641 
    642   // Check to see if all of the operands of the PHI are simple constants
    643   // (constantint/constantfp/undef).  If there is one non-constant value,
    644   // remember the BB it is in.  If there is more than one or if *it* is a PHI,
    645   // bail out.  We don't do arbitrary constant expressions here because moving
    646   // their computation can be expensive without a cost model.
    647   BasicBlock *NonConstBB = 0;
    648   for (unsigned i = 0; i != NumPHIValues; ++i) {
    649     Value *InVal = PN->getIncomingValue(i);
    650     if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
    651       continue;
    652 
    653     if (isa<PHINode>(InVal)) return 0;  // Itself a phi.
    654     if (NonConstBB) return 0;  // More than one non-const value.
    655 
    656     NonConstBB = PN->getIncomingBlock(i);
    657 
    658     // If the InVal is an invoke at the end of the pred block, then we can't
    659     // insert a computation after it without breaking the edge.
    660     if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
    661       if (II->getParent() == NonConstBB)
    662         return 0;
    663 
    664     // If the incoming non-constant value is in I's block, we will remove one
    665     // instruction, but insert another equivalent one, leading to infinite
    666     // instcombine.
    667     if (NonConstBB == I.getParent())
    668       return 0;
    669   }
    670 
    671   // If there is exactly one non-constant value, we can insert a copy of the
    672   // operation in that block.  However, if this is a critical edge, we would be
     673   // inserting the computation on some other paths (e.g. inside a loop).  Only
    674   // do this if the pred block is unconditionally branching into the phi block.
    675   if (NonConstBB != 0) {
    676     BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    677     if (!BI || !BI->isUnconditional()) return 0;
    678   }
    679 
    680   // Okay, we can do the transformation: create the new PHI node.
    681   PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
    682   InsertNewInstBefore(NewPN, *PN);
    683   NewPN->takeName(PN);
    684 
    685   // If we are going to have to insert a new computation, do so right before the
     686   // predecessor's terminator.
    687   if (NonConstBB)
    688     Builder->SetInsertPoint(NonConstBB->getTerminator());
    689 
    690   // Next, add all of the operands to the PHI.
    691   if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    692     // We only currently try to fold the condition of a select when it is a phi,
    693     // not the true/false values.
    694     Value *TrueV = SI->getTrueValue();
    695     Value *FalseV = SI->getFalseValue();
    696     BasicBlock *PhiTransBB = PN->getParent();
    697     for (unsigned i = 0; i != NumPHIValues; ++i) {
    698       BasicBlock *ThisBB = PN->getIncomingBlock(i);
    699       Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
    700       Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
    701       Value *InV = 0;
    702       if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
    703         InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
    704       else
    705         InV = Builder->CreateSelect(PN->getIncomingValue(i),
    706                                     TrueVInPred, FalseVInPred, "phitmp");
    707       NewPN->addIncoming(InV, ThisBB);
    708     }
    709   } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    710     Constant *C = cast<Constant>(I.getOperand(1));
    711     for (unsigned i = 0; i != NumPHIValues; ++i) {
    712       Value *InV = 0;
    713       if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
    714         InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
    715       else if (isa<ICmpInst>(CI))
    716         InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
    717                                   C, "phitmp");
    718       else
    719         InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
    720                                   C, "phitmp");
    721       NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    722     }
    723   } else if (I.getNumOperands() == 2) {
    724     Constant *C = cast<Constant>(I.getOperand(1));
    725     for (unsigned i = 0; i != NumPHIValues; ++i) {
    726       Value *InV = 0;
    727       if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
    728         InV = ConstantExpr::get(I.getOpcode(), InC, C);
    729       else
    730         InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
    731                                    PN->getIncomingValue(i), C, "phitmp");
    732       NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    733     }
    734   } else {
    735     CastInst *CI = cast<CastInst>(&I);
    736     Type *RetTy = CI->getType();
    737     for (unsigned i = 0; i != NumPHIValues; ++i) {
    738       Value *InV;
    739       if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
    740         InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
    741       else
    742         InV = Builder->CreateCast(CI->getOpcode(),
    743                                 PN->getIncomingValue(i), I.getType(), "phitmp");
    744       NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    745     }
    746   }
    747 
    748   for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
    749        UI != E; ) {
    750     Instruction *User = cast<Instruction>(*UI++);
    751     if (User == &I) continue;
    752     ReplaceInstUsesWith(*User, NewPN);
    753     EraseInstFromFunction(*User);
    754   }
    755   return ReplaceInstUsesWith(I, NewPN);
    756 }
    757 
    758 /// FindElementAtOffset - Given a type and a constant offset, determine whether
    759 /// or not there is a sequence of GEP indices into the type that will land us at
    760 /// the specified offset.  If so, fill them into NewIndices and return the
    761 /// resultant element type, otherwise return null.
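         ///
         /// For example (assuming a typical data layout), for the type
         /// "{ i32, [10 x i8] }" and Offset 6 this produces the indices 0, 1, 2 and
         /// returns i8: field 1 starts at byte 4, and byte 6 is element 2 of the array.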
    762 Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
    763                                           SmallVectorImpl<Value*> &NewIndices) {
    764   if (!TD) return 0;
    765   if (!Ty->isSized()) return 0;
    766 
    767   // Start with the index over the outer type.  Note that the type size
    768   // might be zero (even if the offset isn't zero) if the indexed type
    769   // is something like [0 x {int, int}]
    770   Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
    771   int64_t FirstIdx = 0;
    772   if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
    773     FirstIdx = Offset/TySize;
    774     Offset -= FirstIdx*TySize;
    775 
    776     // Handle hosts where % returns negative instead of values [0..TySize).
    777     if (Offset < 0) {
    778       --FirstIdx;
    779       Offset += TySize;
    780       assert(Offset >= 0);
    781     }
    782     assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
    783   }
    784 
    785   NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));
    786 
     787   // Index into the types.  If we fail, return null.
    788   while (Offset) {
    789     // Indexing into tail padding between struct/array elements.
    790     if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
    791       return 0;
    792 
    793     if (StructType *STy = dyn_cast<StructType>(Ty)) {
    794       const StructLayout *SL = TD->getStructLayout(STy);
    795       assert(Offset < (int64_t)SL->getSizeInBytes() &&
    796              "Offset must stay within the indexed type");
    797 
    798       unsigned Elt = SL->getElementContainingOffset(Offset);
    799       NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
    800                                             Elt));
    801 
    802       Offset -= SL->getElementOffset(Elt);
    803       Ty = STy->getElementType(Elt);
    804     } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    805       uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
    806       assert(EltSize && "Cannot index into a zero-sized array");
    807       NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
    808       Offset %= EltSize;
    809       Ty = AT->getElementType();
    810     } else {
     811       // Otherwise, we can't index into the middle of this scalar type, bail.
    812       return 0;
    813     }
    814   }
    815 
    816   return Ty;
    817 }
    818 
    819 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
    820   // If this GEP has only 0 indices, it is the same pointer as
    821   // Src. If Src is not a trivial GEP too, don't combine
    822   // the indices.
    823   if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
    824       !Src.hasOneUse())
    825     return false;
    826   return true;
    827 }
    828 
    829 /// Descale - Return a value X such that Val = X * Scale, or null if none.  If
    830 /// the multiplication is known not to overflow then NoSignedWrap is set.
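         ///
         /// For example, descaling "%v = mul i32 %x, 12" (with a single use) by a Scale
         /// of 4 rewrites it to "mul i32 %x, 3"; NoSignedWrap is set only if the
         /// original multiply was nsw.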
    831 Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
    832   assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
    833   assert(cast<IntegerType>(Val->getType())->getBitWidth() ==
    834          Scale.getBitWidth() && "Scale not compatible with value!");
    835 
    836   // If Val is zero or Scale is one then Val = Val * Scale.
    837   if (match(Val, m_Zero()) || Scale == 1) {
    838     NoSignedWrap = true;
    839     return Val;
    840   }
    841 
    842   // If Scale is zero then it does not divide Val.
    843   if (Scale.isMinValue())
    844     return 0;
    845 
    846   // Look through chains of multiplications, searching for a constant that is
    847   // divisible by Scale.  For example, descaling X*(Y*(Z*4)) by a factor of 4
    848   // will find the constant factor 4 and produce X*(Y*Z).  Descaling X*(Y*8) by
    849   // a factor of 4 will produce X*(Y*2).  The principle of operation is to bore
    850   // down from Val:
    851   //
    852   //     Val = M1 * X          ||   Analysis starts here and works down
    853   //      M1 = M2 * Y          ||   Doesn't descend into terms with more
    854   //      M2 =  Z * 4          \/   than one use
    855   //
    856   // Then to modify a term at the bottom:
    857   //
    858   //     Val = M1 * X
    859   //      M1 =  Z * Y          ||   Replaced M2 with Z
    860   //
    861   // Then to work back up correcting nsw flags.
    862 
    863   // Op - the term we are currently analyzing.  Starts at Val then drills down.
    864   // Replaced with its descaled value before exiting from the drill down loop.
    865   Value *Op = Val;
    866 
    867   // Parent - initially null, but after drilling down notes where Op came from.
    868   // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the
    869   // 0'th operand of Val.
    870   std::pair<Instruction*, unsigned> Parent;
    871 
    872   // RequireNoSignedWrap - Set if the transform requires a descaling at deeper
    873   // levels that doesn't overflow.
    874   bool RequireNoSignedWrap = false;
    875 
    876   // logScale - log base 2 of the scale.  Negative if not a power of 2.
    877   int32_t logScale = Scale.exactLogBase2();
    878 
    879   for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down
    880 
    881     if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
    882       // If Op is a constant divisible by Scale then descale to the quotient.
    883       APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
    884       APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
    885       if (!Remainder.isMinValue())
    886         // Not divisible by Scale.
    887         return 0;
    888       // Replace with the quotient in the parent.
    889       Op = ConstantInt::get(CI->getType(), Quotient);
    890       NoSignedWrap = true;
    891       break;
    892     }
    893 
    894     if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {
    895 
    896       if (BO->getOpcode() == Instruction::Mul) {
    897         // Multiplication.
    898         NoSignedWrap = BO->hasNoSignedWrap();
    899         if (RequireNoSignedWrap && !NoSignedWrap)
    900           return 0;
    901 
    902         // There are three cases for multiplication: multiplication by exactly
    903         // the scale, multiplication by a constant different to the scale, and
    904         // multiplication by something else.
    905         Value *LHS = BO->getOperand(0);
    906         Value *RHS = BO->getOperand(1);
    907 
    908         if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
    909           // Multiplication by a constant.
    910           if (CI->getValue() == Scale) {
    911             // Multiplication by exactly the scale, replace the multiplication
    912             // by its left-hand side in the parent.
    913             Op = LHS;
    914             break;
    915           }
    916 
    917           // Otherwise drill down into the constant.
    918           if (!Op->hasOneUse())
    919             return 0;
    920 
    921           Parent = std::make_pair(BO, 1);
    922           continue;
    923         }
    924 
    925         // Multiplication by something else. Drill down into the left-hand side
    926         // since that's where the reassociate pass puts the good stuff.
    927         if (!Op->hasOneUse())
    928           return 0;
    929 
    930         Parent = std::make_pair(BO, 0);
    931         continue;
    932       }
    933 
    934       if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&
    935           isa<ConstantInt>(BO->getOperand(1))) {
    936         // Multiplication by a power of 2.
    937         NoSignedWrap = BO->hasNoSignedWrap();
    938         if (RequireNoSignedWrap && !NoSignedWrap)
    939           return 0;
    940 
    941         Value *LHS = BO->getOperand(0);
    942         int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
    943           getLimitedValue(Scale.getBitWidth());
    944         // Op = LHS << Amt.
    945 
    946         if (Amt == logScale) {
    947           // Multiplication by exactly the scale, replace the multiplication
    948           // by its left-hand side in the parent.
    949           Op = LHS;
    950           break;
    951         }
    952         if (Amt < logScale || !Op->hasOneUse())
    953           return 0;
    954 
    955         // Multiplication by more than the scale.  Reduce the multiplying amount
    956         // by the scale in the parent.
    957         Parent = std::make_pair(BO, 1);
    958         Op = ConstantInt::get(BO->getType(), Amt - logScale);
    959         break;
    960       }
    961     }
    962 
    963     if (!Op->hasOneUse())
    964       return 0;
    965 
    966     if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
    967       if (Cast->getOpcode() == Instruction::SExt) {
    968         // Op is sign-extended from a smaller type, descale in the smaller type.
    969         unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
    970         APInt SmallScale = Scale.trunc(SmallSize);
    971         // Suppose Op = sext X, and we descale X as Y * SmallScale.  We want to
    972         // descale Op as (sext Y) * Scale.  In order to have
    973         //   sext (Y * SmallScale) = (sext Y) * Scale
    974         // some conditions need to hold however: SmallScale must sign-extend to
    975         // Scale and the multiplication Y * SmallScale should not overflow.
    976         if (SmallScale.sext(Scale.getBitWidth()) != Scale)
    977           // SmallScale does not sign-extend to Scale.
    978           return 0;
    979         assert(SmallScale.exactLogBase2() == logScale);
    980         // Require that Y * SmallScale must not overflow.
    981         RequireNoSignedWrap = true;
    982 
    983         // Drill down through the cast.
    984         Parent = std::make_pair(Cast, 0);
    985         Scale = SmallScale;
    986         continue;
    987       }
    988 
    989       if (Cast->getOpcode() == Instruction::Trunc) {
    990         // Op is truncated from a larger type, descale in the larger type.
    991         // Suppose Op = trunc X, and we descale X as Y * sext Scale.  Then
    992         //   trunc (Y * sext Scale) = (trunc Y) * Scale
    993         // always holds.  However (trunc Y) * Scale may overflow even if
    994         // trunc (Y * sext Scale) does not, so nsw flags need to be cleared
    995         // from this point up in the expression (see later).
    996         if (RequireNoSignedWrap)
    997           return 0;
    998 
    999         // Drill down through the cast.
   1000         unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
   1001         Parent = std::make_pair(Cast, 0);
   1002         Scale = Scale.sext(LargeSize);
   1003         if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())
   1004           logScale = -1;
   1005         assert(Scale.exactLogBase2() == logScale);
   1006         continue;
   1007       }
   1008     }
   1009 
   1010     // Unsupported expression, bail out.
   1011     return 0;
   1012   }
   1013 
   1014   // We know that we can successfully descale, so from here on we can safely
   1015   // modify the IR.  Op holds the descaled version of the deepest term in the
   1016   // expression.  NoSignedWrap is 'true' if multiplying Op by Scale is known
   1017   // not to overflow.
   1018 
   1019   if (!Parent.first)
   1020     // The expression only had one term.
   1021     return Op;
   1022 
   1023   // Rewrite the parent using the descaled version of its operand.
   1024   assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
   1025   assert(Op != Parent.first->getOperand(Parent.second) &&
   1026          "Descaling was a no-op?");
   1027   Parent.first->setOperand(Parent.second, Op);
   1028   Worklist.Add(Parent.first);
   1029 
   1030   // Now work back up the expression correcting nsw flags.  The logic is based
   1031   // on the following observation: if X * Y is known not to overflow as a signed
   1032   // multiplication, and Y is replaced by a value Z with smaller absolute value,
   1033   // then X * Z will not overflow as a signed multiplication either.  As we work
   1034   // our way up, having NoSignedWrap 'true' means that the descaled value at the
   1035   // current level has strictly smaller absolute value than the original.
   1036   Instruction *Ancestor = Parent.first;
   1037   do {
   1038     if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) {
   1039       // If the multiplication wasn't nsw then we can't say anything about the
   1040       // value of the descaled multiplication, and we have to clear nsw flags
   1041       // from this point on up.
   1042       bool OpNoSignedWrap = BO->hasNoSignedWrap();
   1043       NoSignedWrap &= OpNoSignedWrap;
   1044       if (NoSignedWrap != OpNoSignedWrap) {
   1045         BO->setHasNoSignedWrap(NoSignedWrap);
   1046         Worklist.Add(Ancestor);
   1047       }
   1048     } else if (Ancestor->getOpcode() == Instruction::Trunc) {
   1049       // The fact that the descaled input to the trunc has smaller absolute
   1050       // value than the original input doesn't tell us anything useful about
   1051       // the absolute values of the truncations.
   1052       NoSignedWrap = false;
   1053     }
   1054     assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
   1055            "Failed to keep proper track of nsw flags while drilling down?");
   1056 
   1057     if (Ancestor == Val)
   1058       // Got to the top, all done!
   1059       return Val;
   1060 
   1061     // Move up one level in the expression.
   1062     assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
   1063     Ancestor = Ancestor->use_back();
   1064   } while (1);
   1065 }
   1066 
   1067 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
   1068   SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
   1069 
   1070   if (Value *V = SimplifyGEPInst(Ops, TD))
   1071     return ReplaceInstUsesWith(GEP, V);
   1072 
   1073   Value *PtrOp = GEP.getOperand(0);
   1074 
   1075   // Eliminate unneeded casts for indices, and replace indices which displace
   1076   // by multiples of a zero size type with zero.
   1077   if (TD) {
   1078     bool MadeChange = false;
   1079     Type *IntPtrTy = TD->getIntPtrType(GEP.getPointerOperandType());
   1080 
   1081     gep_type_iterator GTI = gep_type_begin(GEP);
   1082     for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
   1083          I != E; ++I, ++GTI) {
   1084       // Skip indices into struct types.
   1085       SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
   1086       if (!SeqTy) continue;
   1087 
   1088       // If the element type has zero size then any index over it is equivalent
   1089       // to an index of zero, so replace it with zero if it is not zero already.
   1090       if (SeqTy->getElementType()->isSized() &&
   1091           TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
   1092         if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
   1093           *I = Constant::getNullValue(IntPtrTy);
   1094           MadeChange = true;
   1095         }
   1096 
   1097       Type *IndexTy = (*I)->getType();
   1098       if (IndexTy != IntPtrTy) {
   1099         // If we are using a wider index than needed for this platform, shrink
   1100         // it to what we need.  If narrower, sign-extend it to what we need.
   1101         // This explicit cast can make subsequent optimizations more obvious.
   1102         *I = Builder->CreateIntCast(*I, IntPtrTy, true);
   1103         MadeChange = true;
   1104       }
   1105     }
   1106     if (MadeChange) return &GEP;
   1107   }
   1108 
   1109   // Combine Indices - If the source pointer to this getelementptr instruction
   1110   // is a getelementptr instruction, combine the indices of the two
   1111   // getelementptr instructions into a single instruction.
   1112   //
   1113   if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
   1114     if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
   1115       return 0;
   1116 
   1117     // Note that if our source is a gep chain itself then we wait for that
   1118     // chain to be resolved before we perform this transformation.  This
   1119     // avoids us creating a TON of code in some cases.
   1120     if (GEPOperator *SrcGEP =
   1121           dyn_cast<GEPOperator>(Src->getOperand(0)))
   1122       if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
   1123         return 0;   // Wait until our source is folded to completion.
   1124 
   1125     SmallVector<Value*, 8> Indices;
   1126 
   1127     // Find out whether the last index in the source GEP is a sequential idx.
   1128     bool EndsWithSequential = false;
   1129     for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
   1130          I != E; ++I)
   1131       EndsWithSequential = !(*I)->isStructTy();
   1132 
    1133     // Can we combine the two pointer arithmetic offsets?
   1134     if (EndsWithSequential) {
   1135       // Replace: gep (gep %P, long B), long A, ...
   1136       // With:    T = long A+B; gep %P, T, ...
   1137       //
   1138       Value *Sum;
   1139       Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
   1140       Value *GO1 = GEP.getOperand(1);
   1141       if (SO1 == Constant::getNullValue(SO1->getType())) {
   1142         Sum = GO1;
   1143       } else if (GO1 == Constant::getNullValue(GO1->getType())) {
   1144         Sum = SO1;
   1145       } else {
   1146         // If they aren't the same type, then the input hasn't been processed
   1147         // by the loop above yet (which canonicalizes sequential index types to
   1148         // intptr_t).  Just avoid transforming this until the input has been
   1149         // normalized.
   1150         if (SO1->getType() != GO1->getType())
   1151           return 0;
   1152         Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
   1153       }
   1154 
   1155       // Update the GEP in place if possible.
   1156       if (Src->getNumOperands() == 2) {
   1157         GEP.setOperand(0, Src->getOperand(0));
   1158         GEP.setOperand(1, Sum);
   1159         return &GEP;
   1160       }
   1161       Indices.append(Src->op_begin()+1, Src->op_end()-1);
   1162       Indices.push_back(Sum);
   1163       Indices.append(GEP.op_begin()+2, GEP.op_end());
   1164     } else if (isa<Constant>(*GEP.idx_begin()) &&
   1165                cast<Constant>(*GEP.idx_begin())->isNullValue() &&
   1166                Src->getNumOperands() != 1) {
   1167       // Otherwise we can do the fold if the first index of the GEP is a zero
   1168       Indices.append(Src->op_begin()+1, Src->op_end());
   1169       Indices.append(GEP.idx_begin()+1, GEP.idx_end());
   1170     }
   1171 
   1172     if (!Indices.empty())
   1173       return (GEP.isInBounds() && Src->isInBounds()) ?
   1174         GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices,
   1175                                           GEP.getName()) :
   1176         GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName());
   1177   }
   1178 
   1179   // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
   1180   Value *StrippedPtr = PtrOp->stripPointerCasts();
   1181   PointerType *StrippedPtrTy = dyn_cast<PointerType>(StrippedPtr->getType());
   1182 
   1183   // We do not handle pointer-vector geps here.
   1184   if (!StrippedPtrTy)
   1185     return 0;
   1186 
   1187   if (StrippedPtr != PtrOp &&
   1188     StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {
   1189 
   1190     bool HasZeroPointerIndex = false;
   1191     if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
   1192       HasZeroPointerIndex = C->isZero();
   1193 
   1194     // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
   1195     // into     : GEP [10 x i8]* X, i32 0, ...
   1196     //
   1197     // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
   1198     //           into     : GEP i8* X, ...
   1199     //
   1200     // This occurs when the program declares an array extern like "int X[];"
   1201     if (HasZeroPointerIndex) {
   1202       PointerType *CPTy = cast<PointerType>(PtrOp->getType());
   1203       if (ArrayType *CATy =
   1204           dyn_cast<ArrayType>(CPTy->getElementType())) {
   1205         // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
   1206         if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
   1207           // -> GEP i8* X, ...
   1208           SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
   1209           GetElementPtrInst *Res =
   1210             GetElementPtrInst::Create(StrippedPtr, Idx, GEP.getName());
   1211           Res->setIsInBounds(GEP.isInBounds());
   1212           return Res;
   1213         }
   1214 
   1215         if (ArrayType *XATy =
   1216               dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
   1217           // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
   1218           if (CATy->getElementType() == XATy->getElementType()) {
   1219             // -> GEP [10 x i8]* X, i32 0, ...
   1220             // At this point, we know that the cast source type is a pointer
   1221             // to an array of the same type as the destination pointer
   1222             // array.  Because the array type is never stepped over (there
   1223             // is a leading zero) we can fold the cast into this GEP.
   1224             GEP.setOperand(0, StrippedPtr);
   1225             return &GEP;
   1226           }
   1227         }
   1228       }
   1229     } else if (GEP.getNumOperands() == 2) {
   1230       // Transform things like:
   1231       // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
   1232       // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
   1233       Type *SrcElTy = StrippedPtrTy->getElementType();
   1234       Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
   1235       if (TD && SrcElTy->isArrayTy() &&
   1236           TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
   1237           TD->getTypeAllocSize(ResElTy)) {
   1238         Value *Idx[2];
   1239         Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
   1240         Idx[1] = GEP.getOperand(1);
   1241         Value *NewGEP = GEP.isInBounds() ?
   1242           Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
   1243           Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
   1244         // V and GEP are both pointer types --> BitCast
   1245         return new BitCastInst(NewGEP, GEP.getType());
   1246       }
   1247 
   1248       // Transform things like:
   1249       // %V = mul i64 %N, 4
   1250       // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
   1251       // into:  %t1 = getelementptr i32* %arr, i32 %N; bitcast
   1252       if (TD && ResElTy->isSized() && SrcElTy->isSized()) {
   1253         // Check that changing the type amounts to dividing the index by a scale
   1254         // factor.
   1255         uint64_t ResSize = TD->getTypeAllocSize(ResElTy);
   1256         uint64_t SrcSize = TD->getTypeAllocSize(SrcElTy);
   1257         if (ResSize && SrcSize % ResSize == 0) {
   1258           Value *Idx = GEP.getOperand(1);
   1259           unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
   1260           uint64_t Scale = SrcSize / ResSize;
   1261 
   1262           // Earlier transforms ensure that the index has type IntPtrType, which
   1263           // considerably simplifies the logic by eliminating implicit casts.
   1264           assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
   1265                  "Index not cast to pointer width?");
   1266 
   1267           bool NSW;
   1268           if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
   1269             // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
   1270             // If the multiplication NewIdx * Scale may overflow then the new
   1271             // GEP may not be "inbounds".
   1272             Value *NewGEP = GEP.isInBounds() && NSW ?
   1273               Builder->CreateInBoundsGEP(StrippedPtr, NewIdx, GEP.getName()) :
   1274               Builder->CreateGEP(StrippedPtr, NewIdx, GEP.getName());
   1275             // The NewGEP must be pointer typed, so must the old one -> BitCast
   1276             return new BitCastInst(NewGEP, GEP.getType());
   1277           }
   1278         }
   1279       }
   1280 
   1281       // Similarly, transform things like:
   1282       // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
   1283       //   (where tmp = 8*tmp2) into:
   1284       // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
   1285       if (TD && ResElTy->isSized() && SrcElTy->isSized() &&
   1286           SrcElTy->isArrayTy()) {
   1287         // Check that changing to the array element type amounts to dividing the
   1288         // index by a scale factor.
   1289         uint64_t ResSize = TD->getTypeAllocSize(ResElTy);
   1290         uint64_t ArrayEltSize =
   1291           TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
   1292         if (ResSize && ArrayEltSize % ResSize == 0) {
   1293           Value *Idx = GEP.getOperand(1);
   1294           unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
   1295           uint64_t Scale = ArrayEltSize / ResSize;
   1296 
   1297           // Earlier transforms ensure that the index has type IntPtrType, which
   1298           // considerably simplifies the logic by eliminating implicit casts.
   1299           assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
   1300                  "Index not cast to pointer width?");
   1301 
   1302           bool NSW;
   1303           if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
   1304             // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
   1305             // If the multiplication NewIdx * Scale may overflow then the new
   1306             // GEP may not be "inbounds".
   1307             Value *Off[2];
   1308             Off[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
   1309             Off[1] = NewIdx;
   1310             Value *NewGEP = GEP.isInBounds() && NSW ?
   1311               Builder->CreateInBoundsGEP(StrippedPtr, Off, GEP.getName()) :
   1312               Builder->CreateGEP(StrippedPtr, Off, GEP.getName());
   1313             // The NewGEP must be pointer typed, so must the old one -> BitCast
   1314             return new BitCastInst(NewGEP, GEP.getType());
   1315           }
   1316         }
   1317       }
   1318     }
   1319   }
   1320 
   1321   /// See if we can simplify:
   1322   ///   X = bitcast A* to B*
   1323   ///   Y = gep X, <...constant indices...>
   1324   /// into a gep of the original struct.  This is important for SROA and alias
   1325   /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
   1326   if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
   1327     APInt Offset(TD ? TD->getPointerSizeInBits() : 1, 0);
   1328     if (TD &&
   1329         !isa<BitCastInst>(BCI->getOperand(0)) &&
   1330         GEP.accumulateConstantOffset(*TD, Offset) &&
   1331         StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {
   1332 
   1333       // If this GEP instruction doesn't move the pointer, just replace the GEP
   1334       // with a bitcast of the real input to the dest type.
   1335       if (!Offset) {
   1336         // If the bitcast is of an allocation, and the allocation will be
   1337         // converted to match the type of the cast, don't touch this.
   1338         if (isa<AllocaInst>(BCI->getOperand(0)) ||
   1339             isAllocationFn(BCI->getOperand(0), TLI)) {
   1340           // See if the bitcast simplifies, if so, don't nuke this GEP yet.
   1341           if (Instruction *I = visitBitCast(*BCI)) {
   1342             if (I != BCI) {
   1343               I->takeName(BCI);
   1344               BCI->getParent()->getInstList().insert(BCI, I);
   1345               ReplaceInstUsesWith(*BCI, I);
   1346             }
   1347             return &GEP;
   1348           }
   1349         }
   1350         return new BitCastInst(BCI->getOperand(0), GEP.getType());
   1351       }
   1352 
   1353       // Otherwise, if the offset is non-zero, we need to find out if there is a
   1354       // field at Offset in 'A's type.  If so, we can pull the cast through the
   1355       // GEP.
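              // A minimal sketch (the struct type here is invented for
              // illustration, assuming 4-byte i32 fields with no padding):
              //   %X = bitcast { i32, i32 }* %A to i8*
              //   %Y = getelementptr i8* %X, i64 4
              // Offset 4 lands on field 1 of %A, so this can become:
              //   %T = getelementptr { i32, i32 }* %A, i32 0, i32 1
              //   %Y = bitcast i32* %T to i8*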
   1356       SmallVector<Value*, 8> NewIndices;
   1357       Type *InTy =
   1358         cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
   1359       if (FindElementAtOffset(InTy, Offset.getSExtValue(), NewIndices)) {
   1360         Value *NGEP = GEP.isInBounds() ?
   1361           Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices) :
   1362           Builder->CreateGEP(BCI->getOperand(0), NewIndices);
   1363 
   1364         if (NGEP->getType() == GEP.getType())
   1365           return ReplaceInstUsesWith(GEP, NGEP);
   1366         NGEP->takeName(&GEP);
   1367         return new BitCastInst(NGEP, GEP.getType());
   1368       }
   1369     }
   1370   }
   1371 
   1372   return 0;
   1373 }
   1374 
   1375 
   1376 
   1377 static bool
   1378 isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users,
   1379                      const TargetLibraryInfo *TLI) {
   1380   SmallVector<Instruction*, 4> Worklist;
   1381   Worklist.push_back(AI);
   1382 
   1383   do {
   1384     Instruction *PI = Worklist.pop_back_val();
   1385     for (Value::use_iterator UI = PI->use_begin(), UE = PI->use_end(); UI != UE;
   1386          ++UI) {
   1387       Instruction *I = cast<Instruction>(*UI);
   1388       switch (I->getOpcode()) {
   1389       default:
   1390         // Give up the moment we see something we can't handle.
   1391         return false;
   1392 
   1393       case Instruction::BitCast:
   1394       case Instruction::GetElementPtr:
   1395         Users.push_back(I);
   1396         Worklist.push_back(I);
   1397         continue;
   1398 
   1399       case Instruction::ICmp: {
   1400         ICmpInst *ICI = cast<ICmpInst>(I);
   1401         // We can fold eq/ne comparisons with null to false/true, respectively.
   1402         if (!ICI->isEquality() || !isa<ConstantPointerNull>(ICI->getOperand(1)))
   1403           return false;
   1404         Users.push_back(I);
   1405         continue;
   1406       }
   1407 
   1408       case Instruction::Call:
   1409         // Ignore no-op and store intrinsics.
   1410         if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
   1411           switch (II->getIntrinsicID()) {
   1412           default:
   1413             return false;
   1414 
   1415           case Intrinsic::memmove:
   1416           case Intrinsic::memcpy:
   1417           case Intrinsic::memset: {
   1418             MemIntrinsic *MI = cast<MemIntrinsic>(II);
   1419             if (MI->isVolatile() || MI->getRawDest() != PI)
   1420               return false;
   1421           }
   1422           // fall through
   1423           case Intrinsic::dbg_declare:
   1424           case Intrinsic::dbg_value:
   1425           case Intrinsic::invariant_start:
   1426           case Intrinsic::invariant_end:
   1427           case Intrinsic::lifetime_start:
   1428           case Intrinsic::lifetime_end:
   1429           case Intrinsic::objectsize:
   1430             Users.push_back(I);
   1431             continue;
   1432           }
   1433         }
   1434 
   1435         if (isFreeCall(I, TLI)) {
   1436           Users.push_back(I);
   1437           continue;
   1438         }
   1439         return false;
   1440 
   1441       case Instruction::Store: {
   1442         StoreInst *SI = cast<StoreInst>(I);
   1443         if (SI->isVolatile() || SI->getPointerOperand() != PI)
   1444           return false;
   1445         Users.push_back(I);
   1446         continue;
   1447       }
   1448       }
   1449       llvm_unreachable("missing a return?");
   1450     }
   1451   } while (!Worklist.empty());
   1452   return true;
   1453 }
   1454 
   1455 Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
   1456   // If we have a malloc call whose only uses are comparisons against null
   1457   // and calls to free, delete the allocation and the free calls, and replace
   1458   // the comparisons with true or false as appropriate.
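          // A minimal sketch of the pattern (illustrative IR, not from a test
          // case):
          //   %p = call i8* @malloc(i64 16)
          //   %c = icmp eq i8* %p, null
          //   call void @free(i8* %p)
          // Here %c is replaced by 'false' and both calls are erased.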
   1459   SmallVector<WeakVH, 64> Users;
   1460   if (isAllocSiteRemovable(&MI, Users, TLI)) {
   1461     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
   1462       Instruction *I = cast_or_null<Instruction>(&*Users[i]);
   1463       if (!I) continue;
   1464 
   1465       if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
   1466         ReplaceInstUsesWith(*C,
   1467                             ConstantInt::get(Type::getInt1Ty(C->getContext()),
   1468                                              C->isFalseWhenEqual()));
   1469       } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
   1470         ReplaceInstUsesWith(*I, UndefValue::get(I->getType()));
   1471       } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
   1472         if (II->getIntrinsicID() == Intrinsic::objectsize) {
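                  // The second argument of llvm.objectsize selects the value
                  // returned when the size is unknown: 0 asks for the maximum
                  // (-1 on unknown), nonzero asks for the minimum (0 on
                  // unknown).  That is what DontKnow computes below.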
   1473           ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
   1474           uint64_t DontKnow = CI->isZero() ? -1ULL : 0;
   1475           ReplaceInstUsesWith(*I, ConstantInt::get(I->getType(), DontKnow));
   1476         }
   1477       }
   1478       EraseInstFromFunction(*I);
   1479     }
   1480 
   1481     if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
   1482       // Replace invoke with a NOP intrinsic to maintain the original CFG
   1483       Module *M = II->getParent()->getParent()->getParent();
   1484       Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
   1485       InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
   1486                          None, "", II->getParent());
   1487     }
   1488     return EraseInstFromFunction(MI);
   1489   }
   1490   return 0;
   1491 }
   1492 
   1493 /// \brief Move the call to free before a NULL test.
   1494 ///
   1495 /// Check whether this call to free is reached only after its argument has
   1496 /// been tested against NULL.
   1497 /// If so, it is legal to move this call into its predecessor block.
   1498 ///
   1499 /// The move is performed only if the block containing the call to free
   1500 /// will be removed, i.e.:
   1501 /// 1. it has only one predecessor P, and P has two successors
   1502 /// 2. it contains the call and an unconditional branch
   1503 /// 3. its successor is the same as its predecessor's successor
   1504 ///
   1505 /// Profitability is not a concern here; this function should be called only
   1506 /// if the caller already knows that the transformation is profitable
   1507 /// (e.g., when optimizing for code size).
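        ///
        /// A sketch of the shape being matched (block names are illustrative):
        ///   pred:
        ///     %c = icmp eq i8* %x, null
        ///     br i1 %c, label %succ, label %freebb
        ///   freebb:
        ///     call void @free(i8* %x)
        ///     br label %succ
        /// After the transform, the free call is moved into 'pred' (free of a
        /// null pointer is a no-op, so this is safe), allowing SimplifyCFG to
        /// remove 'freebb' and the conditional branch.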
   1508 static Instruction *
   1509 tryToMoveFreeBeforeNullTest(CallInst &FI) {
   1510   Value *Op = FI.getArgOperand(0);
   1511   BasicBlock *FreeInstrBB = FI.getParent();
   1512   BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
   1513 
   1514   // Validate part of constraint #1: Only one predecessor
   1515   // FIXME: We could handle multiple predecessors, but then we would have to
   1516   //        duplicate the call to free in each predecessor, which may not be
   1517   //        profitable even for code size.
   1518   if (!PredBB)
   1519     return 0;
   1520 
   1521   // Validate constraint #2: Does this block contain only the call to
   1522   //                         free and an unconditional branch?
   1523   // FIXME: We could check if we can speculate everything in the
   1524   //        predecessor block
   1525   if (FreeInstrBB->size() != 2)
   1526     return 0;
   1527   BasicBlock *SuccBB;
   1528   if (!match(FreeInstrBB->getTerminator(), m_UnconditionalBr(SuccBB)))
   1529     return 0;
   1530 
   1531   // Validate the rest of constraint #1 by matching on the pred branch.
   1532   TerminatorInst *TI = PredBB->getTerminator();
   1533   BasicBlock *TrueBB, *FalseBB;
   1534   ICmpInst::Predicate Pred;
   1535   if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Op), m_Zero()), TrueBB, FalseBB)))
   1536     return 0;
   1537   if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
   1538     return 0;
   1539 
   1540   // Validate constraint #3: Ensure the null case just falls through.
   1541   if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
   1542     return 0;
   1543   assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
   1544          "Broken CFG: missing edge from predecessor to successor");
   1545 
   1546   FI.moveBefore(TI);
   1547   return &FI;
   1548 }
   1549 
   1550 
   1551 Instruction *InstCombiner::visitFree(CallInst &FI) {
   1552   Value *Op = FI.getArgOperand(0);
   1553 
   1554   // free undef -> unreachable.
   1555   if (isa<UndefValue>(Op)) {
   1556     // Insert a new store to null because we cannot modify the CFG here.
   1557     Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
   1558                          UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
   1559     return EraseInstFromFunction(FI);
   1560   }
   1561 
   1562   // If we have 'free null', delete the instruction.  This can happen in STL code
   1563   // when lots of inlining happens.
   1564   if (isa<ConstantPointerNull>(Op))
   1565     return EraseInstFromFunction(FI);
   1566 
   1567   // If we optimize for code size, try to move the call to free before the null
   1568   // test so that SimplifyCFG can remove the empty block and dead code
   1569   // elimination can remove the branch.  I.e., this helps to turn:
   1570   // if (foo) free(foo);
   1571   // into
   1572   // free(foo);
   1573   if (MinimizeSize)
   1574     if (Instruction *I = tryToMoveFreeBeforeNullTest(FI))
   1575       return I;
   1576 
   1577   return 0;
   1578 }
   1579 
   1580 
   1581 
   1582 Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
   1583   // Change br (not X), label True, label False to: br X, label False, True
   1584   Value *X = 0;
   1585   BasicBlock *TrueDest;
   1586   BasicBlock *FalseDest;
   1587   if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
   1588       !isa<Constant>(X)) {
   1589     // Swap Destinations and condition...
   1590     BI.setCondition(X);
   1591     BI.swapSuccessors();
   1592     return &BI;
   1593   }
   1594 
   1595   // Canonicalize fcmp_one -> fcmp_oeq
   1596   FCmpInst::Predicate FPred; Value *Y;
   1597   if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
   1598                              TrueDest, FalseDest)) &&
   1599       BI.getCondition()->hasOneUse())
   1600     if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
   1601         FPred == FCmpInst::FCMP_OGE) {
   1602       FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
   1603       Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
   1604 
   1605       // Swap Destinations and condition.
   1606       BI.swapSuccessors();
   1607       Worklist.Add(Cond);
   1608       return &BI;
   1609     }
   1610 
   1611   // Canonicalize icmp_ne -> icmp_eq
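          // For instance:  br (icmp ne X, Y), T, F  -->  br (icmp eq X, Y), F, T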
   1612   ICmpInst::Predicate IPred;
   1613   if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
   1614                       TrueDest, FalseDest)) &&
   1615       BI.getCondition()->hasOneUse())
   1616     if (IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
   1617         IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
   1618         IPred == ICmpInst::ICMP_SGE) {
   1619       ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
   1620       Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
   1621       // Swap Destinations and condition.
   1622       BI.swapSuccessors();
   1623       Worklist.Add(Cond);
   1624       return &BI;
   1625     }
   1626 
   1627   return 0;
   1628 }
   1629 
   1630 Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
   1631   Value *Cond = SI.getCondition();
   1632   if (Instruction *I = dyn_cast<Instruction>(Cond)) {
   1633     if (I->getOpcode() == Instruction::Add)
   1634       if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
   1635         // change 'switch (X+4) case 1:' into 'switch (X) case -3'
   1636         // Skip the first item since that's the default case.
   1637         for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
   1638              i != e; ++i) {
   1639           ConstantInt* CaseVal = i.getCaseValue();
   1640           Constant* NewCaseVal = ConstantExpr::getSub(cast<Constant>(CaseVal),
   1641                                                       AddRHS);
   1642           assert(isa<ConstantInt>(NewCaseVal) &&
   1643                  "Result of expression should be constant");
   1644           i.setValue(cast<ConstantInt>(NewCaseVal));
   1645         }
   1646         SI.setCondition(I->getOperand(0));
   1647         Worklist.Add(I);
   1648         return &SI;
   1649       }
   1650   }
   1651   return 0;
   1652 }
   1653 
   1654 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
   1655   Value *Agg = EV.getAggregateOperand();
   1656 
   1657   if (!EV.hasIndices())
   1658     return ReplaceInstUsesWith(EV, Agg);
   1659 
   1660   if (Constant *C = dyn_cast<Constant>(Agg)) {
   1661     if (Constant *C2 = C->getAggregateElement(*EV.idx_begin())) {
   1662       if (EV.getNumIndices() == 0)
   1663         return ReplaceInstUsesWith(EV, C2);
   1664       // Extract the remaining indices out of the constant indexed by the
   1665       // first index
   1666       return ExtractValueInst::Create(C2, EV.getIndices().slice(1));
   1667     }
   1668     return 0; // Can't handle other constants
   1669   }
   1670 
   1671   if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
   1672     // We're extracting from an insertvalue instruction, compare the indices
   1673     const unsigned *exti, *exte, *insi, *inse;
   1674     for (exti = EV.idx_begin(), insi = IV->idx_begin(),
   1675          exte = EV.idx_end(), inse = IV->idx_end();
   1676          exti != exte && insi != inse;
   1677          ++exti, ++insi) {
   1678       if (*insi != *exti)
   1679         // The insert and extract reference different elements.
   1680         // This means the extract is not influenced by the insert, and we can
   1681         // replace the aggregate operand of the extract with the aggregate
   1682         // operand of the insert. i.e., replace
   1683         // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
   1684         // %E = extractvalue { i32, { i32 } } %I, 0
   1685         // with
   1686         // %E = extractvalue { i32, { i32 } } %A, 0
   1687         return ExtractValueInst::Create(IV->getAggregateOperand(),
   1688                                         EV.getIndices());
   1689     }
   1690     if (exti == exte && insi == inse)
   1691       // Both iterators are at the end: Index lists are identical. Replace
   1692       // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
   1693       // %C = extractvalue { i32, { i32 } } %B, 1, 0
   1694       // with "i32 42"
   1695       return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
   1696     if (exti == exte) {
   1697       // The extract list is a prefix of the insert list. i.e. replace
   1698       // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
   1699       // %E = extractvalue { i32, { i32 } } %I, 1
   1700       // with
   1701       // %X = extractvalue { i32, { i32 } } %A, 1
   1702       // %E = insertvalue { i32 } %X, i32 42, 0
   1703       // by switching the order of the insert and extract (though the
   1704       // insertvalue should be left in, since it may have other uses).
   1705       Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
   1706                                                  EV.getIndices());
   1707       return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
   1708                                      makeArrayRef(insi, inse));
   1709     }
   1710     if (insi == inse)
   1711       // The insert list is a prefix of the extract list
   1712       // We can simply remove the common indices from the extract and make it
   1713       // operate on the inserted value instead of the insertvalue result.
   1714       // i.e., replace
   1715       // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
   1716       // %E = extractvalue { i32, { i32 } } %I, 1, 0
   1717       // with
   1718       // %E extractvalue { i32 } { i32 42 }, 0
   1719       return ExtractValueInst::Create(IV->getInsertedValueOperand(),
   1720                                       makeArrayRef(exti, exte));
   1721   }
   1722   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
   1723     // We're extracting from an intrinsic, see if we're the only user, which
   1724     // allows us to simplify multiple result intrinsics to simpler things that
   1725     // just get one value.
   1726     if (II->hasOneUse()) {
   1727       // Check if we're grabbing the overflow bit or the result of a 'with
   1728       // overflow' intrinsic.  If it's the latter we can remove the intrinsic
   1729       // and replace it with a traditional binary instruction.
   1730       switch (II->getIntrinsicID()) {
   1731       case Intrinsic::uadd_with_overflow:
   1732       case Intrinsic::sadd_with_overflow:
   1733         if (*EV.idx_begin() == 0) {  // Normal result.
   1734           Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
   1735           ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
   1736           EraseInstFromFunction(*II);
   1737           return BinaryOperator::CreateAdd(LHS, RHS);
   1738         }
   1739 
   1740         // If the normal result of the add is dead, and the RHS is a constant,
   1741         // we can transform this into a range comparison.
   1742         // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
   1743         if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
   1744           if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
   1745             return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
   1746                                 ConstantExpr::getNot(CI));
   1747         break;
   1748       case Intrinsic::usub_with_overflow:
   1749       case Intrinsic::ssub_with_overflow:
   1750         if (*EV.idx_begin() == 0) {  // Normal result.
   1751           Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
   1752           ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
   1753           EraseInstFromFunction(*II);
   1754           return BinaryOperator::CreateSub(LHS, RHS);
   1755         }
   1756         break;
   1757       case Intrinsic::umul_with_overflow:
   1758       case Intrinsic::smul_with_overflow:
   1759         if (*EV.idx_begin() == 0) {  // Normal result.
   1760           Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
   1761           ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
   1762           EraseInstFromFunction(*II);
   1763           return BinaryOperator::CreateMul(LHS, RHS);
   1764         }
   1765         break;
   1766       default:
   1767         break;
   1768       }
   1769     }
   1770   }
   1771   if (LoadInst *L = dyn_cast<LoadInst>(Agg))
   1772     // If the (non-volatile) load only has one use, we can rewrite this to a
   1773     // load from a GEP. This reduces the size of the load.
   1774     // FIXME: If a load is used only by extractvalue instructions then this
   1775     //        could be done regardless of having multiple uses.
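            // A sketch of the rewrite (types are illustrative):
            //   %agg = load { i32, i64 }* %p
            //   %v   = extractvalue { i32, i64 } %agg, 1
            // becomes:
            //   %gep = getelementptr inbounds { i32, i64 }* %p, i32 0, i32 1
            //   %v   = load i64* %gep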
   1776     if (L->isSimple() && L->hasOneUse()) {
   1777       // extractvalue has integer indices, getelementptr has Value*s. Convert.
   1778       SmallVector<Value*, 4> Indices;
   1779       // Prefix an i32 0 since we need the first element.
   1780       Indices.push_back(Builder->getInt32(0));
   1781       for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
   1782             I != E; ++I)
   1783         Indices.push_back(Builder->getInt32(*I));
   1784 
   1785       // We need to insert these at the location of the old load, not at that of
   1786       // the extractvalue.
   1787       Builder->SetInsertPoint(L->getParent(), L);
   1788       Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), Indices);
   1789       // Returning the load directly will cause the main loop to insert it in
   1790       // the wrong spot, so use ReplaceInstUsesWith().
   1791       return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP));
   1792     }
   1793   // We could simplify extracts from other values. Note that nested extracts may
   1794   // already be simplified implicitly by the above: extract (extract (insert) )
   1795   // will be translated into extract ( insert ( extract ) ) first and then just
   1796   // the value inserted, if appropriate. Similarly for extracts from single-use
   1797   // loads: extract (extract (load)) will be translated to extract (load (gep))
   1798   // and if again single-use then via load (gep (gep)) to load (gep).
   1799   // However, double extracts from e.g. function arguments or return values
   1800   // aren't handled yet.
   1801   return 0;
   1802 }
   1803 
   1804 enum Personality_Type {
   1805   Unknown_Personality,
   1806   GNU_Ada_Personality,
   1807   GNU_CXX_Personality,
   1808   GNU_ObjC_Personality
   1809 };
   1810 
   1811 /// RecognizePersonality - See if the given exception handling personality
   1812 /// function is one that we understand.  If so, return a description of it;
   1813 /// otherwise return Unknown_Personality.
   1814 static Personality_Type RecognizePersonality(Value *Pers) {
   1815   Function *F = dyn_cast<Function>(Pers->stripPointerCasts());
   1816   if (!F)
   1817     return Unknown_Personality;
   1818   return StringSwitch<Personality_Type>(F->getName())
   1819     .Case("__gnat_eh_personality", GNU_Ada_Personality)
   1820     .Case("__gxx_personality_v0",  GNU_CXX_Personality)
   1821     .Case("__objc_personality_v0", GNU_ObjC_Personality)
   1822     .Default(Unknown_Personality);
   1823 }
   1824 
   1825 /// isCatchAll - Return 'true' if the given typeinfo will match anything.
   1826 static bool isCatchAll(Personality_Type Personality, Constant *TypeInfo) {
   1827   switch (Personality) {
   1828   case Unknown_Personality:
   1829     return false;
   1830   case GNU_Ada_Personality:
   1831     // While __gnat_all_others_value will match any Ada exception, it doesn't
   1832     // match foreign exceptions (or didn't, before gcc-4.7).
   1833     return false;
   1834   case GNU_CXX_Personality:
   1835   case GNU_ObjC_Personality:
   1836     return TypeInfo->isNullValue();
   1837   }
   1838   llvm_unreachable("Unknown personality!");
   1839 }
   1840 
   1841 static bool shorter_filter(const Value *LHS, const Value *RHS) {
   1842   return cast<ArrayType>(LHS->getType())->getNumElements() <
   1843          cast<ArrayType>(RHS->getType())->getNumElements();
   1846 }
   1847 
   1848 Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
   1849   // The logic here should be correct for any real-world personality function.
   1850   // However if that turns out not to be true, the offending logic can always
   1851   // be conditioned on the personality function, like the catch-all logic is.
   1852   Personality_Type Personality = RecognizePersonality(LI.getPersonalityFn());
   1853 
   1854   // Simplify the list of clauses, eg by removing repeated catch clauses
   1855   // (these are often created by inlining).
   1856   bool MakeNewInstruction = false; // If true, recreate using the following:
   1857   SmallVector<Value *, 16> NewClauses; // - Clauses for the new instruction;
   1858   bool CleanupFlag = LI.isCleanup();   // - The new instruction is a cleanup.
   1859 
   1860   SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
   1861   for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
   1862     bool isLastClause = i + 1 == e;
   1863     if (LI.isCatch(i)) {
   1864       // A catch clause.
   1865       Value *CatchClause = LI.getClause(i);
   1866       Constant *TypeInfo = cast<Constant>(CatchClause->stripPointerCasts());
   1867 
   1868       // If we already saw this clause, there is no point in having a second
   1869       // copy of it.
   1870       if (AlreadyCaught.insert(TypeInfo)) {
   1871         // This catch clause was not already seen.
   1872         NewClauses.push_back(CatchClause);
   1873       } else {
   1874         // Repeated catch clause - drop the redundant copy.
   1875         MakeNewInstruction = true;
   1876       }
   1877 
   1878       // If this is a catch-all then there is no point in keeping any following
   1879       // clauses or marking the landingpad as having a cleanup.
   1880       if (isCatchAll(Personality, TypeInfo)) {
   1881         if (!isLastClause)
   1882           MakeNewInstruction = true;
   1883         CleanupFlag = false;
   1884         break;
   1885       }
   1886     } else {
   1887       // A filter clause.  If any of the filter elements were already caught
   1888       // then they can be dropped from the filter.  It is tempting to try to
   1889       // exploit the filter further by saying that any typeinfo that does not
   1890       // occur in the filter can't be caught later (and thus can be dropped).
   1891       // However this would be wrong, since typeinfos can match without being
   1892       // equal (for example if one represents a C++ class, and the other some
   1893       // class derived from it).
   1894       assert(LI.isFilter(i) && "Unsupported landingpad clause!");
   1895       Value *FilterClause = LI.getClause(i);
   1896       ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
   1897       unsigned NumTypeInfos = FilterType->getNumElements();
   1898 
   1899       // An empty filter catches everything, so there is no point in keeping any
   1900       // following clauses or marking the landingpad as having a cleanup.  By
   1901       // dealing with this case here the following code is made a bit simpler.
   1902       if (!NumTypeInfos) {
   1903         NewClauses.push_back(FilterClause);
   1904         if (!isLastClause)
   1905           MakeNewInstruction = true;
   1906         CleanupFlag = false;
   1907         break;
   1908       }
   1909 
   1910       bool MakeNewFilter = false; // If true, make a new filter.
   1911       SmallVector<Constant *, 16> NewFilterElts; // New elements.
   1912       if (isa<ConstantAggregateZero>(FilterClause)) {
   1913         // Not an empty filter - it contains at least one null typeinfo.
   1914         assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
   1915         Constant *TypeInfo =
   1916           Constant::getNullValue(FilterType->getElementType());
   1917         // If this typeinfo is a catch-all then the filter can never match.
   1918         if (isCatchAll(Personality, TypeInfo)) {
   1919           // Throw the filter away.
   1920           MakeNewInstruction = true;
   1921           continue;
   1922         }
   1923 
   1924         // There is no point in having multiple copies of this typeinfo, so
   1925         // discard all but the first copy if there is more than one.
   1926         NewFilterElts.push_back(TypeInfo);
   1927         if (NumTypeInfos > 1)
   1928           MakeNewFilter = true;
   1929       } else {
   1930         ConstantArray *Filter = cast<ConstantArray>(FilterClause);
   1931         SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
   1932         NewFilterElts.reserve(NumTypeInfos);
   1933 
   1934         // Remove any filter elements that were already caught or that already
   1935         // occurred in the filter.  While there, see if any of the elements are
   1936         // catch-alls.  If so, the filter can be discarded.
   1937         bool SawCatchAll = false;
   1938         for (unsigned j = 0; j != NumTypeInfos; ++j) {
   1939           Value *Elt = Filter->getOperand(j);
   1940           Constant *TypeInfo = cast<Constant>(Elt->stripPointerCasts());
   1941           if (isCatchAll(Personality, TypeInfo)) {
   1942             // This element is a catch-all.  Bail out, noting this fact.
   1943             SawCatchAll = true;
   1944             break;
   1945           }
   1946           if (AlreadyCaught.count(TypeInfo))
   1947             // Already caught by an earlier clause, so having it in the filter
   1948             // is pointless.
   1949             continue;
   1950           // There is no point in having multiple copies of the same typeinfo in
   1951           // a filter, so only add it if we didn't already.
   1952           if (SeenInFilter.insert(TypeInfo))
   1953             NewFilterElts.push_back(cast<Constant>(Elt));
   1954         }
   1955         // A filter containing a catch-all cannot match anything by definition.
   1956         if (SawCatchAll) {
   1957           // Throw the filter away.
   1958           MakeNewInstruction = true;
   1959           continue;
   1960         }
   1961 
   1962         // If we dropped something from the filter, make a new one.
   1963         if (NewFilterElts.size() < NumTypeInfos)
   1964           MakeNewFilter = true;
   1965       }
   1966       if (MakeNewFilter) {
   1967         FilterType = ArrayType::get(FilterType->getElementType(),
   1968                                     NewFilterElts.size());
   1969         FilterClause = ConstantArray::get(FilterType, NewFilterElts);
   1970         MakeNewInstruction = true;
   1971       }
   1972 
   1973       NewClauses.push_back(FilterClause);
   1974 
   1975       // If the new filter is empty then it will catch everything so there is
   1976       // no point in keeping any following clauses or marking the landingpad
   1977       // as having a cleanup.  The case of the original filter being empty was
   1978       // already handled above.
   1979       if (MakeNewFilter && !NewFilterElts.size()) {
   1980         assert(MakeNewInstruction && "New filter but not a new instruction!");
   1981         CleanupFlag = false;
   1982         break;
   1983       }
   1984     }
   1985   }
   1986 
   1987   // If several filters occur in a row then reorder them so that the shortest
   1988   // filters come first (those with the smallest number of elements).  This is
   1989   // advantageous because shorter filters are more likely to match, speeding up
   1990   // unwinding, but mostly because it increases the effectiveness of the other
   1991   // filter optimizations below.
   1992   for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
   1993     unsigned j;
   1994     // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
   1995     for (j = i; j != e; ++j)
   1996       if (!isa<ArrayType>(NewClauses[j]->getType()))
   1997         break;
   1998 
   1999     // Check whether the filters are already sorted by length.  We need to know
   2000     // if sorting them is actually going to do anything so that we only make a
   2001     // new landingpad instruction if it does.
   2002     for (unsigned k = i; k + 1 < j; ++k)
   2003       if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
   2004         // Not sorted, so sort the filters now.  Doing an unstable sort would be
   2005         // correct too but reordering filters pointlessly might confuse users.
   2006         std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
   2007                          shorter_filter);
   2008         MakeNewInstruction = true;
   2009         break;
   2010       }
   2011 
   2012     // Look for the next batch of filters.
   2013     i = j + 1;
   2014   }
   2015 
   2016   // If typeinfos matched if and only if equal, then the elements of a filter L
   2017   // that occurs later than a filter F could be replaced by the intersection of
   2018   // the elements of F and L.  In reality two typeinfos can match without being
   2019   // equal (for example if one represents a C++ class, and the other some class
   2020   // derived from it) so it would be wrong to perform this transform in general.
   2021   // However the transform is correct and useful if F is a subset of L.  In that
   2022   // case L can be replaced by F, and thus removed altogether since repeating a
   2023   // filter is pointless.  So here we look at all pairs of filters F and L where
   2024   // L follows F in the list of clauses, and remove L if every element of F is
   2025   // an element of L.  This can occur when inlining C++ functions with exception
   2026   // specifications.
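          // For example (an illustrative clause list; the typeinfo names are
          // invented):
          //   filter [1 x i8*] [i8* @A]
          //   filter [2 x i8*] [i8* @A, i8* @B]
          // The first filter is a subset of the second, so the second filter is
          // removed.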
   2027   for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
   2028     // Examine each filter in turn.
   2029     Value *Filter = NewClauses[i];
   2030     ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
   2031     if (!FTy)
   2032       // Not a filter - skip it.
   2033       continue;
   2034     unsigned FElts = FTy->getNumElements();
   2035     // Examine each filter following this one.  Doing this backwards means that
   2036     // we don't have to worry about filters disappearing under us when removed.
   2037     for (unsigned j = NewClauses.size() - 1; j != i; --j) {
   2038       Value *LFilter = NewClauses[j];
   2039       ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
   2040       if (!LTy)
   2041         // Not a filter - skip it.
   2042         continue;
   2043       // If Filter is a subset of LFilter, i.e. every element of Filter is also
   2044       // an element of LFilter, then discard LFilter.
   2045       SmallVectorImpl<Value *>::iterator J = NewClauses.begin() + j;
   2046       // If Filter is empty then it is a subset of LFilter.
   2047       if (!FElts) {
   2048         // Discard LFilter.
   2049         NewClauses.erase(J);
   2050         MakeNewInstruction = true;
   2051         // Move on to the next filter.
   2052         continue;
   2053       }
   2054       unsigned LElts = LTy->getNumElements();
   2055       // If Filter is longer than LFilter then it cannot be a subset of it.
   2056       if (FElts > LElts)
   2057         // Move on to the next filter.
   2058         continue;
   2059       // At this point we know that LFilter has at least one element.
   2060       if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
   2061         // Filter is a subset of LFilter iff Filter contains only zeros (as we
   2062         // already know that Filter is not longer than LFilter).
   2063         if (isa<ConstantAggregateZero>(Filter)) {
   2064           assert(FElts <= LElts && "Should have handled this case earlier!");
   2065           // Discard LFilter.
   2066           NewClauses.erase(J);
   2067           MakeNewInstruction = true;
   2068         }
   2069         // Move on to the next filter.
   2070         continue;
   2071       }
   2072       ConstantArray *LArray = cast<ConstantArray>(LFilter);
   2073       if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
   2074         // Since Filter is non-empty and contains only zeros, it is a subset of
   2075         // LFilter iff LFilter contains a zero.
   2076         assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
   2077         for (unsigned l = 0; l != LElts; ++l)
   2078           if (LArray->getOperand(l)->isNullValue()) {
   2079             // LFilter contains a zero - discard it.
   2080             NewClauses.erase(J);
   2081             MakeNewInstruction = true;
   2082             break;
   2083           }
   2084         // Move on to the next filter.
   2085         continue;
   2086       }
   2087       // At this point we know that both filters are ConstantArrays.  Loop over
   2088       // operands to see whether every element of Filter is also an element of
   2089       // LFilter.  Since filters tend to be short this is probably faster than
   2090       // using a method that scales nicely.
   2091       ConstantArray *FArray = cast<ConstantArray>(Filter);
   2092       bool AllFound = true;
   2093       for (unsigned f = 0; f != FElts; ++f) {
   2094         Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
   2095         AllFound = false;
   2096         for (unsigned l = 0; l != LElts; ++l) {
   2097           Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
   2098           if (LTypeInfo == FTypeInfo) {
   2099             AllFound = true;
   2100             break;
   2101           }
   2102         }
   2103         if (!AllFound)
   2104           break;
   2105       }
   2106       if (AllFound) {
   2107         // Discard LFilter.
   2108         NewClauses.erase(J);
   2109         MakeNewInstruction = true;
   2110       }
   2111       // Move on to the next filter.
   2112     }
   2113   }
   2114 
   2115   // If we changed any of the clauses, replace the old landingpad instruction
   2116   // with a new one.
   2117   if (MakeNewInstruction) {
   2118     LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
   2119                                                  LI.getPersonalityFn(),
   2120                                                  NewClauses.size());
   2121     for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
   2122       NLI->addClause(NewClauses[i]);
   2123     // A landing pad with no clauses must have the cleanup flag set.  It is
   2124     // theoretically possible, though highly unlikely, that we eliminated all
   2125     // clauses.  If so, force the cleanup flag to true.
   2126     if (NewClauses.empty())
   2127       CleanupFlag = true;
   2128     NLI->setCleanup(CleanupFlag);
   2129     return NLI;
   2130   }
   2131 
   2132   // Even if none of the clauses changed, we may nonetheless have understood
   2133   // that the cleanup flag is pointless.  Clear it if so.
   2134   if (LI.isCleanup() != CleanupFlag) {
   2135     assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
   2136     LI.setCleanup(CleanupFlag);
   2137     return &LI;
   2138   }
   2139 
   2140   return 0;
   2141 }
   2142 
   2143 
   2144 
   2145 
   2146 /// TryToSinkInstruction - Try to move the specified instruction from its
   2147 /// current block into the beginning of DestBlock, which can only happen if it's
   2148 /// safe to move the instruction past all of the instructions between it and the
   2149 /// end of its block.
   2150 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
   2151   assert(I->hasOneUse() && "Invariants didn't hold!");
   2152 
   2153   // Cannot move control-flow-involving instructions, volatile loads, vaarg, etc.
   2154   if (isa<PHINode>(I) || isa<LandingPadInst>(I) || I->mayHaveSideEffects() ||
   2155       isa<TerminatorInst>(I))
   2156     return false;
   2157 
   2158   // Do not sink alloca instructions out of the entry block.
   2159   if (isa<AllocaInst>(I) && I->getParent() ==
   2160         &DestBlock->getParent()->getEntryBlock())
   2161     return false;
   2162 
   2163   // We can only sink load instructions if there is nothing between the load and
   2164   // the end of block that could change the value.
   2165   if (I->mayReadFromMemory()) {
   2166     for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
   2167          Scan != E; ++Scan)
   2168       if (Scan->mayWriteToMemory())
   2169         return false;
   2170   }
   2171 
   2172   BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
   2173   I->moveBefore(InsertPos);
   2174   ++NumSunkInst;
   2175   return true;
   2176 }
   2177 
   2178 
   2179 /// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
   2180 /// all reachable code to the worklist.
   2181 ///
   2182 /// This has a couple of tricks to make the code faster and more powerful.  In
   2183 /// particular, we constant fold and DCE instructions as we go, to avoid adding
   2184 /// them to the worklist (this significantly speeds up instcombine on code where
   2185 /// many instructions are dead or constant).  Additionally, if we find a branch
   2186 /// whose condition is a known constant, we only visit the reachable successors.
   2187 ///
   2188 static bool AddReachableCodeToWorklist(BasicBlock *BB,
   2189                                        SmallPtrSet<BasicBlock*, 64> &Visited,
   2190                                        InstCombiner &IC,
   2191                                        const DataLayout *TD,
   2192                                        const TargetLibraryInfo *TLI) {
   2193   bool MadeIRChange = false;
   2194   SmallVector<BasicBlock*, 256> Worklist;
   2195   Worklist.push_back(BB);
   2196 
   2197   SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
   2198   DenseMap<ConstantExpr*, Constant*> FoldedConstants;
   2199 
   2200   do {
   2201     BB = Worklist.pop_back_val();
   2202 
   2203     // We have now visited this block!  If we've already been here, ignore it.
   2204     if (!Visited.insert(BB)) continue;
   2205 
   2206     for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
   2207       Instruction *Inst = BBI++;
   2208 
   2209       // DCE instruction if trivially dead.
   2210       if (isInstructionTriviallyDead(Inst, TLI)) {
   2211         ++NumDeadInst;
   2212         DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
   2213         Inst->eraseFromParent();
   2214         continue;
   2215       }
   2216 
   2217       // ConstantProp instruction if trivially constant.
   2218       if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
   2219         if (Constant *C = ConstantFoldInstruction(Inst, TD, TLI)) {
   2220           DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
   2221                        << *Inst << '\n');
   2222           Inst->replaceAllUsesWith(C);
   2223           ++NumConstProp;
   2224           Inst->eraseFromParent();
   2225           continue;
   2226         }
   2227 
   2228       if (TD) {
   2229         // See if we can constant fold its operands.
   2230         for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
   2231              i != e; ++i) {
   2232           ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
   2233           if (CE == 0) continue;
   2234 
   2235           Constant*& FoldRes = FoldedConstants[CE];
   2236           if (!FoldRes)
   2237             FoldRes = ConstantFoldConstantExpression(CE, TD, TLI);
   2238           if (!FoldRes)
   2239             FoldRes = CE;
   2240 
   2241           if (FoldRes != CE) {
   2242             *i = FoldRes;
   2243             MadeIRChange = true;
   2244           }
   2245         }
   2246       }
   2247 
   2248       InstrsForInstCombineWorklist.push_back(Inst);
   2249     }
   2250 
   2251     // Recursively visit successors.  If this is a branch or switch on a
   2252     // constant, only visit the reachable successor.
   2253     TerminatorInst *TI = BB->getTerminator();
   2254     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
   2255       if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
   2256         bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
   2257         BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
   2258         Worklist.push_back(ReachableBB);
   2259         continue;
   2260       }
   2261     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
   2262       if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
   2263         // See if the constant selects an explicit case destination;
   2264         // otherwise only the default destination is reachable.
   2265         BasicBlock *ReachableBB = SI->getDefaultDest();
   2266         for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
   2267              i != e; ++i)
   2268           if (i.getCaseValue() == Cond) {
   2269             ReachableBB = i.getCaseSuccessor();
   2270             break;
   2271           }
   2272 
   2273         Worklist.push_back(ReachableBB);
   2274         continue;
   2275       }
   2276     }
   2277 
   2278     for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
   2279       Worklist.push_back(TI->getSuccessor(i));
   2280   } while (!Worklist.empty());
   2281 
   2282   // Once we've found all of the instructions to add to instcombine's worklist,
   2283   // add them in reverse order.  This way instcombine will visit from the top
   2284   // of the function down.  This fits well with the way that it adds all uses
   2285   // of instructions to the worklist after doing a transformation, thus avoiding
   2286   // some N^2 behavior in pathological cases.
   2287   IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
   2288                               InstrsForInstCombineWorklist.size());
   2289 
   2290   return MadeIRChange;
   2291 }
   2292 
   2293 bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
   2294   MadeIRChange = false;
   2295 
   2296   DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
   2297                << F.getName() << "\n");
   2298 
   2299   {
   2300     // Do a depth-first traversal of the function, populate the worklist with
   2301     // the reachable instructions.  Ignore blocks that are not reachable.  Keep
   2302     // track of which blocks we visit.
   2303     SmallPtrSet<BasicBlock*, 64> Visited;
   2304     MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD,
   2305                                                TLI);
   2306 
   2307     // Do a quick scan over the function.  If we find any blocks that are
   2308     // unreachable, remove any instructions inside of them.  This prevents
   2309     // the instcombine code from having to deal with some bad special cases.
   2310     for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
   2311       if (Visited.count(BB)) continue;
   2312 
   2313       // Delete the instructions backwards; this reduces the number of def-use
   2314       // and use-def chain updates that have to be made.
   2315       Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
   2316       while (EndInst != BB->begin()) {
   2317         // Delete the next to last instruction.
   2318         BasicBlock::iterator I = EndInst;
   2319         Instruction *Inst = --I;
   2320         if (!Inst->use_empty())
   2321           Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
   2322         if (isa<LandingPadInst>(Inst)) {
   2323           EndInst = Inst;
   2324           continue;
   2325         }
   2326         if (!isa<DbgInfoIntrinsic>(Inst)) {
   2327           ++NumDeadInst;
   2328           MadeIRChange = true;
   2329         }
   2330         Inst->eraseFromParent();
   2331       }
   2332     }
   2333   }
   2334 
   2335   while (!Worklist.isEmpty()) {
   2336     Instruction *I = Worklist.RemoveOne();
   2337     if (I == 0) continue;  // skip null values.
   2338 
   2339     // Check to see if we can DCE the instruction.
   2340     if (isInstructionTriviallyDead(I, TLI)) {
   2341       DEBUG(errs() << "IC: DCE: " << *I << '\n');
   2342       EraseInstFromFunction(*I);
   2343       ++NumDeadInst;
   2344       MadeIRChange = true;
   2345       continue;
   2346     }
   2347 
   2348     // Instruction isn't dead, see if we can constant propagate it.
   2349     if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
   2350       if (Constant *C = ConstantFoldInstruction(I, TD, TLI)) {
   2351         DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
   2352 
   2353         // Add operands to the worklist.
   2354         ReplaceInstUsesWith(*I, C);
   2355         ++NumConstProp;
   2356         EraseInstFromFunction(*I);
   2357         MadeIRChange = true;
   2358         continue;
   2359       }
   2360 
   2361     // See if we can trivially sink this instruction to a successor basic block.
   2362     if (I->hasOneUse()) {
   2363       BasicBlock *BB = I->getParent();
   2364       Instruction *UserInst = cast<Instruction>(I->use_back());
   2365       BasicBlock *UserParent;
   2366 
   2367       // Get the block the use occurs in.
   2368       if (PHINode *PN = dyn_cast<PHINode>(UserInst))
   2369         UserParent = PN->getIncomingBlock(I->use_begin().getUse());
   2370       else
   2371         UserParent = UserInst->getParent();
   2372 
   2373       if (UserParent != BB) {
   2374         bool UserIsSuccessor = false;
   2375         // See if the user is one of our successors.
   2376         for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
   2377           if (*SI == UserParent) {
   2378             UserIsSuccessor = true;
   2379             break;
   2380           }
   2381 
   2382         // If the user is in one of our immediate successors, and if that
   2383         // successor has us as its only predecessor (otherwise we would have
   2384         // to split the critical edge), we can sink the instruction there.
   2385         if (UserIsSuccessor && UserParent->getSinglePredecessor())
   2386           // Okay, the CFG is simple enough, try to sink this instruction.
   2387           MadeIRChange |= TryToSinkInstruction(I, UserParent);
   2388       }
   2389     }
   2390 
   2391     // Now that we have an instruction, try combining it to simplify it.
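            // Point the shared IRBuilder at I so that anything the visit methods
            // create is inserted immediately before I and carries I's debug location.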
   2392     Builder->SetInsertPoint(I->getParent(), I);
   2393     Builder->SetCurrentDebugLocation(I->getDebugLoc());
   2394 
   2395 #ifndef NDEBUG
   2396     std::string OrigI;
   2397 #endif
   2398     DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
   2399     DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');
   2400 
   2401     if (Instruction *Result = visit(*I)) {
   2402       ++NumCombined;
   2403       // Should we replace the old instruction with a new one?
   2404       if (Result != I) {
   2405         DEBUG(errs() << "IC: Old = " << *I << '\n'
   2406                      << "    New = " << *Result << '\n');
   2407 
   2408         if (!I->getDebugLoc().isUnknown())
   2409           Result->setDebugLoc(I->getDebugLoc());
   2410         // Everything uses the new instruction now.
   2411         I->replaceAllUsesWith(Result);
   2412 
   2413         // Move the name to the new instruction first.
   2414         Result->takeName(I);
   2415 
   2416         // Push the new instruction and any users onto the worklist.
   2417         Worklist.Add(Result);
   2418         Worklist.AddUsersToWorkList(*Result);
   2419 
   2420         // Insert the new instruction into the basic block...
   2421         BasicBlock *InstParent = I->getParent();
   2422         BasicBlock::iterator InsertPos = I;
   2423 
   2424         // If we replace a PHI with something that isn't a PHI, fix up the
   2425         // insertion point.
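                // (PHI nodes must stay grouped at the top of their block, so a
                // non-PHI replacement is placed at the first valid non-PHI position.)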
   2426         if (!isa<PHINode>(Result) && isa<PHINode>(InsertPos))
   2427           InsertPos = InstParent->getFirstInsertionPt();
   2428 
   2429         InstParent->getInstList().insert(InsertPos, Result);
   2430 
   2431         EraseInstFromFunction(*I);
   2432       } else {
   2433 #ifndef NDEBUG
   2434         DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
   2435                      << "    New = " << *I << '\n');
   2436 #endif
   2437 
   2438         // If the instruction was modified, it's possible that it is now dead.
   2439         // If so, remove it.
   2440         if (isInstructionTriviallyDead(I, TLI)) {
   2441           EraseInstFromFunction(*I);
   2442         } else {
   2443           Worklist.Add(I);
   2444           Worklist.AddUsersToWorkList(*I);
   2445         }
   2446       }
   2447       MadeIRChange = true;
   2448     }
   2449   }
   2450 
   2451   Worklist.Zap();
   2452   return MadeIRChange;
   2453 }
   2454 
   2455 namespace {
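        /// InstCombinerLibCallSimplifier - A LibCallSimplifier whose replacements are
        /// routed through the instruction combiner, so that any library call it
        /// simplifies keeps the instcombine worklist and use lists consistent.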
   2456 class InstCombinerLibCallSimplifier : public LibCallSimplifier {
   2457   InstCombiner *IC;
   2458 public:
   2459   InstCombinerLibCallSimplifier(const DataLayout *TD,
   2460                                 const TargetLibraryInfo *TLI,
   2461                                 InstCombiner *IC)
   2462     : LibCallSimplifier(TD, TLI, UnsafeFPShrink) {
   2463     this->IC = IC;
   2464   }
   2465 
   2466   /// replaceAllUsesWith - override so that instruction replacement
   2467   /// can be defined in terms of the instruction combiner framework.
   2468   virtual void replaceAllUsesWith(Instruction *I, Value *With) const {
   2469     IC->ReplaceInstUsesWith(*I, With);
   2470   }
   2471 };
   2472 }
   2473 
   2474 bool InstCombiner::runOnFunction(Function &F) {
   2475   TD = getAnalysisIfAvailable<DataLayout>();
   2476   TLI = &getAnalysis<TargetLibraryInfo>();
   2477   // Determine whether we are minimizing size for this function (minsize).
   2478   MinimizeSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
   2479                                                 Attribute::MinSize);
   2480 
   2481   /// Builder - This is an IRBuilder that automatically adds newly created
   2482   /// instructions to the worklist (via InstCombineIRInserter).
   2483   IRBuilder<true, TargetFolder, InstCombineIRInserter>
   2484     TheBuilder(F.getContext(), TargetFolder(TD),
   2485                InstCombineIRInserter(Worklist));
   2486   Builder = &TheBuilder;
   2487 
   2488   InstCombinerLibCallSimplifier TheSimplifier(TD, TLI, this);
   2489   Simplifier = &TheSimplifier;
   2490 
   2491   bool EverMadeChange = false;
   2492 
   2493   // Lower dbg.declare intrinsics first; otherwise their values may be
   2494   // clobbered by the instcombiner.
   2495   EverMadeChange = LowerDbgDeclare(F);
   2496 
   2497   // Iterate while there is work to do.
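          // DoOneIteration returns true whenever it changed the IR, so this loop
          // simply reruns the combiner until a fixed point is reached.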
   2498   unsigned Iteration = 0;
   2499   while (DoOneIteration(F, Iteration++))
   2500     EverMadeChange = true;
   2501 
   2502   Builder = 0;
   2503   return EverMadeChange;
   2504 }
   2505 
   2506 FunctionPass *llvm::createInstructionCombiningPass() {
   2507   return new InstCombiner();
   2508 }
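
        // Illustrative usage sketch (not part of this file): clients typically
        // schedule the pass through a legacy PassManager, e.g.
        //   PassManager PM;                          // from "llvm/PassManager.h"
        //   PM.add(createInstructionCombiningPass());
        //   PM.run(M);                               // M is the llvm::Module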
   2509