//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "InstCombineInternal.h"
#include "llvm-c/Initialization.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <climits>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand,    "Number of expansions");
STATISTIC(NumFactor   , "Number of factorizations");
STATISTIC(NumReassoc  , "Number of reassociations");

Value *InstCombiner::EmitGEPOffset(User *GEP) {
  return llvm::EmitGEPOffset(Builder, DL, GEP);
}

/// Return true if it is desirable to convert an integer computation from a
/// given bit width to a new bit width. For example, we don't want to convert
/// from a legal to an illegal type, or from a smaller to a larger illegal
/// type.
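///
/// For example (an illustrative sketch; the actual legal widths come from the
/// target's DataLayout), with native integer widths of 32 and 64:
///   ShouldChangeType(39, 32) -> true   (shrink an illegal type to a legal one)
///   ShouldChangeType(32, 39) -> false  (legal -> illegal)
///   ShouldChangeType(39, 41) -> false  (illegal -> larger illegal)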
bool InstCombiner::ShouldChangeType(unsigned FromWidth,
                                    unsigned ToWidth) const {
  bool FromLegal = DL.isLegalInteger(FromWidth);
  bool ToLegal = DL.isLegalInteger(ToWidth);

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

/// Return true if it is desirable to convert a computation from 'From' to
/// 'To'. For example, we don't want to convert from a legal to an illegal
/// type, or from a smaller to a larger illegal type.
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
  assert(From->isIntegerTy() && To->isIntegerTy());

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  return ShouldChangeType(FromWidth, ToWidth);
}

// Return true if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add and Sub opcodes. For
// all other opcodes, the function conservatively returns false.
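//
// For example (illustrative): when reassociating
//   %t = add nsw i32 %x, 5
//   %r = add nsw i32 %t, 3
// into "%r = add i32 %x, 8", this returns true because 5 + 3 does not
// overflow i32, so the combined add may keep its nsw flag.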
static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap()) {
    return false;
  }

  // We reason about Add and Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add &&
      Opcode != Instruction::Sub) {
    return false;
  }

  ConstantInt *CB = dyn_cast<ConstantInt>(B);
  ConstantInt *CC = dyn_cast<ConstantInt>(C);

  if (!CB || !CC) {
    return false;
  }

  const APInt &BVal = CB->getValue();
  const APInt &CVal = CC->getValue();
  bool Overflow = false;

  if (Opcode == Instruction::Add) {
    BVal.sadd_ov(CVal, Overflow);
  } else {
    BVal.ssub_ov(CVal, Overflow);
  }

  return !Overflow;
}

/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. Fast-math flags are the exception: when applicable, they are
/// saved and restored because they remain valid across these transforms.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}

/// This performs a few simplifications for operators that are associative or
/// commutative:
///
///  Commutative operators:
///
///  1. Order operands such that they are listed from right (least complex) to
///     left (most complex).  This puts constants before unary operators before
///     binary operators.
///
///  Associative operators:
///
///  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
///  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
///
///  Associative and commutative operators:
///
///  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
///  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
///  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
///     if C1 and C2 are constants.
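///
/// For instance (illustrative), transform 6 turns:
///    %t = add i32 %x, 1
///    %u = add i32 %y, 2
///    %r = add i32 %t, %u
/// into "(%x + %y) + 3", folding the two constants into one.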
bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex).  This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
        getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = SimplifyBinOp(Opcode, B, C, DL)) {
          // It simplifies to V.  Form "A op V".
          I.setOperand(0, A);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          if (MaintainNoSignedWrap(I, B, C) &&
              (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
            // Note: this is only valid because SimplifyBinOp doesn't look at
            // the operands to Op0.
            I.clearSubclassOptionalData();
            I.setHasNoSignedWrap(true);
          } else {
            ClearSubclassDataAfterReassociation(I);
          }

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = SimplifyBinOp(Opcode, A, B, DL)) {
          // It simplifies to V.  Form "V op C".
          I.setOperand(0, V);
          I.setOperand(1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
          // It simplifies to V.  Form "V op B".
          I.setOperand(0, V);
          I.setOperand(1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
          // It simplifies to V.  Form "B op V".
          I.setOperand(0, B);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          isa<Constant>(Op0->getOperand(1)) &&
          isa<Constant>(Op1->getOperand(1)) &&
          Op0->hasOneUse() && Op1->hasOneUse()) {
        Value *A = Op0->getOperand(0);
        Constant *C1 = cast<Constant>(Op0->getOperand(1));
        Value *B = Op1->getOperand(0);
        Constant *C2 = cast<Constant>(Op1->getOperand(1));

        Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
        BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
        if (isa<FPMathOperator>(New)) {
          FastMathFlags Flags = I.getFastMathFlags();
          Flags &= Op0->getFastMathFlags();
          Flags &= Op1->getFastMathFlags();
          New->setFastMathFlags(Flags);
        }
        InsertNewInstWith(New, I);
        New->takeName(Op1);
        I.setOperand(0, New);
        I.setOperand(1, Folded);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        ClearSubclassDataAfterReassociation(I);

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (1);
}

/// Return whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
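///
/// For example (illustrative), And distributes over Or:
///   X & (Y | Z)  ==  (X & Y) | (X & Z)
/// but Add does not distribute over Mul:
///   X + (Y * Z)  !=  (X + Y) * (X + Z)  in general.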
static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  switch (LOp) {
  default:
    return false;

  case Instruction::And:
    // And distributes over Or and Xor.
    switch (ROp) {
    default:
      return false;
    case Instruction::Or:
    case Instruction::Xor:
      return true;
    }

  case Instruction::Mul:
    // Multiplication distributes over addition and subtraction.
    switch (ROp) {
    default:
      return false;
    case Instruction::Add:
    case Instruction::Sub:
      return true;
    }

  case Instruction::Or:
    // Or distributes over And.
    switch (ROp) {
    default:
      return false;
    case Instruction::And:
      return true;
    }
  }
}

/// Return whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return LeftDistributesOverRight(ROp, LOp);

  switch (LOp) {
  default:
    return false;
  // (X >> Z) & (Y >> Z)  -> (X&Y) >> Z  for all shifts.
  // (X >> Z) | (Y >> Z)  -> (X|Y) >> Z  for all shifts.
  // (X >> Z) ^ (Y >> Z)  -> (X^Y) >> Z  for all shifts.
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    switch (ROp) {
    default:
      return false;
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
      return true;
    }
  }
  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
  return false;
}

/// This function returns the identity value for the given opcode, which can
/// be used to factor patterns like (X * 2) + X ==> (X * 2) + (X * 1)
/// ==> X * (2 + 1).
static Value *getIdentityValue(Instruction::BinaryOps OpCode, Value *V) {
  if (isa<Constant>(V))
    return nullptr;

  if (OpCode == Instruction::Mul)
    return ConstantInt::get(V->getType(), 1);

  // TODO: We can handle other cases e.g. Instruction::And, Instruction::Or etc.

  return nullptr;
}

/// This function factors binary ops which can be combined using distributive
/// laws. This function tries to transform 'Op' based on 'TopLevelOpcode' to
/// enable factorization, e.g. for ADD(SHL(X, 2), MUL(X, 5)): when called with
/// TopLevelOpcode == Instruction::Add and Op = SHL(X, 2), it transforms
/// SHL(X, 2) to MUL(X, 4), i.e. returns Instruction::Mul with LHS set to 'X'
/// and RHS to 4.
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopLevelOpcode,
                          BinaryOperator *Op, Value *&LHS, Value *&RHS) {
  if (!Op)
    return Instruction::BinaryOpsEnd;

  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);

  switch (TopLevelOpcode) {
  default:
    return Op->getOpcode();

  case Instruction::Add:
  case Instruction::Sub:
    if (Op->getOpcode() == Instruction::Shl) {
      if (Constant *CST = dyn_cast<Constant>(Op->getOperand(1))) {
        // The multiplier is really 1 << CST.
        RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), CST);
        return Instruction::Mul;
      }
    }
    return Op->getOpcode();
  }

  // TODO: We can add other conversions e.g. shr => div etc.
}
/// This tries to simplify binary operations by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
static Value *tryFactorization(InstCombiner::BuilderTy *Builder,
                               const DataLayout &DL, BinaryOperator &I,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {

  // If any of A, B, C or D are null, we cannot factor I; return early.
  // Checking A and C should be enough.
  if (!A || !C || !B || !D)
    return nullptr;

  Value *V = nullptr;
  Value *SimplifiedInst = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      V = SimplifyBinOp(TopLevelOpcode, B, D, DL);
      // If "B op D" doesn't simplify then only go on if both of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && LHS->hasOneUse() && RHS->hasOneUse())
        V = Builder->CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V) {
        SimplifiedInst = Builder->CreateBinOp(InnerOpcode, A, V);
      }
    }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!SimplifiedInst && RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      // If "A op C" simplifies then it can be formed with no cost.
      V = SimplifyBinOp(TopLevelOpcode, A, C, DL);

      // If "A op C" doesn't simplify then only go on if both of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && LHS->hasOneUse() && RHS->hasOneUse())
        V = Builder->CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V) {
        SimplifiedInst = Builder->CreateBinOp(InnerOpcode, V, B);
      }
    }

  if (SimplifiedInst) {
    ++NumFactor;
    SimplifiedInst->takeName(&I);

    // Check if we can add NSW flag to SimplifiedInst. If so, set NSW flag.
    // TODO: Check for NUW.
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SimplifiedInst)) {
      if (isa<OverflowingBinaryOperator>(SimplifiedInst)) {
        bool HasNSW = false;
        if (isa<OverflowingBinaryOperator>(&I))
          HasNSW = I.hasNoSignedWrap();

        if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
          if (isa<OverflowingBinaryOperator>(Op0))
            HasNSW &= Op0->hasNoSignedWrap();

        if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
          if (isa<OverflowingBinaryOperator>(Op1))
            HasNSW &= Op1->hasNoSignedWrap();

        // We can propagate 'nsw' if we know that
        //  %Y = mul nsw i16 %X, C
        //  %Z = add nsw i16 %Y, %X
        // =>
        //  %Z = mul nsw i16 %X, C+1
        //
        // iff C+1 isn't INT_MIN
        const APInt *CInt;
        if (TopLevelOpcode == Instruction::Add &&
            InnerOpcode == Instruction::Mul)
          if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
            BO->setHasNoSignedWrap(HasNSW);
      }
    }
  }
  return SimplifiedInst;
}

/// This tries to simplify binary operations which some other binary operation
/// distributes over, either by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or by expanding out if this results in
/// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
/// Returns the simplified value, or null if it didn't simplify.
Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Factorization.
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
  auto TopLevelOpcode = I.getOpcode();
  auto LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B);
  auto RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D);

  // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
  // a common term.
  if (LHSOpcode == RHSOpcode) {
    if (Value *V = tryFactorization(Builder, DL, I, LHSOpcode, A, B, C, D))
      return V;
  }

  // The instruction has the form "(A op' B) op (C)".  Try to factorize common
  // term.
  if (Value *V = tryFactorization(Builder, DL, I, LHSOpcode, A, B, RHS,
                                  getIdentityValue(LHSOpcode, RHS)))
    return V;

  // The instruction has the form "(B) op (C op' D)".  Try to factorize common
  // term.
  if (Value *V = tryFactorization(Builder, DL, I, RHSOpcode, LHS,
                                  getIdentityValue(RHSOpcode, LHS), C, D))
    return V;

  // Expansion.
  if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C".  See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Do "A op C" and "B op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, DL))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, DL)) {
        // They do! Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
        if ((L == A && R == B) ||
            (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
          return Op0;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
          return V;
        // Otherwise, create a new instruction.
        C = Builder->CreateBinOp(InnerOpcode, L, R);
        C->takeName(&I);
        return C;
      }
  }

  if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)".  See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Do "A op B" and "A op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, DL))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, DL)) {
        // They do! Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
        if ((L == B && R == C) ||
            (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
          return Op1;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
          return V;
        // Otherwise, create a new instruction.
        A = Builder->CreateBinOp(InnerOpcode, L, R);
        A->takeName(&I);
        return A;
      }
  }

  // (op (select (a, c, b)), (select (a, d, b))) -> (select (a, (op c, d), 0))
  // (op (select (a, b, c)), (select (a, b, d))) -> (select (a, 0, (op c, d)))
  if (auto *SI0 = dyn_cast<SelectInst>(LHS)) {
    if (auto *SI1 = dyn_cast<SelectInst>(RHS)) {
      if (SI0->getCondition() == SI1->getCondition()) {
        Value *SI = nullptr;
        if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(),
                                     SI1->getFalseValue(), DL, TLI, DT, AC))
          SI = Builder->CreateSelect(SI0->getCondition(),
                                     Builder->CreateBinOp(TopLevelOpcode,
                                                          SI0->getTrueValue(),
                                                          SI1->getTrueValue()),
                                     V);
        if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(),
                                     SI1->getTrueValue(), DL, TLI, DT, AC))
          SI = Builder->CreateSelect(
              SI0->getCondition(), V,
              Builder->CreateBinOp(TopLevelOpcode, SI0->getFalseValue(),
                                   SI1->getFalseValue()));
        if (SI) {
          SI->takeName(&I);
          return SI;
        }
      }
    }
  }

  return nullptr;
}

/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
/// constant zero (which is the 'negate' form).
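///
/// For example (illustrative): given "%n = sub i32 0, %x" this returns %x, and
/// given the constant i32 5 it returns the folded constant i32 -5.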
Value *InstCombiner::dyn_castNegVal(Value *V) const {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  return nullptr;
}

/// Given a 'fsub' instruction, return the RHS of the instruction if the LHS is
/// a constant negative zero (which is the 'negate' form).
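///
/// For example (illustrative): given "%n = fsub float -0.000000e+00, %x" this
/// returns %x, since subtraction from negative zero is the canonical IR
/// spelling of fneg.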
Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
  if (BinaryOperator::isFNeg(V, IgnoreZeroSign))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isFloatingPointTy())
      return ConstantExpr::getFNeg(C);

  return nullptr;
}

static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I)) {
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
  }

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I)) {
    Value *RI = IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
    Instruction *FPInst = dyn_cast<Instruction>(RI);
    if (FPInst && isa<FPMathOperator>(FPInst))
      FPInst->copyFastMathFlags(BO);
    return RI;
  }
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}

/// Given an instruction with a select as one operand and a constant as the
/// other operand, try to fold the binary operator into the select arguments.
/// This also works for Cast instructions, which obviously do not have a second
/// operand.
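///
/// For example (illustrative):
///   %s = select i1 %c, i32 1, i32 2
///   %r = add i32 %s, 4
/// folds to "%r = select i1 %c, i32 5, i32 6".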
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions
  if (!SI->hasOneUse()) return nullptr;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType()->isIntegerTy(1)) return nullptr;

    // If it's a bitcast involving vectors, make sure it has the same number of
    // elements on both sides.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
      VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
      VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

      // Verify that either both or neither are vectors.
      if ((SrcTy == nullptr) != (DestTy == nullptr)) return nullptr;
      // If vectors, verify that they have the same number of elements.
      if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
        return nullptr;
    }

    // Test if a CmpInst instruction is used exclusively by a select as
    // part of a minimum or maximum operation. If so, refrain from doing
    // any other folding. This helps out other analyses which understand
    // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
    // and CodeGen. And in this case, at least one of the comparison
    // operands has at least one user besides the compare (the select),
    // which would often largely negate the benefit of folding anyway.
    if (auto *CI = dyn_cast<CmpInst>(SI->getCondition())) {
      if (CI->hasOneUse()) {
        Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
        if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
            (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
          return nullptr;
      }
    }

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);

    return SelectInst::Create(SI->getCondition(),
                              SelectTrueVal, SelectFalseVal);
  }
  return nullptr;
}

/// Given a binary operator, cast instruction, or select which has a PHI node as
/// operand #0, see if we can fold the instruction into the PHI (which is only
/// possible if all operands to the PHI are constants).
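///
/// For example (illustrative): with "%p = phi i32 [ 1, %bb0 ], [ 2, %bb1 ]",
/// "add i32 %p, 4" can become "phi i32 [ 5, %bb0 ], [ 6, %bb1 ]".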
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return nullptr;

  // We normally only transform phis with a single use.  However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of the
  // uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (User *U : PN->users()) {
      Instruction *UI = cast<Instruction>(U);
      if (UI != &I && !I.isIdenticalTo(UI))
        return nullptr;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = nullptr;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
      continue;

    if (isa<PHINode>(InVal)) return nullptr;  // Itself a phi.
    if (NonConstBB) return nullptr;  // More than one non-const value.

    NonConstBB = PN->getIncomingBlock(i);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
      if (II->getParent() == NonConstBB)
        return nullptr;

    // If the incoming non-constant value is in I's block, we will remove one
    // instruction, but insert another equivalent one, leading to infinite
    // instcombine.
    if (isPotentiallyReachable(I.getParent(), NonConstBB, DT, LI))
      return nullptr;
  }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != nullptr) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return nullptr;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // If we are going to have to insert a new computation, do so right before the
  // predecessor's terminator.
  if (NonConstBB)
    Builder->SetInsertPoint(NonConstBB->getTerminator());

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a phi,
    // not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = nullptr;
      // Beware of ConstantExpr:  it may eventually evaluate to getNullValue,
      // even if currently isNullValue gives false.
      Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i));
      if (InC && !isa<ConstantExpr>(InC))
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      else
        InV = Builder->CreateSelect(PN->getIncomingValue(i),
                                    TrueVInPred, FalseVInPred, "phitmp");
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = nullptr;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
      else if (isa<ICmpInst>(CI))
        InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      else
        InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = nullptr;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::get(I.getOpcode(), InC, C);
      else
        InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                   PN->getIncomingValue(i), C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      else
        InV = Builder->CreateCast(CI->getOpcode(),
                                PN->getIncomingValue(i), I.getType(), "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }

  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    if (User == &I) continue;
    ReplaceInstUsesWith(*User, NewPN);
    EraseInstFromFunction(*User);
  }
  return ReplaceInstUsesWith(I, NewPN);
}

/// Given a pointer type and a constant offset, determine whether or not there
/// is a sequence of GEP indices into the pointed-to type that will land us at
/// the specified offset. If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
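///
/// For example (an illustrative sketch, assuming i32 has 4-byte size and
/// alignment): for a pointer to {i32, i32} and Offset == 4, NewIndices becomes
/// {0, 1} and the function returns i32, mirroring "getelementptr %p, 0, 1".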
Type *InstCombiner::FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
                                        SmallVectorImpl<Value *> &NewIndices) {
  Type *Ty = PtrTy->getElementType();
  if (!Ty->isSized())
    return nullptr;

  // Start with the index over the outer type.  Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}]
  Type *IntPtrTy = DL.getIntPtrType(PtrTy);
  int64_t FirstIdx = 0;
  if (int64_t TySize = DL.getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    if (Offset < 0) {
      --FirstIdx;
      Offset += TySize;
      assert(Offset >= 0);
    }
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
  }

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types.  If we fail, set OrigBase to null.
  while (Offset) {
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset * 8) >= DL.getTypeSizeInBits(Ty))
      return nullptr;

    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL.getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                            Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      uint64_t EltSize = DL.getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
      Offset %= EltSize;
      Ty = AT->getElementType();
    } else {
      // Otherwise, we can't index into the middle of this atomic type, bail.
      return nullptr;
    }
  }

  return Ty;
}

static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  // If this GEP has only 0 indices, it is the same pointer as
  // Src. If Src is not a trivial GEP too, don't combine
  // the indices.
  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
      !Src.hasOneUse())
    return false;
  return true;
}

/// Return a value X such that Val = X * Scale, or null if none.
/// If the multiplication is known not to overflow, then NoSignedWrap is set.
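///
/// For example (illustrative): descaling "%v = mul nsw i32 %x, 8" by a Scale
/// of 4 returns "%x * 2" and sets NoSignedWrap, since the original multiply
/// was nsw and the new multiplier has smaller absolute value.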
Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
  assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
  assert(cast<IntegerType>(Val->getType())->getBitWidth() ==
         Scale.getBitWidth() && "Scale not compatible with value!");

  // If Val is zero or Scale is one then Val = Val * Scale.
  if (match(Val, m_Zero()) || Scale == 1) {
    NoSignedWrap = true;
    return Val;
  }

  // If Scale is zero then it does not divide Val.
  if (Scale.isMinValue())
    return nullptr;

  // Look through chains of multiplications, searching for a constant that is
  // divisible by Scale.  For example, descaling X*(Y*(Z*4)) by a factor of 4
  // will find the constant factor 4 and produce X*(Y*Z).  Descaling X*(Y*8) by
  // a factor of 4 will produce X*(Y*2).  The principle of operation is to bore
  // down from Val:
  //
  //     Val = M1 * X          ||   Analysis starts here and works down
  //      M1 = M2 * Y          ||   Doesn't descend into terms with more
  //      M2 =  Z * 4          \/   than one use
  //
  // Then to modify a term at the bottom:
  //
  //     Val = M1 * X
  //      M1 =  Z * Y          ||   Replaced M2 with Z
  //
  // Then to work back up correcting nsw flags.

  // Op - the term we are currently analyzing.  Starts at Val then drills down.
  // Replaced with its descaled value before exiting from the drill down loop.
  Value *Op = Val;

  // Parent - initially null, but after drilling down notes where Op came from.
  // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the
  // 0'th operand of Val.
  std::pair<Instruction*, unsigned> Parent;

  // Set if the transform requires a descaling at deeper levels that doesn't
  // overflow.
  bool RequireNoSignedWrap = false;

  // Log base 2 of the scale. Negative if not a power of 2.
  int32_t logScale = Scale.exactLogBase2();

  for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down

    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
      // If Op is a constant divisible by Scale then descale to the quotient.
      APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
      APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
      if (!Remainder.isMinValue())
        // Not divisible by Scale.
        return nullptr;
      // Replace with the quotient in the parent.
      Op = ConstantInt::get(CI->getType(), Quotient);
      NoSignedWrap = true;
      break;
    }

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {

      if (BO->getOpcode() == Instruction::Mul) {
        // Multiplication.
        NoSignedWrap = BO->hasNoSignedWrap();
        if (RequireNoSignedWrap && !NoSignedWrap)
          return nullptr;

        // There are three cases for multiplication: multiplication by exactly
        // the scale, multiplication by a constant different to the scale, and
        // multiplication by something else.
        Value *LHS = BO->getOperand(0);
        Value *RHS = BO->getOperand(1);

        if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
          // Multiplication by a constant.
          if (CI->getValue() == Scale) {
            // Multiplication by exactly the scale, replace the multiplication
            // by its left-hand side in the parent.
            Op = LHS;
            break;
          }

          // Otherwise drill down into the constant.
          if (!Op->hasOneUse())
            return nullptr;

          Parent = std::make_pair(BO, 1);
          continue;
        }

        // Multiplication by something else. Drill down into the left-hand side
        // since that's where the reassociate pass puts the good stuff.
        if (!Op->hasOneUse())
          return nullptr;

        Parent = std::make_pair(BO, 0);
        continue;
      }

      if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&
          isa<ConstantInt>(BO->getOperand(1))) {
        // Multiplication by a power of 2.
        NoSignedWrap = BO->hasNoSignedWrap();
        if (RequireNoSignedWrap && !NoSignedWrap)
          return nullptr;

        Value *LHS = BO->getOperand(0);
        int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
          getLimitedValue(Scale.getBitWidth());
        // Op = LHS << Amt.

        if (Amt == logScale) {
          // Multiplication by exactly the scale, replace the multiplication
          // by its left-hand side in the parent.
          Op = LHS;
          break;
        }
        if (Amt < logScale || !Op->hasOneUse())
          return nullptr;

        // Multiplication by more than the scale.  Reduce the multiplying amount
        // by the scale in the parent.
        Parent = std::make_pair(BO, 1);
        Op = ConstantInt::get(BO->getType(), Amt - logScale);
        break;
      }
    }

    if (!Op->hasOneUse())
      return nullptr;

    if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
      if (Cast->getOpcode() == Instruction::SExt) {
        // Op is sign-extended from a smaller type, descale in the smaller type.
        unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
        APInt SmallScale = Scale.trunc(SmallSize);
        // Suppose Op = sext X, and we descale X as Y * SmallScale.  We want to
        // descale Op as (sext Y) * Scale.  In order to have
        //   sext (Y * SmallScale) = (sext Y) * Scale
        // some conditions need to hold however: SmallScale must sign-extend to
        // Scale and the multiplication Y * SmallScale should not overflow.
        if (SmallScale.sext(Scale.getBitWidth()) != Scale)
          // SmallScale does not sign-extend to Scale.
          return nullptr;
        assert(SmallScale.exactLogBase2() == logScale);
        // Require that Y * SmallScale must not overflow.
        RequireNoSignedWrap = true;

        // Drill down through the cast.
        Parent = std::make_pair(Cast, 0);
        Scale = SmallScale;
        continue;
      }

      if (Cast->getOpcode() == Instruction::Trunc) {
        // Op is truncated from a larger type, descale in the larger type.
        // Suppose Op = trunc X, and we descale X as Y * sext Scale.  Then
        //   trunc (Y * sext Scale) = (trunc Y) * Scale
        // always holds.  However (trunc Y) * Scale may overflow even if
        // trunc (Y * sext Scale) does not, so nsw flags need to be cleared
        // from this point up in the expression (see later).
        if (RequireNoSignedWrap)
          return nullptr;

        // Drill down through the cast.
        unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
        Parent = std::make_pair(Cast, 0);
        Scale = Scale.sext(LargeSize);
        if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())
          logScale = -1;
        assert(Scale.exactLogBase2() == logScale);
        continue;
      }
    }

    // Unsupported expression, bail out.
    return nullptr;
  }

  // If Op is zero then Val = Op * Scale.
  if (match(Op, m_Zero())) {
    NoSignedWrap = true;
    return Op;
  }

  // We know that we can successfully descale, so from here on we can safely
  // modify the IR.  Op holds the descaled version of the deepest term in the
  // expression.  NoSignedWrap is 'true' if multiplying Op by Scale is known
  // not to overflow.

  if (!Parent.first)
    // The expression only had one term.
    return Op;

  // Rewrite the parent using the descaled version of its operand.
  assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
  assert(Op != Parent.first->getOperand(Parent.second) &&
         "Descaling was a no-op?");
  Parent.first->setOperand(Parent.second, Op);
  Worklist.Add(Parent.first);

  // Now work back up the expression correcting nsw flags.  The logic is based
  // on the following observation: if X * Y is known not to overflow as a signed
  // multiplication, and Y is replaced by a value Z with smaller absolute value,
  // then X * Z will not overflow as a signed multiplication either.  As we work
  // our way up, having NoSignedWrap 'true' means that the descaled value at the
  // current level has strictly smaller absolute value than the original.
  Instruction *Ancestor = Parent.first;
  do {
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) {
      // If the multiplication wasn't nsw then we can't say anything about the
      // value of the descaled multiplication, and we have to clear nsw flags
      // from this point on up.
      bool OpNoSignedWrap = BO->hasNoSignedWrap();
      NoSignedWrap &= OpNoSignedWrap;
      if (NoSignedWrap != OpNoSignedWrap) {
        BO->setHasNoSignedWrap(NoSignedWrap);
        Worklist.Add(Ancestor);
      }
    } else if (Ancestor->getOpcode() == Instruction::Trunc) {
      // The fact that the descaled input to the trunc has smaller absolute
      // value than the original input doesn't tell us anything useful about
      // the absolute values of the truncations.
      NoSignedWrap = false;
    }
    assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
           "Failed to keep proper track of nsw flags while drilling down?");

    if (Ancestor == Val)
      // Got to the top, all done!
      return Val;

    // Move up one level in the expression.
    assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
    Ancestor = Ancestor->user_back();
  } while (1);
}
   1243 
   1244 /// \brief Creates a node of a binary operation with the same attributes as
   1245 /// the specified one, but with the given operands.
   1246 static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS,
   1247                                  InstCombiner::BuilderTy *B) {
   1248   Value *BO = B->CreateBinOp(Inst.getOpcode(), LHS, RHS);
   1249   // If LHS and RHS are constant, BO won't be a binary operator.
   1250   if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BO))
   1251     NewBO->copyIRFlags(&Inst);
   1252   return BO;
   1253 }
   1254 
   1255 /// \brief Performs vector-specific transformations of a binary operation.
   1256 /// \param Inst Binary operator to transform.
   1257 /// \return Pointer to node that must replace the original binary operator, or
   1258 ///         null pointer if no transformation was made.
   1259 Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
   1260   if (!Inst.getType()->isVectorTy()) return nullptr;
   1261 
   1262   // It may not be safe to reorder shuffles and things like div, urem, etc.
   1263   // because we may trap when executing those ops on unknown vector elements.
   1264   // See PR20059.
   1265   if (!isSafeToSpeculativelyExecute(&Inst))
   1266     return nullptr;
   1267 
   1268   unsigned VWidth = cast<VectorType>(Inst.getType())->getNumElements();
   1269   Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
   1270   assert(cast<VectorType>(LHS->getType())->getNumElements() == VWidth);
   1271   assert(cast<VectorType>(RHS->getType())->getNumElements() == VWidth);
   1272 
   1273   // If both arguments of the binary operation are shuffles that use the same
   1274   // mask and shuffle within a single vector, it is worthwhile to move the
   1275   // shuffle after the binary operation:
   1276   //   Op(shuffle(v1, m), shuffle(v2, m)) -> shuffle(Op(v1, v2), m)
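          // Illustrative IR (names hypothetical):
          //   %s1 = shufflevector <2 x i32> %v1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
          //   %s2 = shufflevector <2 x i32> %v2, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
          //   %r  = add <2 x i32> %s1, %s2
          // becomes
          //   %a = add <2 x i32> %v1, %v2
          //   %r = shufflevector <2 x i32> %a, <2 x i32> undef, <2 x i32> <i32 1, i32 0>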
   1277   if (isa<ShuffleVectorInst>(LHS) && isa<ShuffleVectorInst>(RHS)) {
   1278     ShuffleVectorInst *LShuf = cast<ShuffleVectorInst>(LHS);
   1279     ShuffleVectorInst *RShuf = cast<ShuffleVectorInst>(RHS);
   1280     if (isa<UndefValue>(LShuf->getOperand(1)) &&
   1281         isa<UndefValue>(RShuf->getOperand(1)) &&
   1282         LShuf->getOperand(0)->getType() == RShuf->getOperand(0)->getType() &&
   1283         LShuf->getMask() == RShuf->getMask()) {
   1284       Value *NewBO = CreateBinOpAsGiven(Inst, LShuf->getOperand(0),
   1285           RShuf->getOperand(0), Builder);
   1286       return Builder->CreateShuffleVector(NewBO,
   1287           UndefValue::get(NewBO->getType()), LShuf->getMask());
   1288     }
   1289   }
   1290 
   1291   // If one argument is a shuffle within one vector and the other is a
   1292   // constant, try moving the shuffle after the binary operation.
   1293   ShuffleVectorInst *Shuffle = nullptr;
   1294   Constant *C1 = nullptr;
   1295   if (isa<ShuffleVectorInst>(LHS)) Shuffle = cast<ShuffleVectorInst>(LHS);
   1296   if (isa<ShuffleVectorInst>(RHS)) Shuffle = cast<ShuffleVectorInst>(RHS);
   1297   if (isa<Constant>(LHS)) C1 = cast<Constant>(LHS);
   1298   if (isa<Constant>(RHS)) C1 = cast<Constant>(RHS);
   1299   if (Shuffle && C1 &&
   1300       (isa<ConstantVector>(C1) || isa<ConstantDataVector>(C1)) &&
   1301       isa<UndefValue>(Shuffle->getOperand(1)) &&
   1302       Shuffle->getType() == Shuffle->getOperand(0)->getType()) {
   1303     SmallVector<int, 16> ShMask = Shuffle->getShuffleMask();
   1304     // Find a constant C2 with the property:
   1305     //   shuffle(C2, ShMask) = C1
   1306     // If no such constant exists (example: ShMask=<0,0> and C1=<1,2>), the
   1307     // reordering is not possible.
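            // Illustrative example (values ours): ShMask=<1,0> and C1=<10,20>
            // give C2=<20,10>, since shuffle(<20,10>, <1,0>) yields <10,20>.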
   1308     SmallVector<Constant*, 16> C2M(VWidth,
   1309                                UndefValue::get(C1->getType()->getScalarType()));
   1310     bool MayChange = true;
   1311     for (unsigned I = 0; I < VWidth; ++I) {
   1312       if (ShMask[I] >= 0) {
   1313         assert(ShMask[I] < (int)VWidth);
   1314         if (!isa<UndefValue>(C2M[ShMask[I]])) {
   1315           MayChange = false;
   1316           break;
   1317         }
   1318         C2M[ShMask[I]] = C1->getAggregateElement(I);
   1319       }
   1320     }
   1321     if (MayChange) {
   1322       Constant *C2 = ConstantVector::get(C2M);
   1323       Value *NewLHS = isa<Constant>(LHS) ? C2 : Shuffle->getOperand(0);
   1324       Value *NewRHS = isa<Constant>(LHS) ? Shuffle->getOperand(0) : C2;
   1325       Value *NewBO = CreateBinOpAsGiven(Inst, NewLHS, NewRHS, Builder);
   1326       return Builder->CreateShuffleVector(NewBO,
   1327           UndefValue::get(Inst.getType()), Shuffle->getMask());
   1328     }
   1329   }
   1330 
   1331   return nullptr;
   1332 }
   1333 
   1334 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
   1335   SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
   1336 
   1337   if (Value *V = SimplifyGEPInst(Ops, DL, TLI, DT, AC))
   1338     return ReplaceInstUsesWith(GEP, V);
   1339 
   1340   Value *PtrOp = GEP.getOperand(0);
   1341 
   1342   // Eliminate unneeded casts for indices, and replace indices which displace
   1343   // by multiples of a zero-size type with zero.
   1344   bool MadeChange = false;
   1345   Type *IntPtrTy =
   1346     DL.getIntPtrType(GEP.getPointerOperandType()->getScalarType());
   1347 
   1348   gep_type_iterator GTI = gep_type_begin(GEP);
   1349   for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
   1350        ++I, ++GTI) {
   1351     // Skip indices into struct types.
   1352     SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
   1353     if (!SeqTy)
   1354       continue;
   1355 
   1356     // Index type should have the same width as IntPtr
   1357     Type *IndexTy = (*I)->getType();
   1358     Type *NewIndexType = IndexTy->isVectorTy() ?
   1359       VectorType::get(IntPtrTy, IndexTy->getVectorNumElements()) : IntPtrTy;
   1360 
   1361     // If the element type has zero size then any index over it is equivalent
   1362     // to an index of zero, so replace it with zero if it is not zero already.
   1363     if (SeqTy->getElementType()->isSized() &&
   1364         DL.getTypeAllocSize(SeqTy->getElementType()) == 0)
   1365       if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
   1366         *I = Constant::getNullValue(NewIndexType);
   1367         MadeChange = true;
   1368       }
   1369 
   1370     if (IndexTy != NewIndexType) {
   1371       // If we are using a wider index than needed for this platform, shrink
   1372       // it to what we need.  If narrower, sign-extend it to what we need.
   1373       // This explicit cast can make subsequent optimizations more obvious.
   1374       *I = Builder->CreateIntCast(*I, NewIndexType, true);
   1375       MadeChange = true;
   1376     }
   1377   }
   1378   if (MadeChange)
   1379     return &GEP;
   1380 
   1381   // Check to see if the inputs to the PHI node are getelementptr instructions.
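          // Illustrative IR (names hypothetical): given
          //   %p = phi i32* [ %g1, %bb1 ], [ %g2, %bb2 ]
          //   %q = getelementptr i32, i32* %p, i64 1
          // where %g1 and %g2 are GEPs with matching operand lists, a clone of
          // them is placed in this block so %q can be merged with it below.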
   1382   if (PHINode *PN = dyn_cast<PHINode>(PtrOp)) {
   1383     GetElementPtrInst *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
   1384     if (!Op1)
   1385       return nullptr;
   1386 
   1387     // Don't fold a GEP into itself through a PHI node. This can only happen
   1388     // through the back-edge of a loop. Folding a GEP into itself means that
   1389     // the value of the previous iteration needs to be stored in the meantime,
   1390     // thus requiring an additional register variable to be live, but not
   1391     // actually achieving anything (the GEP still needs to be executed once per
   1392     // loop iteration).
   1393     if (Op1 == &GEP)
   1394       return nullptr;
   1395 
   1396     int DI = -1;
   1397 
   1398     for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
   1399       GetElementPtrInst *Op2 = dyn_cast<GetElementPtrInst>(*I);
   1400       if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands())
   1401         return nullptr;
   1402 
   1403       // As for Op1 above, don't try to fold a GEP into itself.
   1404       if (Op2 == &GEP)
   1405         return nullptr;
   1406 
   1407       // Keep track of the type as we walk the GEP.
   1408       Type *CurTy = Op1->getOperand(0)->getType()->getScalarType();
   1409 
   1410       for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
   1411         if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
   1412           return nullptr;
   1413 
   1414         if (Op1->getOperand(J) != Op2->getOperand(J)) {
   1415           if (DI == -1) {
   1416             // We have not yet seen any differences in the GEPs feeding the
   1417             // PHI, so we record this one if it is allowed to be a
   1418             // variable.
   1419 
   1420             // The first two arguments can vary for any GEP; the rest have to
   1421             // be static for struct slots.
   1422             if (J > 1 && CurTy->isStructTy())
   1423               return nullptr;
   1424 
   1425             DI = J;
   1426           } else {
   1427             // The GEPs differ by more than one input. While this could be
   1428             // extended to support GEPs that vary by more than one variable, it
   1429             // doesn't make sense: it greatly increases the complexity and
   1430             // would result in an R+R+R addressing mode, which no backend
   1431             // supports directly and which would need to be broken into several
   1432             // simpler instructions anyway.
   1433             return nullptr;
   1434           }
   1435         }
   1436 
   1437         // Sink down a layer of the type for the next iteration.
   1438         if (J > 0) {
   1439           if (CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
   1440             CurTy = CT->getTypeAtIndex(Op1->getOperand(J));
   1441           } else {
   1442             CurTy = nullptr;
   1443           }
   1444         }
   1445       }
   1446     }
   1447 
   1448     // If not all GEPs are identical, we'll have to create a new PHI node.
   1449     // Check that the old PHI node has only one use so that it will get
   1450     // removed.
   1451     if (DI != -1 && !PN->hasOneUse())
   1452       return nullptr;
   1453 
   1454     GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(Op1->clone());
   1455     if (DI == -1) {
   1456       // All the GEPs feeding the PHI are identical. Clone one down into our
   1457       // BB so that it can be merged with the current GEP.
   1458       GEP.getParent()->getInstList().insert(
   1459           GEP.getParent()->getFirstInsertionPt(), NewGEP);
   1460     } else {
   1461       // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
   1462       // into the current block so it can be merged, and create a new PHI to
   1463       // set that index.
   1464       PHINode *NewPN;
   1465       {
   1466         IRBuilderBase::InsertPointGuard Guard(*Builder);
   1467         Builder->SetInsertPoint(PN);
   1468         NewPN = Builder->CreatePHI(Op1->getOperand(DI)->getType(),
   1469                                    PN->getNumOperands());
   1470       }
   1471 
   1472       for (auto &I : PN->operands())
   1473         NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
   1474                            PN->getIncomingBlock(I));
   1475 
   1476       NewGEP->setOperand(DI, NewPN);
   1477       GEP.getParent()->getInstList().insert(
   1478           GEP.getParent()->getFirstInsertionPt(), NewGEP);
   1480     }
   1481 
   1482     GEP.setOperand(0, NewGEP);
   1483     PtrOp = NewGEP;
   1484   }
   1485 
   1486   // Combine Indices - If the source pointer to this getelementptr instruction
   1487   // is a getelementptr instruction, combine the indices of the two
   1488   // getelementptr instructions into a single instruction.
   1489   //
   1490   if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
   1491     if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
   1492       return nullptr;
   1493 
   1494     // Note that if our source is a gep chain itself then we wait for that
   1495     // chain to be resolved before we perform this transformation.  This
   1496     // avoids creating a ton of code in some cases.
   1497     if (GEPOperator *SrcGEP =
   1498           dyn_cast<GEPOperator>(Src->getOperand(0)))
   1499       if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
   1500         return nullptr;   // Wait until our source is folded to completion.
   1501 
   1502     SmallVector<Value*, 8> Indices;
   1503 
   1504     // Find out whether the last index in the source GEP is a sequential index.
   1505     bool EndsWithSequential = false;
   1506     for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
   1507          I != E; ++I)
   1508       EndsWithSequential = !(*I)->isStructTy();
   1509 
   1510     // Can we combine the two pointer arithmetic offsets?
   1511     if (EndsWithSequential) {
   1512       // Replace: gep (gep %P, long B), long A, ...
   1513       // With:    T = long A+B; gep %P, T, ...
   1514       //
   1515       Value *Sum;
   1516       Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
   1517       Value *GO1 = GEP.getOperand(1);
   1518       if (SO1 == Constant::getNullValue(SO1->getType())) {
   1519         Sum = GO1;
   1520       } else if (GO1 == Constant::getNullValue(GO1->getType())) {
   1521         Sum = SO1;
   1522       } else {
   1523         // If they aren't the same type, then the input hasn't been processed
   1524         // by the loop above yet (which canonicalizes sequential index types to
   1525         // intptr_t).  Just avoid transforming this until the input has been
   1526         // normalized.
   1527         if (SO1->getType() != GO1->getType())
   1528           return nullptr;
   1529         // Only do the combine when GO1 and SO1 are both constants; only then
   1530         // are we sure that the cost after the merge is never more than
   1531         // that before the merge.
   1532         if (!isa<Constant>(GO1) || !isa<Constant>(SO1))
   1533           return nullptr;
   1534         Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
   1535       }
   1536 
   1537       // Update the GEP in place if possible.
   1538       if (Src->getNumOperands() == 2) {
   1539         GEP.setOperand(0, Src->getOperand(0));
   1540         GEP.setOperand(1, Sum);
   1541         return &GEP;
   1542       }
   1543       Indices.append(Src->op_begin()+1, Src->op_end()-1);
   1544       Indices.push_back(Sum);
   1545       Indices.append(GEP.op_begin()+2, GEP.op_end());
   1546     } else if (isa<Constant>(*GEP.idx_begin()) &&
   1547                cast<Constant>(*GEP.idx_begin())->isNullValue() &&
   1548                Src->getNumOperands() != 1) {
   1549       // Otherwise, we can do the fold if the first index of the GEP is zero.
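              // Illustrative example: gep (gep %P, i64 1), i64 0, i32 2
              //   -> gep %P, i64 1, i32 2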
   1550       Indices.append(Src->op_begin()+1, Src->op_end());
   1551       Indices.append(GEP.idx_begin()+1, GEP.idx_end());
   1552     }
   1553 
   1554     if (!Indices.empty())
   1555       return GEP.isInBounds() && Src->isInBounds()
   1556                  ? GetElementPtrInst::CreateInBounds(
   1557                        Src->getSourceElementType(), Src->getOperand(0), Indices,
   1558                        GEP.getName())
   1559                  : GetElementPtrInst::Create(Src->getSourceElementType(),
   1560                                              Src->getOperand(0), Indices,
   1561                                              GEP.getName());
   1562   }
   1563 
   1564   if (GEP.getNumIndices() == 1) {
   1565     unsigned AS = GEP.getPointerAddressSpace();
   1566     if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
   1567         DL.getPointerSizeInBits(AS)) {
   1568       Type *PtrTy = GEP.getPointerOperandType();
   1569       Type *Ty = PtrTy->getPointerElementType();
   1570       uint64_t TyAllocSize = DL.getTypeAllocSize(Ty);
   1571 
   1572       bool Matched = false;
   1573       uint64_t C;
   1574       Value *V = nullptr;
   1575       if (TyAllocSize == 1) {
   1576         V = GEP.getOperand(1);
   1577         Matched = true;
   1578       } else if (match(GEP.getOperand(1),
   1579                        m_AShr(m_Value(V), m_ConstantInt(C)))) {
   1580         if (TyAllocSize == 1ULL << C)
   1581           Matched = true;
   1582       } else if (match(GEP.getOperand(1),
   1583                        m_SDiv(m_Value(V), m_ConstantInt(C)))) {
   1584         if (TyAllocSize == C)
   1585           Matched = true;
   1586       }
   1587 
   1588       if (Matched) {
   1589         // Canonicalize (gep i8* X, -(ptrtoint Y))
   1590         // to (inttoptr (sub (ptrtoint X), (ptrtoint Y)))
   1591         // The GEP pattern is emitted by the SCEV expander for certain kinds of
   1592         // pointer arithmetic.
   1593         if (match(V, m_Neg(m_PtrToInt(m_Value())))) {
   1594           Operator *Index = cast<Operator>(V);
   1595           Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType());
   1596           Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1));
   1597           return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType());
   1598         }
   1599         // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X))
   1600         // to (bitcast Y)
   1601         Value *Y;
   1602         if (match(V, m_Sub(m_PtrToInt(m_Value(Y)),
   1603                            m_PtrToInt(m_Specific(GEP.getOperand(0)))))) {
   1604           return CastInst::CreatePointerBitCastOrAddrSpaceCast(Y,
   1605                                                                GEP.getType());
   1606         }
   1607       }
   1608     }
   1609   }
   1610 
   1611   // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
   1612   Value *StrippedPtr = PtrOp->stripPointerCasts();
   1613   PointerType *StrippedPtrTy = dyn_cast<PointerType>(StrippedPtr->getType());
   1614 
   1615   // We do not handle pointer-vector geps here.
   1616   if (!StrippedPtrTy)
   1617     return nullptr;
   1618 
   1619   if (StrippedPtr != PtrOp) {
   1620     bool HasZeroPointerIndex = false;
   1621     if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
   1622       HasZeroPointerIndex = C->isZero();
   1623 
   1624     // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
   1625     // into     : GEP [10 x i8]* X, i32 0, ...
   1626     //
   1627     // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
   1628     //           into     : GEP i8* X, ...
   1629     //
   1630     // This occurs when the program declares an array extern like "int X[];"
   1631     if (HasZeroPointerIndex) {
   1632       PointerType *CPTy = cast<PointerType>(PtrOp->getType());
   1633       if (ArrayType *CATy =
   1634           dyn_cast<ArrayType>(CPTy->getElementType())) {
   1635         // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
   1636         if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
   1637           // -> GEP i8* X, ...
   1638           SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
   1639           GetElementPtrInst *Res = GetElementPtrInst::Create(
   1640               StrippedPtrTy->getElementType(), StrippedPtr, Idx, GEP.getName());
   1641           Res->setIsInBounds(GEP.isInBounds());
   1642           if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace())
   1643             return Res;
   1644           // Insert Res, and create an addrspacecast.
   1645           // e.g.,
   1646           // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ...
   1647           // ->
   1648           // %0 = GEP i8 addrspace(1)* X, ...
   1649           // addrspacecast i8 addrspace(1)* %0 to i8*
   1650           return new AddrSpaceCastInst(Builder->Insert(Res), GEP.getType());
   1651         }
   1652 
   1653         if (ArrayType *XATy =
   1654               dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
   1655           // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
   1656           if (CATy->getElementType() == XATy->getElementType()) {
   1657             // -> GEP [10 x i8]* X, i32 0, ...
   1658             // At this point, we know that the cast source type is a pointer
   1659             // to an array of the same type as the destination pointer
   1660             // array.  Because the array type is never stepped over (there
   1661             // is a leading zero) we can fold the cast into this GEP.
   1662             if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) {
   1663               GEP.setOperand(0, StrippedPtr);
   1664               GEP.setSourceElementType(XATy);
   1665               return &GEP;
   1666             }
   1667             // Cannot replace the base pointer directly because StrippedPtr's
   1668             // address space is different. Instead, create a new GEP followed by
   1669             // an addrspacecast.
   1670             // e.g.,
   1671             // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*),
   1672             //   i32 0, ...
   1673             // ->
   1674             // %0 = GEP [10 x i8] addrspace(1)* X, ...
   1675             // addrspacecast i8 addrspace(1)* %0 to i8*
   1676             SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end());
   1677             Value *NewGEP = GEP.isInBounds()
   1678                                 ? Builder->CreateInBoundsGEP(
   1679                                       nullptr, StrippedPtr, Idx, GEP.getName())
   1680                                 : Builder->CreateGEP(nullptr, StrippedPtr, Idx,
   1681                                                      GEP.getName());
   1682             return new AddrSpaceCastInst(NewGEP, GEP.getType());
   1683           }
   1684         }
   1685       }
   1686     } else if (GEP.getNumOperands() == 2) {
   1687       // Transform things like:
   1688       // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
   1689       // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
   1690       Type *SrcElTy = StrippedPtrTy->getElementType();
   1691       Type *ResElTy = PtrOp->getType()->getPointerElementType();
   1692       if (SrcElTy->isArrayTy() &&
   1693           DL.getTypeAllocSize(SrcElTy->getArrayElementType()) ==
   1694               DL.getTypeAllocSize(ResElTy)) {
   1695         Type *IdxType = DL.getIntPtrType(GEP.getType());
   1696         Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
   1697         Value *NewGEP =
   1698             GEP.isInBounds()
   1699                 ? Builder->CreateInBoundsGEP(nullptr, StrippedPtr, Idx,
   1700                                              GEP.getName())
   1701                 : Builder->CreateGEP(nullptr, StrippedPtr, Idx, GEP.getName());
   1702 
   1703         // V and GEP are both pointer types --> BitCast
   1704         return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
   1705                                                              GEP.getType());
   1706       }
   1707 
   1708       // Transform things like:
   1709       // %V = mul i64 %N, 4
   1710       // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
   1711       // into:  %t1 = getelementptr i32* %arr, i32 %N; bitcast
   1712       if (ResElTy->isSized() && SrcElTy->isSized()) {
   1713         // Check that changing the type amounts to dividing the index by a scale
   1714         // factor.
   1715         uint64_t ResSize = DL.getTypeAllocSize(ResElTy);
   1716         uint64_t SrcSize = DL.getTypeAllocSize(SrcElTy);
   1717         if (ResSize && SrcSize % ResSize == 0) {
   1718           Value *Idx = GEP.getOperand(1);
   1719           unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
   1720           uint64_t Scale = SrcSize / ResSize;
   1721 
   1722           // Earlier transforms ensure that the index has type IntPtrType, which
   1723           // considerably simplifies the logic by eliminating implicit casts.
   1724           assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
   1725                  "Index not cast to pointer width?");
   1726 
   1727           bool NSW;
   1728           if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
   1729             // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
   1730             // If the multiplication NewIdx * Scale may overflow then the new
   1731             // GEP may not be "inbounds".
   1732             Value *NewGEP =
   1733                 GEP.isInBounds() && NSW
   1734                     ? Builder->CreateInBoundsGEP(nullptr, StrippedPtr, NewIdx,
   1735                                                  GEP.getName())
   1736                     : Builder->CreateGEP(nullptr, StrippedPtr, NewIdx,
   1737                                          GEP.getName());
   1738 
   1739             // The NewGEP must be pointer typed, so must the old one -> BitCast
   1740             return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
   1741                                                                  GEP.getType());
   1742           }
   1743         }
   1744       }
   1745 
   1746       // Similarly, transform things like:
   1747       // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
   1748       //   (where tmp = 8*tmp2) into:
   1749       // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
   1750       if (ResElTy->isSized() && SrcElTy->isSized() && SrcElTy->isArrayTy()) {
   1751         // Check that changing to the array element type amounts to dividing the
   1752         // index by a scale factor.
   1753         uint64_t ResSize = DL.getTypeAllocSize(ResElTy);
   1754         uint64_t ArrayEltSize =
   1755             DL.getTypeAllocSize(SrcElTy->getArrayElementType());
   1756         if (ResSize && ArrayEltSize % ResSize == 0) {
   1757           Value *Idx = GEP.getOperand(1);
   1758           unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
   1759           uint64_t Scale = ArrayEltSize / ResSize;
   1760 
   1761           // Earlier transforms ensure that the index has type IntPtrType, which
   1762           // considerably simplifies the logic by eliminating implicit casts.
   1763           assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
   1764                  "Index not cast to pointer width?");
   1765 
   1766           bool NSW;
   1767           if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
   1768             // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
   1769             // If the multiplication NewIdx * Scale may overflow then the new
   1770             // GEP may not be "inbounds".
   1771             Value *Off[2] = {
   1772                 Constant::getNullValue(DL.getIntPtrType(GEP.getType())),
   1773                 NewIdx};
   1774 
   1775             Value *NewGEP = GEP.isInBounds() && NSW
   1776                                 ? Builder->CreateInBoundsGEP(
   1777                                       SrcElTy, StrippedPtr, Off, GEP.getName())
   1778                                 : Builder->CreateGEP(SrcElTy, StrippedPtr, Off,
   1779                                                      GEP.getName());
   1780             // The NewGEP must be pointer typed, so must the old one -> BitCast
   1781             return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
   1782                                                                  GEP.getType());
   1783           }
   1784         }
   1785       }
   1786     }
   1787   }
   1788 
   1789   // addrspacecast between types is canonicalized as a bitcast, then an
   1790   // addrspacecast. To take advantage of the below bitcast + struct GEP, look
   1791   // through the addrspacecast.
   1792   if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(PtrOp)) {
   1793     //   X = bitcast A addrspace(1)* to B addrspace(1)*
   1794     //   Y = addrspacecast A addrspace(1)* to B addrspace(2)*
   1795     //   Z = gep Y, <...constant indices...>
   1796     // Into an addrspacecasted GEP of the struct.
   1797     if (BitCastInst *BC = dyn_cast<BitCastInst>(ASC->getOperand(0)))
   1798       PtrOp = BC;
   1799   }
   1800 
   1801   /// See if we can simplify:
   1802   ///   X = bitcast A* to B*
   1803   ///   Y = gep X, <...constant indices...>
   1804   /// into a gep of the original struct.  This is important for SROA and alias
   1805   /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
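          /// Illustrative example (struct layout ours): with %A of type
          /// %struct.S*, where field 1 is at byte offset 4,
          ///   %X = bitcast %struct.S* %A to i8*
          ///   %Y = getelementptr i8, i8* %X, i64 4
          /// can become: getelementptr %struct.S, %struct.S* %A, i32 0, i32 1.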
   1806   if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
   1807     Value *Operand = BCI->getOperand(0);
   1808     PointerType *OpType = cast<PointerType>(Operand->getType());
   1809     unsigned OffsetBits = DL.getPointerTypeSizeInBits(GEP.getType());
   1810     APInt Offset(OffsetBits, 0);
   1811     if (!isa<BitCastInst>(Operand) &&
   1812         GEP.accumulateConstantOffset(DL, Offset)) {
   1813 
   1814       // If this GEP instruction doesn't move the pointer, just replace the GEP
   1815       // with a bitcast of the real input to the dest type.
   1816       if (!Offset) {
   1817         // If the bitcast is of an allocation, and the allocation will be
   1818         // converted to match the type of the cast, don't touch this.
   1819         if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, TLI)) {
   1820           // See if the bitcast simplifies, if so, don't nuke this GEP yet.
   1821           if (Instruction *I = visitBitCast(*BCI)) {
   1822             if (I != BCI) {
   1823               I->takeName(BCI);
   1824               BCI->getParent()->getInstList().insert(BCI->getIterator(), I);
   1825               ReplaceInstUsesWith(*BCI, I);
   1826             }
   1827             return &GEP;
   1828           }
   1829         }
   1830 
   1831         if (Operand->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
   1832           return new AddrSpaceCastInst(Operand, GEP.getType());
   1833         return new BitCastInst(Operand, GEP.getType());
   1834       }
   1835 
   1836       // Otherwise, if the offset is non-zero, we need to find out if there is a
   1837       // field at Offset in 'A's type.  If so, we can pull the cast through the
   1838       // GEP.
   1839       SmallVector<Value*, 8> NewIndices;
   1840       if (FindElementAtOffset(OpType, Offset.getSExtValue(), NewIndices)) {
   1841         Value *NGEP =
   1842             GEP.isInBounds()
   1843                 ? Builder->CreateInBoundsGEP(nullptr, Operand, NewIndices)
   1844                 : Builder->CreateGEP(nullptr, Operand, NewIndices);
   1845 
   1846         if (NGEP->getType() == GEP.getType())
   1847           return ReplaceInstUsesWith(GEP, NGEP);
   1848         NGEP->takeName(&GEP);
   1849 
   1850         if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
   1851           return new AddrSpaceCastInst(NGEP, GEP.getType());
   1852         return new BitCastInst(NGEP, GEP.getType());
   1853       }
   1854     }
   1855   }
   1856 
   1857   return nullptr;
   1858 }
   1859 
   1860 static bool
   1861 isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users,
   1862                      const TargetLibraryInfo *TLI) {
   1863   SmallVector<Instruction*, 4> Worklist;
   1864   Worklist.push_back(AI);
   1865 
   1866   do {
   1867     Instruction *PI = Worklist.pop_back_val();
   1868     for (User *U : PI->users()) {
   1869       Instruction *I = cast<Instruction>(U);
   1870       switch (I->getOpcode()) {
   1871       default:
   1872         // Give up the moment we see something we can't handle.
   1873         return false;
   1874 
   1875       case Instruction::BitCast:
   1876       case Instruction::GetElementPtr:
   1877         Users.emplace_back(I);
   1878         Worklist.push_back(I);
   1879         continue;
   1880 
   1881       case Instruction::ICmp: {
   1882         ICmpInst *ICI = cast<ICmpInst>(I);
   1883         // We can fold eq/ne comparisons with null to false/true, respectively.
   1884         if (!ICI->isEquality() || !isa<ConstantPointerNull>(ICI->getOperand(1)))
   1885           return false;
   1886         Users.emplace_back(I);
   1887         continue;
   1888       }
   1889 
   1890       case Instruction::Call:
   1891         // Ignore no-op and store intrinsics.
   1892         if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
   1893           switch (II->getIntrinsicID()) {
   1894           default:
   1895             return false;
   1896 
   1897           case Intrinsic::memmove:
   1898           case Intrinsic::memcpy:
   1899           case Intrinsic::memset: {
   1900             MemIntrinsic *MI = cast<MemIntrinsic>(II);
   1901             if (MI->isVolatile() || MI->getRawDest() != PI)
   1902               return false;
   1903           }
   1904           // fall through
   1905           case Intrinsic::dbg_declare:
   1906           case Intrinsic::dbg_value:
   1907           case Intrinsic::invariant_start:
   1908           case Intrinsic::invariant_end:
   1909           case Intrinsic::lifetime_start:
   1910           case Intrinsic::lifetime_end:
   1911           case Intrinsic::objectsize:
   1912             Users.emplace_back(I);
   1913             continue;
   1914           }
   1915         }
   1916 
   1917         if (isFreeCall(I, TLI)) {
   1918           Users.emplace_back(I);
   1919           continue;
   1920         }
   1921         return false;
   1922 
   1923       case Instruction::Store: {
   1924         StoreInst *SI = cast<StoreInst>(I);
   1925         if (SI->isVolatile() || SI->getPointerOperand() != PI)
   1926           return false;
   1927         Users.emplace_back(I);
   1928         continue;
   1929       }
   1930       }
   1931       llvm_unreachable("missing a return?");
   1932     }
   1933   } while (!Worklist.empty());
   1934   return true;
   1935 }
   1936 
   1937 Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
   1938   // If we have a malloc call that is used only in comparisons to null and in
   1939   // free calls, delete those calls and replace the comparisons with true or
   1940   // false as appropriate.
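          // Illustrative IR (names hypothetical):
          //   %m = call i8* @malloc(i64 16)
          //   %c = icmp eq i8* %m, null
          //   call void @free(i8* %m)
          // Here %c folds to false and both calls are deleted.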
   1941   SmallVector<WeakVH, 64> Users;
   1942   if (isAllocSiteRemovable(&MI, Users, TLI)) {
   1943     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
   1944       Instruction *I = cast_or_null<Instruction>(&*Users[i]);
   1945       if (!I) continue;
   1946 
   1947       if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
   1948         ReplaceInstUsesWith(*C,
   1949                             ConstantInt::get(Type::getInt1Ty(C->getContext()),
   1950                                              C->isFalseWhenEqual()));
   1951       } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
   1952         ReplaceInstUsesWith(*I, UndefValue::get(I->getType()));
   1953       } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
   1954         if (II->getIntrinsicID() == Intrinsic::objectsize) {
   1955           ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
   1956           uint64_t DontKnow = CI->isZero() ? -1ULL : 0;
   1957           ReplaceInstUsesWith(*I, ConstantInt::get(I->getType(), DontKnow));
   1958         }
   1959       }
   1960       EraseInstFromFunction(*I);
   1961     }
   1962 
   1963     if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
   1964       // Replace invoke with a NOP intrinsic to maintain the original CFG
   1965       Module *M = II->getModule();
   1966       Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
   1967       InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
   1968                          None, "", II->getParent());
   1969     }
   1970     return EraseInstFromFunction(MI);
   1971   }
   1972   return nullptr;
   1973 }
   1974 
   1975 /// \brief Move the call to free before a NULL test.
   1976 ///
   1977 /// Check if this free is accessed after its argument has been tested
   1978 /// against NULL (property 0).
   1979 /// If yes, it is legal to move this call to its predecessor block.
   1980 ///
   1981 /// The move is performed only if the block containing the call to free
   1982 /// will be removed, i.e.:
   1983 /// 1. it has only one predecessor P, and P has two successors
   1984 /// 2. it contains the call and an unconditional branch
   1985 /// 3. its successor is the same as its predecessor's successor
   1986 ///
   1987 /// Profitability is not a concern here; this function should
   1988 /// be called only if the caller knows this transformation would be
   1989 /// profitable (e.g., for code size).
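        /// For example (names hypothetical), given a predecessor ending in
        ///   %c = icmp eq i8* %x, null
        ///   br i1 %c, label %succ, label %free_bb
        /// where free_bb contains only "call void @free(i8* %x)" and a branch
        /// to %succ, the call to free is moved before the branch; this is
        /// sound because free of a null pointer is a no-op.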
   1990 static Instruction *
   1991 tryToMoveFreeBeforeNullTest(CallInst &FI) {
   1992   Value *Op = FI.getArgOperand(0);
   1993   BasicBlock *FreeInstrBB = FI.getParent();
   1994   BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
   1995 
   1996   // Validate part of constraint #1: Only one predecessor
   1997   // FIXME: We can extend the number of predecessors, but in that case, we
   1998   //        would duplicate the call to free in each predecessor and it may
   1999   //        not be profitable even for code size.
   2000   if (!PredBB)
   2001     return nullptr;
   2002 
   2003   // Validate constraint #2: Does this block contain only the call to
   2004   //                         free and an unconditional branch?
   2005   // FIXME: We could check if we can speculate everything in the
   2006   //        predecessor block
   2007   if (FreeInstrBB->size() != 2)
   2008     return nullptr;
   2009   BasicBlock *SuccBB;
   2010   if (!match(FreeInstrBB->getTerminator(), m_UnconditionalBr(SuccBB)))
   2011     return nullptr;
   2012 
   2013   // Validate the rest of constraint #1 by matching on the pred branch.
   2014   TerminatorInst *TI = PredBB->getTerminator();
   2015   BasicBlock *TrueBB, *FalseBB;
   2016   ICmpInst::Predicate Pred;
   2017   if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Op), m_Zero()), TrueBB, FalseBB)))
   2018     return nullptr;
   2019   if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
   2020     return nullptr;
   2021 
   2022   // Validate constraint #3: Ensure the null case just falls through.
   2023   if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
   2024     return nullptr;
   2025   assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
   2026          "Broken CFG: missing edge from predecessor to successor");
   2027 
   2028   FI.moveBefore(TI);
   2029   return &FI;
   2030 }
   2031 
   2032 
   2033 Instruction *InstCombiner::visitFree(CallInst &FI) {
   2034   Value *Op = FI.getArgOperand(0);
   2035 
   2036   // free undef -> unreachable.
   2037   if (isa<UndefValue>(Op)) {
   2038     // Insert a store through an undef pointer because we cannot modify the CFG here.
   2039     Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
   2040                          UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
   2041     return EraseInstFromFunction(FI);
   2042   }
   2043 
   2044   // If we have 'free null', delete the instruction.  This can happen in STL
   2045   // code when a lot of inlining happens.
   2046   if (isa<ConstantPointerNull>(Op))
   2047     return EraseInstFromFunction(FI);
   2048 
   2049   // If we optimize for code size, try to move the call to free before the null
   2050   // test so that SimplifyCFG can remove the empty block and dead code
   2051   // elimination can remove the branch. I.e., this helps to turn something like:
   2052   // if (foo) free(foo);
   2053   // into
   2054   // free(foo);
   2055   if (MinimizeSize)
   2056     if (Instruction *I = tryToMoveFreeBeforeNullTest(FI))
   2057       return I;
   2058 
   2059   return nullptr;
   2060 }
   2061 
   2062 Instruction *InstCombiner::visitReturnInst(ReturnInst &RI) {
   2063   if (RI.getNumOperands() == 0) // ret void
   2064     return nullptr;
   2065 
   2066   Value *ResultOp = RI.getOperand(0);
   2067   Type *VTy = ResultOp->getType();
   2068   if (!VTy->isIntegerTy())
   2069     return nullptr;
   2070 
   2071   // There might be assume intrinsics dominating this return that completely
   2072   // determine the value. If so, constant fold it.
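          // Illustrative example: under a dominating llvm.assume that pins
          // every bit of %x (say, %x == 42), "ret i32 %x" becomes "ret i32 42".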
   2073   unsigned BitWidth = VTy->getPrimitiveSizeInBits();
   2074   APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
   2075   computeKnownBits(ResultOp, KnownZero, KnownOne, 0, &RI);
   2076   if ((KnownZero|KnownOne).isAllOnesValue())
   2077     RI.setOperand(0, Constant::getIntegerValue(VTy, KnownOne));
   2078 
   2079   return nullptr;
   2080 }
   2081 
   2082 Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
   2083   // Change br (not X), label True, label False to: br X, label False, True
   2084   Value *X = nullptr;
   2085   BasicBlock *TrueDest;
   2086   BasicBlock *FalseDest;
   2087   if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
   2088       !isa<Constant>(X)) {
   2089     // Swap Destinations and condition...
   2090     BI.setCondition(X);
   2091     BI.swapSuccessors();
   2092     return &BI;
   2093   }
   2094 
   2095   // If the condition is irrelevant, remove the use so that other
   2096   // transforms on the condition become more effective.
   2097   if (BI.isConditional() &&
   2098       BI.getSuccessor(0) == BI.getSuccessor(1) &&
   2099       !isa<UndefValue>(BI.getCondition())) {
   2100     BI.setCondition(UndefValue::get(BI.getCondition()->getType()));
   2101     return &BI;
   2102   }
   2103 
   2104   // Canonicalize fcmp_one -> fcmp_oeq
   2105   FCmpInst::Predicate FPred; Value *Y;
   2106   if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
   2107                              TrueDest, FalseDest)) &&
   2108       BI.getCondition()->hasOneUse())
   2109     if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
   2110         FPred == FCmpInst::FCMP_OGE) {
   2111       FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
   2112       Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
   2113 
   2114       // Swap Destinations and condition.
   2115       BI.swapSuccessors();
   2116       Worklist.Add(Cond);
   2117       return &BI;
   2118     }
   2119 
   2120   // Canonicalize icmp_ne -> icmp_eq
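          // Illustrative example: br (icmp ne %a, %b), %T, %F
          //   -> br (icmp eq %a, %b), %F, %T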
   2121   ICmpInst::Predicate IPred;
   2122   if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
   2123                       TrueDest, FalseDest)) &&
   2124       BI.getCondition()->hasOneUse())
   2125     if (IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
   2126         IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
   2127         IPred == ICmpInst::ICMP_SGE) {
   2128       ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
   2129       Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
   2130       // Swap Destinations and condition.
   2131       BI.swapSuccessors();
   2132       Worklist.Add(Cond);
   2133       return &BI;
   2134     }
   2135 
   2136   return nullptr;
   2137 }
   2138 
   2139 Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
   2140   Value *Cond = SI.getCondition();
   2141   unsigned BitWidth = cast<IntegerType>(Cond->getType())->getBitWidth();
   2142   APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
   2143   computeKnownBits(Cond, KnownZero, KnownOne, 0, &SI);
   2144   unsigned LeadingKnownZeros = KnownZero.countLeadingOnes();
   2145   unsigned LeadingKnownOnes = KnownOne.countLeadingOnes();
   2146 
   2147   // Compute the number of leading bits we can ignore.
   2148   for (auto &C : SI.cases()) {
   2149     LeadingKnownZeros = std::min(
   2150         LeadingKnownZeros, C.getCaseValue()->getValue().countLeadingZeros());
   2151     LeadingKnownOnes = std::min(
   2152         LeadingKnownOnes, C.getCaseValue()->getValue().countLeadingOnes());
   2153   }
   2154 
   2155   unsigned NewWidth = BitWidth - std::max(LeadingKnownZeros, LeadingKnownOnes);
   2156 
   2157   // Truncate the condition operand if the new type is equal to or larger than
   2158   // the largest legal integer type. We need to be conservative here since
   2159   // x86 generates redundant zero-extension instructions if the operand is
   2160   // truncated to i8 or i16.
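          // Illustrative example: on a target whose largest legal integer type
          // is i32, a switch on an i64 condition whose significant bits fit in
          // 32 bits is rewritten to switch on "trunc i64 %cond to i32", with
          // the case values truncated to match.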
   2161   bool TruncCond = false;
   2162   if (NewWidth > 0 && BitWidth > NewWidth &&
   2163       NewWidth >= DL.getLargestLegalIntTypeSize()) {
   2164     TruncCond = true;
   2165     IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
   2166     Builder->SetInsertPoint(&SI);
   2167     Value *NewCond = Builder->CreateTrunc(SI.getCondition(), Ty, "trunc");
   2168     SI.setCondition(NewCond);
   2169 
   2170     for (auto &C : SI.cases())
   2171       static_cast<SwitchInst::CaseIt *>(&C)->setValue(ConstantInt::get(
   2172           SI.getContext(), C.getCaseValue()->getValue().trunc(NewWidth)));
   2173   }
   2174 
   2175   if (Instruction *I = dyn_cast<Instruction>(Cond)) {
   2176     if (I->getOpcode() == Instruction::Add)
   2177       if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
   2178         // change 'switch (X+4) case 1:' into 'switch (X) case -3'
   2179         // Skip the first item since that's the default case.
   2180         for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
   2181              i != e; ++i) {
   2182           ConstantInt* CaseVal = i.getCaseValue();
   2183           Constant *LHS = CaseVal;
   2184           if (TruncCond)
   2185             LHS = LeadingKnownZeros
   2186                       ? ConstantExpr::getZExt(CaseVal, Cond->getType())
   2187                       : ConstantExpr::getSExt(CaseVal, Cond->getType());
   2188           Constant* NewCaseVal = ConstantExpr::getSub(LHS, AddRHS);
   2189           assert(isa<ConstantInt>(NewCaseVal) &&
   2190                  "Result of expression should be constant");
   2191           i.setValue(cast<ConstantInt>(NewCaseVal));
   2192         }
   2193         SI.setCondition(I->getOperand(0));
   2194         Worklist.Add(I);
   2195         return &SI;
   2196       }
   2197   }
   2198 
   2199   return TruncCond ? &SI : nullptr;
   2200 }
   2201 
   2202 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
   2203   Value *Agg = EV.getAggregateOperand();
   2204 
   2205   if (!EV.hasIndices())
   2206     return ReplaceInstUsesWith(EV, Agg);
   2207 
   2208   if (Value *V =
   2209           SimplifyExtractValueInst(Agg, EV.getIndices(), DL, TLI, DT, AC))
   2210     return ReplaceInstUsesWith(EV, V);
   2211 
   2212   if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
   2213     // We're extracting from an insertvalue instruction; compare the indices.
   2214     const unsigned *exti, *exte, *insi, *inse;
   2215     for (exti = EV.idx_begin(), insi = IV->idx_begin(),
   2216          exte = EV.idx_end(), inse = IV->idx_end();
   2217          exti != exte && insi != inse;
   2218          ++exti, ++insi) {
   2219       if (*insi != *exti)
   2220         // The insert and extract reference distinct elements.
   2221         // This means the extract is not influenced by the insert, and we can
   2222         // replace the aggregate operand of the extract with the aggregate
   2223         // operand of the insert. i.e., replace
   2224         // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
   2225         // %E = extractvalue { i32, { i32 } } %I, 0
   2226         // with
   2227         // %E = extractvalue { i32, { i32 } } %A, 0
   2228         return ExtractValueInst::Create(IV->getAggregateOperand(),
   2229                                         EV.getIndices());
   2230     }
   2231     if (exti == exte && insi == inse)
   2232       // Both iterators are at the end: Index lists are identical. Replace
   2233       // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
   2234       // %C = extractvalue { i32, { i32 } } %B, 1, 0
   2235       // with "i32 42"
   2236       return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
   2237     if (exti == exte) {
   2238       // The extract list is a prefix of the insert list. i.e. replace
   2239       // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
   2240       // %E = extractvalue { i32, { i32 } } %I, 1
   2241       // with
   2242       // %X = extractvalue { i32, { i32 } } %A, 1
   2243       // %E = insertvalue { i32 } %X, i32 42, 0
   2244       // by switching the order of the insert and extract (though the
   2245       // insertvalue should be left in, since it may have other uses).
   2246       Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
   2247                                                  EV.getIndices());
   2248       return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
   2249                                      makeArrayRef(insi, inse));
   2250     }
   2251     if (insi == inse)
   2252       // The insert list is a prefix of the extract list
   2253       // We can simply remove the common indices from the extract and make it
   2254       // operate on the inserted value instead of the insertvalue result.
   2255       // i.e., replace
   2256       // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
   2257       // %E = extractvalue { i32, { i32 } } %I, 1, 0
   2258       // with
   2259       // %E extractvalue { i32 } { i32 42 }, 0
   2260       return ExtractValueInst::Create(IV->getInsertedValueOperand(),
   2261                                       makeArrayRef(exti, exte));
   2262   }
   2263   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
   2264     // We're extracting from an intrinsic, see if we're the only user, which
   2265     // allows us to simplify multiple result intrinsics to simpler things that
   2266     // just get one value.
   2267     if (II->hasOneUse()) {
   2268       // Check if we're grabbing the overflow bit or the result of a 'with
   2269       // overflow' intrinsic.  If it's the latter we can remove the intrinsic
   2270       // and replace it with a traditional binary instruction.
   2271       switch (II->getIntrinsicID()) {
   2272       case Intrinsic::uadd_with_overflow:
   2273       case Intrinsic::sadd_with_overflow:
   2274         if (*EV.idx_begin() == 0) {  // Normal result.
   2275           Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
   2276           ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
   2277           EraseInstFromFunction(*II);
   2278           return BinaryOperator::CreateAdd(LHS, RHS);
   2279         }
   2280 
   2281         // If the normal result of the add is dead, and the RHS is a constant,
   2282         // we can transform this into a range comparison.
   2283         // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
   2284         if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
   2285           if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
   2286             return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
   2287                                 ConstantExpr::getNot(CI));
   2288         break;
   2289       case Intrinsic::usub_with_overflow:
   2290       case Intrinsic::ssub_with_overflow:
   2291         if (*EV.idx_begin() == 0) {  // Normal result.
   2292           Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
   2293           ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
   2294           EraseInstFromFunction(*II);
   2295           return BinaryOperator::CreateSub(LHS, RHS);
   2296         }
   2297         break;
   2298       case Intrinsic::umul_with_overflow:
   2299       case Intrinsic::smul_with_overflow:
   2300         if (*EV.idx_begin() == 0) {  // Normal result.
   2301           Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
   2302           ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
   2303           EraseInstFromFunction(*II);
   2304           return BinaryOperator::CreateMul(LHS, RHS);
   2305         }
   2306         break;
   2307       default:
   2308         break;
   2309       }
   2310     }
   2311   }
   2312   if (LoadInst *L = dyn_cast<LoadInst>(Agg))
   2313     // If the (non-volatile) load only has one use, we can rewrite this to a
   2314     // load from a GEP. This reduces the size of the load. If a load is used
   2315     // only by extractvalue instructions then this either must have been
   2316     // optimized before, or it is a struct with padding, in which case we
   2317     // don't want to do the transformation as it loses padding knowledge.
   2318     if (L->isSimple() && L->hasOneUse()) {
   2319       // extractvalue has integer indices, getelementptr has Value*s. Convert.
   2320       SmallVector<Value*, 4> Indices;
   2321       // Prefix an i32 0 since we need the first element.
   2322       Indices.push_back(Builder->getInt32(0));
   2323       for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
   2324             I != E; ++I)
   2325         Indices.push_back(Builder->getInt32(*I));
   2326 
   2327       // We need to insert these at the location of the old load, not at that of
   2328       // the extractvalue.
   2329       Builder->SetInsertPoint(L);
   2330       Value *GEP = Builder->CreateInBoundsGEP(L->getType(),
   2331                                               L->getPointerOperand(), Indices);
   2332       // Returning the load directly will cause the main loop to insert it in
   2333       // the wrong spot, so use ReplaceInstUsesWith().
   2334       return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP));
   2335     }
   2336   // We could simplify extracts from other values. Note that nested extracts may
   2337   // already be simplified implicitly by the above: extract (extract (insert) )
   2338   // will be translated into extract ( insert ( extract ) ) first and then just
   2339   // the value inserted, if appropriate. Similarly for extracts from single-use
   2340   // loads: extract (extract (load)) will be translated to extract (load (gep))
   2341   // and if again single-use then via load (gep (gep)) to load (gep).
   2342   // However, double extracts from e.g. function arguments or return values
   2343   // aren't handled yet.
   2344   return nullptr;
   2345 }
   2346 
   2347 /// Return 'true' if the given typeinfo will match anything.
   2348 static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
   2349   switch (Personality) {
   2350   case EHPersonality::GNU_C:
   2351     // The GCC C EH personality only exists to support cleanups, so it's not
   2352     // clear what the semantics of catch clauses are.
   2353     return false;
   2354   case EHPersonality::Unknown:
   2355     return false;
   2356   case EHPersonality::GNU_Ada:
   2357     // While __gnat_all_others_value will match any Ada exception, it doesn't
   2358     // match foreign exceptions (or didn't, before gcc-4.7).
   2359     return false;
   2360   case EHPersonality::GNU_CXX:
   2361   case EHPersonality::GNU_ObjC:
   2362   case EHPersonality::MSVC_X86SEH:
   2363   case EHPersonality::MSVC_Win64SEH:
   2364   case EHPersonality::MSVC_CXX:
   2365   case EHPersonality::CoreCLR:
   2366     return TypeInfo->isNullValue();
   2367   }
   2368   llvm_unreachable("invalid enum");
   2369 }
   2370 
   2371 static bool shorter_filter(const Value *LHS, const Value *RHS) {
   2372   return cast<ArrayType>(LHS->getType())->getNumElements() <
   2373          cast<ArrayType>(RHS->getType())->getNumElements();
   2374 }
   2377 
   2378 Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
   2379   // The logic here should be correct for any real-world personality function.
   2380   // However if that turns out not to be true, the offending logic can always
   2381   // be conditioned on the personality function, like the catch-all logic is.
   2382   EHPersonality Personality =
   2383       classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
   2384 
   2385   // Simplify the list of clauses, e.g. by removing repeated catch clauses
   2386   // (these are often created by inlining).
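          // A hedged example (hypothetical typeinfo @_ZTIi, treated as i8*):
          //   %lp = landingpad { i8*, i32 } catch i8* @_ZTIi catch i8* @_ZTIi
          // is rebuilt below with a single "catch i8* @_ZTIi" clause.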
   2387   bool MakeNewInstruction = false; // If true, recreate using the following:
   2388   SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
   2389   bool CleanupFlag = LI.isCleanup();   // - The new instruction is a cleanup.
   2390 
   2391   SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
   2392   for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
   2393     bool isLastClause = i + 1 == e;
   2394     if (LI.isCatch(i)) {
   2395       // A catch clause.
   2396       Constant *CatchClause = LI.getClause(i);
   2397       Constant *TypeInfo = CatchClause->stripPointerCasts();
   2398 
   2399       // If we already saw this clause, there is no point in having a second
   2400       // copy of it.
   2401       if (AlreadyCaught.insert(TypeInfo).second) {
   2402         // This catch clause was not already seen.
   2403         NewClauses.push_back(CatchClause);
   2404       } else {
   2405         // Repeated catch clause - drop the redundant copy.
   2406         MakeNewInstruction = true;
   2407       }
   2408 
   2409       // If this is a catch-all then there is no point in keeping any following
   2410       // clauses or marking the landingpad as having a cleanup.
   2411       if (isCatchAll(Personality, TypeInfo)) {
   2412         if (!isLastClause)
   2413           MakeNewInstruction = true;
   2414         CleanupFlag = false;
   2415         break;
   2416       }
   2417     } else {
   2418       // A filter clause.  If any of the filter elements were already caught
   2419       // then they can be dropped from the filter.  It is tempting to try to
   2420       // exploit the filter further by saying that any typeinfo that does not
   2421       // occur in the filter can't be caught later (and thus can be dropped).
   2422       // However this would be wrong, since typeinfos can match without being
   2423       // equal (for example if one represents a C++ class, and the other some
   2424       // class derived from it).
   2425       assert(LI.isFilter(i) && "Unsupported landingpad clause!");
   2426       Constant *FilterClause = LI.getClause(i);
   2427       ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
   2428       unsigned NumTypeInfos = FilterType->getNumElements();
   2429 
   2430       // An empty filter catches everything, so there is no point in keeping any
   2431       // following clauses or marking the landingpad as having a cleanup.  By
   2432       // dealing with this case here the following code is made a bit simpler.
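              // E.g. (illustrative) "filter [0 x i8*] zeroinitializer" is such an
              // empty filter: it matches any exception, so the unwinder never
              // consults anything after it.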
   2433       if (!NumTypeInfos) {
   2434         NewClauses.push_back(FilterClause);
   2435         if (!isLastClause)
   2436           MakeNewInstruction = true;
   2437         CleanupFlag = false;
   2438         break;
   2439       }
   2440 
   2441       bool MakeNewFilter = false; // If true, make a new filter.
   2442       SmallVector<Constant *, 16> NewFilterElts; // New elements.
   2443       if (isa<ConstantAggregateZero>(FilterClause)) {
   2444         // Not an empty filter - it contains at least one null typeinfo.
   2445         assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
   2446         Constant *TypeInfo =
   2447           Constant::getNullValue(FilterType->getElementType());
   2448         // If this typeinfo is a catch-all then the filter can never match.
   2449         if (isCatchAll(Personality, TypeInfo)) {
   2450           // Throw the filter away.
   2451           MakeNewInstruction = true;
   2452           continue;
   2453         }
   2454 
   2455         // There is no point in having multiple copies of this typeinfo, so
   2456         // discard all but the first copy if there is more than one.
   2457         NewFilterElts.push_back(TypeInfo);
   2458         if (NumTypeInfos > 1)
   2459           MakeNewFilter = true;
   2460       } else {
   2461         ConstantArray *Filter = cast<ConstantArray>(FilterClause);
   2462         SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
   2463         NewFilterElts.reserve(NumTypeInfos);
   2464 
   2465         // Remove any filter elements that were already caught or that already
   2466         // occurred in the filter.  While there, see if any of the elements are
   2467         // catch-alls.  If so, the filter can be discarded.
   2468         bool SawCatchAll = false;
   2469         for (unsigned j = 0; j != NumTypeInfos; ++j) {
   2470           Constant *Elt = Filter->getOperand(j);
   2471           Constant *TypeInfo = Elt->stripPointerCasts();
   2472           if (isCatchAll(Personality, TypeInfo)) {
   2473             // This element is a catch-all.  Bail out, noting this fact.
   2474             SawCatchAll = true;
   2475             break;
   2476           }
   2477 
   2478           // Even if we've seen a type in a catch clause, we don't want to
   2479           // remove it from the filter.  An unexpected type handler may be
   2480           // set up for a call site which throws an exception of the same
   2481           // type caught.  In order for the exception thrown by the unexpected
   2482           // handler to propagate correctly, the filter must be correctly
   2483           // described for the call site.
   2484           //
   2485           // Example:
   2486           //
   2487           // void unexpected() { throw 1;}
   2488           // void foo() throw (int) {
   2489           //   std::set_unexpected(unexpected);
   2490           //   try {
   2491           //     throw 2.0;
   2492           //   } catch (int i) {}
   2493           // }
   2494 
   2495           // There is no point in having multiple copies of the same typeinfo in
   2496           // a filter, so only add it if we didn't already.
   2497           if (SeenInFilter.insert(TypeInfo).second)
   2498             NewFilterElts.push_back(cast<Constant>(Elt));
   2499         }
   2500         // A filter containing a catch-all cannot match anything by definition.
   2501         if (SawCatchAll) {
   2502           // Throw the filter away.
   2503           MakeNewInstruction = true;
   2504           continue;
   2505         }
   2506 
   2507         // If we dropped something from the filter, make a new one.
   2508         if (NewFilterElts.size() < NumTypeInfos)
   2509           MakeNewFilter = true;
   2510       }
   2511       if (MakeNewFilter) {
   2512         FilterType = ArrayType::get(FilterType->getElementType(),
   2513                                     NewFilterElts.size());
   2514         FilterClause = ConstantArray::get(FilterType, NewFilterElts);
   2515         MakeNewInstruction = true;
   2516       }
   2517 
   2518       NewClauses.push_back(FilterClause);
   2519 
   2520       // If the new filter is empty then it will catch everything so there is
   2521       // no point in keeping any following clauses or marking the landingpad
   2522       // as having a cleanup.  The case of the original filter being empty was
   2523       // already handled above.
   2524       if (MakeNewFilter && !NewFilterElts.size()) {
   2525         assert(MakeNewInstruction && "New filter but not a new instruction!");
   2526         CleanupFlag = false;
   2527         break;
   2528       }
   2529     }
   2530   }
   2531 
   2532   // If several filters occur in a row then reorder them so that the shortest
   2533   // filters come first (those with the smallest number of elements).  This is
   2534   // advantageous because shorter filters are more likely to match, speeding up
   2535   // unwinding, but mostly because it increases the effectiveness of the other
   2536   // filter optimizations below.
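          // E.g. (illustrative) a run of clauses
          //   filter [2 x i8*] [i8* @t1, i8* @t2], filter [1 x i8*] [i8* @t3]
          // is reordered so that the single-element filter comes first.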
   2537   for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
   2538     unsigned j;
   2539     // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
   2540     for (j = i; j != e; ++j)
   2541       if (!isa<ArrayType>(NewClauses[j]->getType()))
   2542         break;
   2543 
   2544     // Check whether the filters are already sorted by length.  We need to know
   2545     // if sorting them is actually going to do anything so that we only make a
   2546     // new landingpad instruction if it does.
   2547     for (unsigned k = i; k + 1 < j; ++k)
   2548       if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
   2549         // Not sorted, so sort the filters now.  Doing an unstable sort would be
   2550         // correct too but reordering filters pointlessly might confuse users.
   2551         std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
   2552                          shorter_filter);
   2553         MakeNewInstruction = true;
   2554         break;
   2555       }
   2556 
   2557     // Look for the next batch of filters.
   2558     i = j + 1;
   2559   }
   2560 
   2561   // If typeinfos matched only when equal, then the elements of a filter L
   2562   // that occurs later than a filter F could be replaced by the intersection of
   2563   // the elements of F and L.  In reality two typeinfos can match without being
   2564   // equal (for example if one represents a C++ class, and the other some class
   2565   // derived from it) so it would be wrong to perform this transform in general.
   2566   // However the transform is correct and useful if F is a subset of L.  In that
   2567   // case L can be replaced by F, and thus removed altogether since repeating a
   2568   // filter is pointless.  So here we look at all pairs of filters F and L where
   2569   // L follows F in the list of clauses, and remove L if every element of F is
   2570   // an element of L.  This can occur when inlining C++ functions with exception
   2571   // specifications.
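          // E.g. (illustrative): if F = filter [1 x i8*] [i8* @ti] is followed by
          // L = filter [2 x i8*] [i8* @ti, i8* @tj], every element of F appears in
          // L, so L is redundant and can be dropped.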
   2572   for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
   2573     // Examine each filter in turn.
   2574     Value *Filter = NewClauses[i];
   2575     ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
   2576     if (!FTy)
   2577       // Not a filter - skip it.
   2578       continue;
   2579     unsigned FElts = FTy->getNumElements();
   2580     // Examine each filter following this one.  Doing this backwards means that
   2581     // we don't have to worry about filters disappearing under us when removed.
   2582     for (unsigned j = NewClauses.size() - 1; j != i; --j) {
   2583       Value *LFilter = NewClauses[j];
   2584       ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
   2585       if (!LTy)
   2586         // Not a filter - skip it.
   2587         continue;
   2588       // If Filter is a subset of LFilter, i.e. every element of Filter is also
   2589       // an element of LFilter, then discard LFilter.
   2590       SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
   2591       // If Filter is empty then it is a subset of LFilter.
   2592       if (!FElts) {
   2593         // Discard LFilter.
   2594         NewClauses.erase(J);
   2595         MakeNewInstruction = true;
   2596         // Move on to the next filter.
   2597         continue;
   2598       }
   2599       unsigned LElts = LTy->getNumElements();
   2600       // If Filter is longer than LFilter then it cannot be a subset of it.
   2601       if (FElts > LElts)
   2602         // Move on to the next filter.
   2603         continue;
   2604       // At this point we know that LFilter has at least one element.
   2605       if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
   2606         // Filter is a subset of LFilter iff Filter contains only zeros (as we
   2607         // already know that Filter is not longer than LFilter).
   2608         if (isa<ConstantAggregateZero>(Filter)) {
   2609           assert(FElts <= LElts && "Should have handled this case earlier!");
   2610           // Discard LFilter.
   2611           NewClauses.erase(J);
   2612           MakeNewInstruction = true;
   2613         }
   2614         // Move on to the next filter.
   2615         continue;
   2616       }
   2617       ConstantArray *LArray = cast<ConstantArray>(LFilter);
   2618       if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
   2619         // Since Filter is non-empty and contains only zeros, it is a subset of
   2620         // LFilter iff LFilter contains a zero.
   2621         assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
   2622         for (unsigned l = 0; l != LElts; ++l)
   2623           if (LArray->getOperand(l)->isNullValue()) {
   2624             // LFilter contains a zero - discard it.
   2625             NewClauses.erase(J);
   2626             MakeNewInstruction = true;
   2627             break;
   2628           }
   2629         // Move on to the next filter.
   2630         continue;
   2631       }
   2632       // At this point we know that both filters are ConstantArrays.  Loop over
   2633       // operands to see whether every element of Filter is also an element of
   2634       // LFilter.  Since filters tend to be short this is probably faster than
   2635       // using a method that scales nicely.
   2636       ConstantArray *FArray = cast<ConstantArray>(Filter);
   2637       bool AllFound = true;
   2638       for (unsigned f = 0; f != FElts; ++f) {
   2639         Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
   2640         AllFound = false;
   2641         for (unsigned l = 0; l != LElts; ++l) {
   2642           Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
   2643           if (LTypeInfo == FTypeInfo) {
   2644             AllFound = true;
   2645             break;
   2646           }
   2647         }
   2648         if (!AllFound)
   2649           break;
   2650       }
   2651       if (AllFound) {
   2652         // Discard LFilter.
   2653         NewClauses.erase(J);
   2654         MakeNewInstruction = true;
   2655       }
   2656       // Move on to the next filter.
   2657     }
   2658   }
   2659 
   2660   // If we changed any of the clauses, replace the old landingpad instruction
   2661   // with a new one.
   2662   if (MakeNewInstruction) {
   2663     LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
   2664                                                  NewClauses.size());
   2665     for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
   2666       NLI->addClause(NewClauses[i]);
   2667     // A landing pad with no clauses must have the cleanup flag set.  It is
   2668     // theoretically possible, though highly unlikely, that we eliminated all
   2669     // clauses.  If so, force the cleanup flag to true.
   2670     if (NewClauses.empty())
   2671       CleanupFlag = true;
   2672     NLI->setCleanup(CleanupFlag);
   2673     return NLI;
   2674   }
   2675 
   2676   // Even if none of the clauses changed, we may nonetheless have understood
   2677   // that the cleanup flag is pointless.  Clear it if so.
   2678   if (LI.isCleanup() != CleanupFlag) {
   2679     assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
   2680     LI.setCleanup(CleanupFlag);
   2681     return &LI;
   2682   }
   2683 
   2684   return nullptr;
   2685 }
   2686 
   2687 /// Try to move the specified instruction from its current block into the
   2688 /// beginning of DestBlock, which can only happen if it's safe to move the
   2689 /// instruction past all of the instructions between it and the end of its
   2690 /// block.
   2691 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
   2692   assert(I->hasOneUse() && "Invariants didn't hold!");
   2693 
   2694   // Cannot move PHIs, EH pads, terminators, or side-effecting instructions.
   2695   if (isa<PHINode>(I) || I->isEHPad() || I->mayHaveSideEffects() ||
   2696       isa<TerminatorInst>(I))
   2697     return false;
   2698 
   2699   // Do not sink alloca instructions out of the entry block.
   2700   if (isa<AllocaInst>(I) && I->getParent() ==
   2701         &DestBlock->getParent()->getEntryBlock())
   2702     return false;
   2703 
   2704   // Do not sink convergent call instructions.
   2705   if (auto *CI = dyn_cast<CallInst>(I)) {
   2706     if (CI->isConvergent())
   2707       return false;
   2708   }
   2709 
   2710   // We can only sink load instructions if there is nothing between the load and
   2711   // the end of the block that could change the value.
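          // E.g. a load cannot be sunk past a later store in its block, since the
          // store might write to the loaded address.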
   2712   if (I->mayReadFromMemory()) {
   2713     for (BasicBlock::iterator Scan = I->getIterator(),
   2714                               E = I->getParent()->end();
   2715          Scan != E; ++Scan)
   2716       if (Scan->mayWriteToMemory())
   2717         return false;
   2718   }
   2719 
   2720   BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
   2721   I->moveBefore(&*InsertPos);
   2722   ++NumSunkInst;
   2723   return true;
   2724 }
   2725 
   2726 bool InstCombiner::run() {
   2727   while (!Worklist.isEmpty()) {
   2728     Instruction *I = Worklist.RemoveOne();
   2729     if (I == nullptr) continue;  // skip null values.
   2730 
   2731     // Check to see if we can DCE the instruction.
   2732     if (isInstructionTriviallyDead(I, TLI)) {
   2733       DEBUG(dbgs() << "IC: DCE: " << *I << '\n');
   2734       EraseInstFromFunction(*I);
   2735       ++NumDeadInst;
   2736       MadeIRChange = true;
   2737       continue;
   2738     }
   2739 
   2740     // Instruction isn't dead, see if we can constant propagate it.
   2741     if (!I->use_empty() &&
   2742         (I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {
   2743       if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
   2744         DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
   2745 
   2746         // Add operands to the worklist.
   2747         ReplaceInstUsesWith(*I, C);
   2748         ++NumConstProp;
   2749         EraseInstFromFunction(*I);
   2750         MadeIRChange = true;
   2751         continue;
   2752       }
   2753     }
   2754 
   2755     // In general, it is possible for computeKnownBits to determine all bits in a
   2756     // value even when the operands are not all constants.
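            // E.g. (illustrative) in "%x = or i32 %y, -1" every bit of %x is known
            // to be one whatever %y is, so %x folds to the constant -1.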
   2757     if (!I->use_empty() && I->getType()->isIntegerTy()) {
   2758       unsigned BitWidth = I->getType()->getScalarSizeInBits();
   2759       APInt KnownZero(BitWidth, 0);
   2760       APInt KnownOne(BitWidth, 0);
   2761       computeKnownBits(I, KnownZero, KnownOne, /*Depth*/0, I);
   2762       if ((KnownZero | KnownOne).isAllOnesValue()) {
   2763         Constant *C = ConstantInt::get(I->getContext(), KnownOne);
   2764         DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C <<
   2765                         " from: " << *I << '\n');
   2766 
   2767         // Add operands to the worklist.
   2768         ReplaceInstUsesWith(*I, C);
   2769         ++NumConstProp;
   2770         EraseInstFromFunction(*I);
   2771         MadeIRChange = true;
   2772         continue;
   2773       }
   2774     }
   2775 
   2776     // See if we can trivially sink this instruction to a successor basic block.
   2777     if (I->hasOneUse()) {
   2778       BasicBlock *BB = I->getParent();
   2779       Instruction *UserInst = cast<Instruction>(*I->user_begin());
   2780       BasicBlock *UserParent;
   2781 
   2782       // Get the block the use occurs in.
   2783       if (PHINode *PN = dyn_cast<PHINode>(UserInst))
   2784         UserParent = PN->getIncomingBlock(*I->use_begin());
   2785       else
   2786         UserParent = UserInst->getParent();
   2787 
   2788       if (UserParent != BB) {
   2789         bool UserIsSuccessor = false;
   2790         // See if the user is one of our successors.
   2791         for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
   2792           if (*SI == UserParent) {
   2793             UserIsSuccessor = true;
   2794             break;
   2795           }
   2796 
   2797         // If the user is one of our immediate successors, and if that successor
   2798       // only has us as a predecessor (we'd have to split the critical edge
   2799         // otherwise), we can keep going.
   2800         if (UserIsSuccessor && UserParent->getSinglePredecessor()) {
   2801           // Okay, the CFG is simple enough, try to sink this instruction.
   2802           if (TryToSinkInstruction(I, UserParent)) {
   2803             MadeIRChange = true;
   2804           // We'll add uses of the sunk instruction below, but since
   2805           // sinking can expose opportunities for its *operands*, add them
   2806           // to the worklist.
   2807             for (Use &U : I->operands())
   2808               if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
   2809                 Worklist.Add(OpI);
   2810           }
   2811         }
   2812       }
   2813     }
   2814 
   2815     // Now that we have an instruction, try combining it to simplify it.
   2816     Builder->SetInsertPoint(I);
   2817     Builder->SetCurrentDebugLocation(I->getDebugLoc());
   2818 
   2819 #ifndef NDEBUG
   2820     std::string OrigI;
   2821 #endif
   2822     DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
   2823     DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
   2824 
   2825     if (Instruction *Result = visit(*I)) {
   2826       ++NumCombined;
   2827       // Should we replace the old instruction with a new one?
   2828       if (Result != I) {
   2829         DEBUG(dbgs() << "IC: Old = " << *I << '\n'
   2830                      << "    New = " << *Result << '\n');
   2831 
   2832         if (I->getDebugLoc())
   2833           Result->setDebugLoc(I->getDebugLoc());
   2834         // Everything uses the new instruction now.
   2835         I->replaceAllUsesWith(Result);
   2836 
   2837         // Move the name to the new instruction first.
   2838         Result->takeName(I);
   2839 
   2840         // Push the new instruction and any users onto the worklist.
   2841         Worklist.Add(Result);
   2842         Worklist.AddUsersToWorkList(*Result);
   2843 
   2844         // Insert the new instruction into the basic block...
   2845         BasicBlock *InstParent = I->getParent();
   2846         BasicBlock::iterator InsertPos = I->getIterator();
   2847 
   2848         // If we replace a PHI with something that isn't a PHI, fix up the
   2849         // insertion point.
   2850         if (!isa<PHINode>(Result) && isa<PHINode>(InsertPos))
   2851           InsertPos = InstParent->getFirstInsertionPt();
   2852 
   2853         InstParent->getInstList().insert(InsertPos, Result);
   2854 
   2855         EraseInstFromFunction(*I);
   2856       } else {
   2857 #ifndef NDEBUG
   2858         DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
   2859                      << "    New = " << *I << '\n');
   2860 #endif
   2861 
   2862         // If the instruction was modified, it's possible that it is now dead.
   2863         // If so, remove it.
   2864         if (isInstructionTriviallyDead(I, TLI)) {
   2865           EraseInstFromFunction(*I);
   2866         } else {
   2867           Worklist.Add(I);
   2868           Worklist.AddUsersToWorkList(*I);
   2869         }
   2870       }
   2871       MadeIRChange = true;
   2872     }
   2873   }
   2874 
   2875   Worklist.Zap();
   2876   return MadeIRChange;
   2877 }
   2878 
   2879 /// Walk the function in depth-first order, adding all reachable code to the
   2880 /// worklist.
   2881 ///
   2882 /// This has a couple of tricks to make the code faster and more powerful.  In
   2883 /// particular, we constant fold and DCE instructions as we go, to avoid adding
   2884 /// them to the worklist (this significantly speeds up instcombine on code where
   2885 /// many instructions are dead or constant).  Additionally, if we find a branch
   2886 /// whose condition is a known constant, we only visit the reachable successors.
   2887 ///
   2888 static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
   2889                                        SmallPtrSetImpl<BasicBlock *> &Visited,
   2890                                        InstCombineWorklist &ICWorklist,
   2891                                        const TargetLibraryInfo *TLI) {
   2892   bool MadeIRChange = false;
   2893   SmallVector<BasicBlock*, 256> Worklist;
   2894   Worklist.push_back(BB);
   2895 
   2896   SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
   2897   DenseMap<ConstantExpr*, Constant*> FoldedConstants;
   2898 
   2899   do {
   2900     BB = Worklist.pop_back_val();
   2901 
   2902     // We have now visited this block!  If we've already been here, ignore it.
   2903     if (!Visited.insert(BB).second)
   2904       continue;
   2905 
   2906     for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
   2907       Instruction *Inst = &*BBI++;
   2908 
   2909       // DCE instruction if trivially dead.
   2910       if (isInstructionTriviallyDead(Inst, TLI)) {
   2911         ++NumDeadInst;
   2912         DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
   2913         Inst->eraseFromParent();
   2914         continue;
   2915       }
   2916 
   2917       // ConstantProp instruction if trivially constant.
   2918       if (!Inst->use_empty() &&
   2919           (Inst->getNumOperands() == 0 || isa<Constant>(Inst->getOperand(0))))
   2920         if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {
   2921           DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: "
   2922                        << *Inst << '\n');
   2923           Inst->replaceAllUsesWith(C);
   2924           ++NumConstProp;
   2925           Inst->eraseFromParent();
   2926           continue;
   2927         }
   2928 
   2929       // See if we can constant fold its operands.
   2930       for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end(); i != e;
   2931            ++i) {
   2932         ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
   2933         if (CE == nullptr)
   2934           continue;
   2935 
   2936         Constant *&FoldRes = FoldedConstants[CE];
   2937         if (!FoldRes)
   2938           FoldRes = ConstantFoldConstantExpression(CE, DL, TLI);
   2939         if (!FoldRes)
   2940           FoldRes = CE;
   2941 
   2942         if (FoldRes != CE) {
   2943           *i = FoldRes;
   2944           MadeIRChange = true;
   2945         }
   2946       }
   2947 
   2948       InstrsForInstCombineWorklist.push_back(Inst);
   2949     }
   2950 
   2951     // Recursively visit successors.  If this is a branch or switch on a
   2952     // constant, only visit the reachable successor.
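            // E.g. (illustrative) for "br i1 false, label %a, label %b" only %b is
            // pushed onto the block worklist; %a is treated as unreachable.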
   2953     TerminatorInst *TI = BB->getTerminator();
   2954     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
   2955       if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
   2956         bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
   2957         BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
   2958         Worklist.push_back(ReachableBB);
   2959         continue;
   2960       }
   2961     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
   2962       if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
   2963         // Only the destination of the matched case is reachable; if no case
   2964         // matches, findCaseValue returns the default case, whose successor
   2965         // is the default destination.
   2966         BasicBlock *ReachableBB = SI->findCaseValue(Cond).getCaseSuccessor();
   2967         Worklist.push_back(ReachableBB);
   2968         continue;
   2975       }
   2976     }
   2977 
   2978     for (BasicBlock *SuccBB : TI->successors())
   2979       Worklist.push_back(SuccBB);
   2980   } while (!Worklist.empty());
   2981 
   2982   // Once we've found all of the instructions to add to instcombine's worklist,
   2983   // add them in reverse order.  This way instcombine will visit from the top
   2984   // of the function down.  This jibes well with the way that it adds all uses
   2985   // of instructions to the worklist after doing a transformation, thus avoiding
   2986   // some N^2 behavior in pathological cases.
   2987   ICWorklist.AddInitialGroup(InstrsForInstCombineWorklist);
   2988 
   2989   return MadeIRChange;
   2990 }
   2991 
   2992 /// \brief Populate the IC worklist from a function, and prune any dead basic
   2993 /// blocks discovered in the process.
   2994 ///
   2995 /// This also does basic constant propagation and other forward fixing to make
   2996 /// the combiner itself run much faster.
   2997 static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,
   2998                                           TargetLibraryInfo *TLI,
   2999                                           InstCombineWorklist &ICWorklist) {
   3000   bool MadeIRChange = false;
   3001 
   3002   // Do a depth-first traversal of the function, populate the worklist with
   3003   // the reachable instructions.  Ignore blocks that are not reachable.  Keep
   3004   // track of which blocks we visit.
   3005   SmallPtrSet<BasicBlock *, 64> Visited;
   3006   MadeIRChange |=
   3007       AddReachableCodeToWorklist(&F.front(), DL, Visited, ICWorklist, TLI);
   3008 
   3009   // Do a quick scan over the function.  If we find any blocks that are
   3010   // unreachable, remove any instructions inside of them.  This prevents
   3011   // the instcombine code from having to deal with some bad special cases.
   3012   for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
   3013     if (Visited.count(&*BB))
   3014       continue;
   3015 
   3016     // Delete the instructions backwards, as doing so reduces how many
   3017     // def-use and use-def chains need to be updated.
   3018     Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
   3019     while (EndInst != &BB->front()) {
   3020       // Delete the next to last instruction.
   3021       Instruction *Inst = &*--EndInst->getIterator();
   3022       if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
   3023         Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
   3024       if (Inst->isEHPad()) {
   3025         EndInst = Inst;
   3026         continue;
   3027       }
   3028       if (!isa<DbgInfoIntrinsic>(Inst)) {
   3029         ++NumDeadInst;
   3030         MadeIRChange = true;
   3031       }
   3032       if (!Inst->getType()->isTokenTy())
   3033         Inst->eraseFromParent();
   3034     }
   3035   }
   3036 
   3037   return MadeIRChange;
   3038 }
   3039 
   3040 static bool
   3041 combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
   3042                                 AliasAnalysis *AA, AssumptionCache &AC,
   3043                                 TargetLibraryInfo &TLI, DominatorTree &DT,
   3044                                 LoopInfo *LI = nullptr) {
   3045   auto &DL = F.getParent()->getDataLayout();
   3046 
   3047   // Builder - This is an IRBuilder that automatically inserts new
   3048   // instructions into the worklist when they are created.
   3049   IRBuilder<true, TargetFolder, InstCombineIRInserter> Builder(
   3050       F.getContext(), TargetFolder(DL), InstCombineIRInserter(Worklist, &AC));
   3051 
   3052   // Lower dbg.declare intrinsics; otherwise their value may be clobbered
   3053   // by the instcombiner.
   3054   bool DbgDeclaresChanged = LowerDbgDeclare(F);
   3055 
   3056   // Iterate while there is work to do.
   3057   int Iteration = 0;
   3058   for (;;) {
   3059     ++Iteration;
   3060     DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
   3061                  << F.getName() << "\n");
   3062 
   3063     bool Changed = false;
   3064     if (prepareICWorklistFromFunction(F, DL, &TLI, Worklist))
   3065       Changed = true;
   3066 
   3067     InstCombiner IC(Worklist, &Builder, F.optForMinSize(),
   3068                     AA, &AC, &TLI, &DT, DL, LI);
   3069     if (IC.run())
   3070       Changed = true;
   3071 
   3072     if (!Changed)
   3073       break;
   3074   }
   3075 
   3076   return DbgDeclaresChanged || Iteration > 1;
   3077 }
   3078 
   3079 PreservedAnalyses InstCombinePass::run(Function &F,
   3080                                        AnalysisManager<Function> *AM) {
   3081   auto &AC = AM->getResult<AssumptionAnalysis>(F);
   3082   auto &DT = AM->getResult<DominatorTreeAnalysis>(F);
   3083   auto &TLI = AM->getResult<TargetLibraryAnalysis>(F);
   3084 
   3085   auto *LI = AM->getCachedResult<LoopAnalysis>(F);
   3086 
   3087   // FIXME: The AliasAnalysis is not yet supported in the new pass manager
   3088   if (!combineInstructionsOverFunction(F, Worklist, nullptr, AC, TLI, DT, LI))
   3089     // No changes, all analyses are preserved.
   3090     return PreservedAnalyses::all();
   3091 
   3092   // Mark all the analyses that instcombine updates as preserved.
   3093   // FIXME: Need a way to preserve CFG analyses here!
   3094   PreservedAnalyses PA;
   3095   PA.preserve<DominatorTreeAnalysis>();
   3096   return PA;
   3097 }
   3098 
   3099 namespace {
   3100 /// \brief The legacy pass manager's instcombine pass.
   3101 ///
   3102 /// This is a basic whole-function wrapper around the instcombine utility. It
   3103 /// will try to combine all instructions in the function.
   3104 class InstructionCombiningPass : public FunctionPass {
   3105   InstCombineWorklist Worklist;
   3106 
   3107 public:
   3108   static char ID; // Pass identification, replacement for typeid
   3109 
   3110   InstructionCombiningPass() : FunctionPass(ID) {
   3111     initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
   3112   }
   3113 
   3114   void getAnalysisUsage(AnalysisUsage &AU) const override;
   3115   bool runOnFunction(Function &F) override;
   3116 };
   3117 }
   3118 
   3119 void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
   3120   AU.setPreservesCFG();
   3121   AU.addRequired<AAResultsWrapperPass>();
   3122   AU.addRequired<AssumptionCacheTracker>();
   3123   AU.addRequired<TargetLibraryInfoWrapperPass>();
   3124   AU.addRequired<DominatorTreeWrapperPass>();
   3125   AU.addPreserved<DominatorTreeWrapperPass>();
   3126   AU.addPreserved<GlobalsAAWrapperPass>();
   3127 }
   3128 
   3129 bool InstructionCombiningPass::runOnFunction(Function &F) {
   3130   if (skipOptnoneFunction(F))
   3131     return false;
   3132 
   3133   // Required analyses.
   3134   auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
   3135   auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
   3136   auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
   3137   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
   3138 
   3139   // Optional analyses.
   3140   auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
   3141   auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
   3142 
   3143   return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, DT, LI);
   3144 }
   3145 
   3146 char InstructionCombiningPass::ID = 0;
   3147 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
   3148                       "Combine redundant instructions", false, false)
   3149 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
   3150 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
   3151 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
   3152 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
   3153 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
   3154 INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
   3155                     "Combine redundant instructions", false, false)
   3156 
   3157 // Initialization Routines
   3158 void llvm::initializeInstCombine(PassRegistry &Registry) {
   3159   initializeInstructionCombiningPassPass(Registry);
   3160 }
   3161 
   3162 void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
   3163   initializeInstructionCombiningPassPass(*unwrap(R));
   3164 }
   3165 
   3166 FunctionPass *llvm::createInstructionCombiningPass() {
   3167   return new InstructionCombiningPass();
   3168 }
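        
        // A minimal usage sketch (hypothetical driver code, not part of this file),
        // assuming the legacy pass manager:
        //   legacy::PassManager PM;
        //   PM.add(createInstructionCombiningPass());
        //   PM.run(*M);  // M is a Module*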
   3169