//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "globalopt"
#include "llvm/Transforms/IPO.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute, "Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumFnDeleted , "Number of functions deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");

namespace {
  struct GlobalStatus;
  struct GlobalOpt : public ModulePass {
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    }
    static char ID; // Pass identification, replacement for typeid
    GlobalOpt() : ModulePass(ID) {
      initializeGlobalOptPass(*PassRegistry::getPassRegistry());
    }

    bool runOnModule(Module &M);

  private:
    GlobalVariable *FindGlobalCtors(Module &M);
    bool OptimizeFunctions(Module &M);
    bool OptimizeGlobalVars(Module &M);
    bool OptimizeGlobalAliases(Module &M);
    bool OptimizeGlobalCtorsList(GlobalVariable *&GCL);
    bool ProcessGlobal(GlobalVariable *GV, Module::global_iterator &GVI);
    bool ProcessInternalGlobal(GlobalVariable *GV, Module::global_iterator &GVI,
                               const SmallPtrSet<const PHINode*, 16> &PHIUsers,
                               const GlobalStatus &GS);
    bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
  };
}

char GlobalOpt::ID = 0;
INITIALIZE_PASS(GlobalOpt, "globalopt",
                "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }

namespace {

/// GlobalStatus - As we analyze each global, keep track of some information
/// about it.  If we find out that the address of the global is taken, none of
/// this info will be accurate.
struct GlobalStatus {
  /// isCompared - True if the global's address is used in a comparison.
  bool isCompared;

  /// isLoaded - True if the global is ever loaded.  If the global isn't ever
  /// loaded it can be deleted.
  bool isLoaded;

  /// StoredType - Keep track of what stores to the global look like.
  ///
  enum StoredType {
    /// NotStored - There is no store to this global.  It can thus be marked
    /// constant.
    NotStored,

    /// isInitializerStored - This global is stored to, but the only thing
    /// stored is the constant it was initialized with.  This is only tracked
    /// for scalar globals.
    isInitializerStored,

    /// isStoredOnce - This global is stored to, but only its initializer and
    /// one other value is ever stored to it.  If this global isStoredOnce, we
    /// track the value stored to it in StoredOnceValue below.  This is only
    /// tracked for scalar globals.
    isStoredOnce,

    /// isStored - This global is stored to by multiple values or something else
    /// that we cannot track.
    isStored
  } StoredType;

  /// StoredOnceValue - If only one value (besides the initializer constant) is
  /// ever stored to this global, keep track of what value it is.
  Value *StoredOnceValue;

  /// AccessingFunction/HasMultipleAccessingFunctions - These start out
  /// null/false.  When the first accessing function is noticed, it is recorded.
  /// When a second different accessing function is noticed,
  /// HasMultipleAccessingFunctions is set to true.
  const Function *AccessingFunction;
  bool HasMultipleAccessingFunctions;

  /// HasNonInstructionUser - Set to true if this global has a user that is not
  /// an instruction (e.g. a constant expr or GV initializer).
  bool HasNonInstructionUser;

  /// HasPHIUser - Set to true if this global has a user that is a PHI node.
  bool HasPHIUser;

  GlobalStatus() : isCompared(false), isLoaded(false), StoredType(NotStored),
                   StoredOnceValue(0), AccessingFunction(0),
                   HasMultipleAccessingFunctions(false),
                   HasNonInstructionUser(false), HasPHIUser(false) {}
};

}

// SafeToDestroyConstant - It is safe to destroy a constant iff it is only used
// by constants itself.  Note that constants cannot be cyclic, so this test is
// pretty easy to implement recursively.
//
static bool SafeToDestroyConstant(const Constant *C) {
  if (isa<GlobalValue>(C)) return false;

  for (Value::const_use_iterator UI = C->use_begin(), E = C->use_end(); UI != E;
       ++UI)
    if (const Constant *CU = dyn_cast<Constant>(*UI)) {
      if (!SafeToDestroyConstant(CU)) return false;
    } else
      return false;
  return true;
}


/// AnalyzeGlobal - Look at all uses of the global and fill in the GlobalStatus
/// structure.  If the global has its address taken, return true to indicate we
/// can't do anything with it.
///
static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
                          SmallPtrSet<const PHINode*, 16> &PHIUsers) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const User *U = *UI;
    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      GS.HasNonInstructionUser = true;

      // If the result of the constantexpr isn't pointer type, then we won't
      // know to expect it in various places.  Just reject early.
      if (!isa<PointerType>(CE->getType())) return true;

      if (AnalyzeGlobal(CE, GS, PHIUsers)) return true;
    } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
      if (!GS.HasMultipleAccessingFunctions) {
        const Function *F = I->getParent()->getParent();
        if (GS.AccessingFunction == 0)
          GS.AccessingFunction = F;
        else if (GS.AccessingFunction != F)
          GS.HasMultipleAccessingFunctions = true;
      }
      if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
        GS.isLoaded = true;
        if (LI->isVolatile()) return true;  // Don't hack on volatile loads.
      } else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
        // Don't allow a store OF the address, only stores TO the address.
        if (SI->getOperand(0) == V) return true;

        if (SI->isVolatile()) return true;  // Don't hack on volatile stores.

        // If this is a direct store to the global (i.e., the global is a scalar
        // value, not an aggregate), keep more specific information about
        // stores.
        if (GS.StoredType != GlobalStatus::isStored) {
          if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(
                                                           SI->getOperand(1))) {
            Value *StoredVal = SI->getOperand(0);
            if (StoredVal == GV->getInitializer()) {
              if (GS.StoredType < GlobalStatus::isInitializerStored)
                GS.StoredType = GlobalStatus::isInitializerStored;
            } else if (isa<LoadInst>(StoredVal) &&
                       cast<LoadInst>(StoredVal)->getOperand(0) == GV) {
              if (GS.StoredType < GlobalStatus::isInitializerStored)
                GS.StoredType = GlobalStatus::isInitializerStored;
            } else if (GS.StoredType < GlobalStatus::isStoredOnce) {
              GS.StoredType = GlobalStatus::isStoredOnce;
              GS.StoredOnceValue = StoredVal;
            } else if (GS.StoredType == GlobalStatus::isStoredOnce &&
                       GS.StoredOnceValue == StoredVal) {
              // noop.
            } else {
              GS.StoredType = GlobalStatus::isStored;
            }
          } else {
            GS.StoredType = GlobalStatus::isStored;
          }
        }
      } else if (isa<GetElementPtrInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (isa<SelectInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (const PHINode *PN = dyn_cast<PHINode>(I)) {
        // PHI nodes we can check just like select or GEP instructions, but we
        // have to be careful about infinite recursion.
        if (PHIUsers.insert(PN))  // Not already visited.
          if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
        GS.HasPHIUser = true;
      } else if (isa<CmpInst>(I)) {
        GS.isCompared = true;
      } else if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
        if (MTI->isVolatile()) return true;
        if (MTI->getArgOperand(0) == V)
          GS.StoredType = GlobalStatus::isStored;
        if (MTI->getArgOperand(1) == V)
          GS.isLoaded = true;
      } else if (const MemSetInst *MSI = dyn_cast<MemSetInst>(I)) {
        assert(MSI->getArgOperand(0) == V && "Memset only takes one pointer!");
        if (MSI->isVolatile()) return true;
        GS.StoredType = GlobalStatus::isStored;
      } else {
        return true;  // Any other non-load instruction might take address!
      }
    } else if (const Constant *C = dyn_cast<Constant>(U)) {
      GS.HasNonInstructionUser = true;
      // We might have a dead and dangling constant hanging off of here.
      if (!SafeToDestroyConstant(C))
        return true;
    } else {
      GS.HasNonInstructionUser = true;
      // Otherwise must be some other user.
      return true;
    }
  }

  return false;
}

static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx) {
  ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
  if (!CI) return 0;
  unsigned IdxV = CI->getZExtValue();

  if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Agg)) {
    if (IdxV < CS->getNumOperands()) return CS->getOperand(IdxV);
  } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Agg)) {
    if (IdxV < CA->getNumOperands()) return CA->getOperand(IdxV);
  } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Agg)) {
    if (IdxV < CP->getNumOperands()) return CP->getOperand(IdxV);
  } else if (isa<ConstantAggregateZero>(Agg)) {
    if (StructType *STy = dyn_cast<StructType>(Agg->getType())) {
      if (IdxV < STy->getNumElements())
        return Constant::getNullValue(STy->getElementType(IdxV));
    } else if (SequentialType *STy =
               dyn_cast<SequentialType>(Agg->getType())) {
      return Constant::getNullValue(STy->getElementType());
    }
  } else if (isa<UndefValue>(Agg)) {
    if (StructType *STy = dyn_cast<StructType>(Agg->getType())) {
      if (IdxV < STy->getNumElements())
        return UndefValue::get(STy->getElementType(IdxV));
    } else if (SequentialType *STy =
               dyn_cast<SequentialType>(Agg->getType())) {
      return UndefValue::get(STy->getElementType());
    }
  }
  return 0;
}


/// CleanupConstantGlobalUsers - We just marked GV constant.  Loop over all
/// users of the global, cleaning up the obvious ones.  This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft.  This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
    User *U = *UI++;

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = 0;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit);
      } else if (CE->getOpcode() == Instruction::BitCast &&
                 CE->getType()->isPointerTy()) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, 0);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = 0;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE =
          dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (SafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        CleanupConstantGlobalUsers(V, Init);
        return true;
      }
    }
  }
  return Changed;
}

/// isSafeSROAElementUse - Return true if the specified instruction is a safe
/// user of a derived expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return SafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (GEPI == 0) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
       I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}


/// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value.
/// Look at it and its uses and decide whether it is safe to SROA this global.
///
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
    uint64_t NumElements = AT->getNumElements();
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    //
    if (Idx->getZExtValue() >= NumElements)
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants.  In particular, consider:
    // A[0][i].  We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI; // Skip array index.
         GEPI != E;
         ++GEPI) {
      uint64_t NumElements;
      if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
        NumElements = SubArrayTy->getNumElements();
      else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
        NumElements = SubVectorTy->getNumElements();
      else {
        assert((*GEPI)->isStructTy() &&
               "Indexed GEP type is not array, vector, or struct!");
        continue;
      }

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
        return false;
    }
  }

  for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}

/// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it
/// is safe for us to perform this transformation.
///
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI) {
    if (!IsUserOfGlobalSafeForSRA(*UI, GV))
      return false;
  }
  return true;
}


/// SRAGlobal - Perform scalar replacement of aggregates on the specified global
/// variable.  This opens the door for other optimizations by exposing the
/// behavior of the program in a more fine-grained way.  We have determined that
/// this transformation is safe already.  We return the first global variable we
/// insert so that the caller can reprocess it.
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return 0;

  assert(GV->hasLocalLinkage() && !GV->isConstant());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::vector<GlobalVariable*> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = TD.getABITypeAlignment(GV->getType());

  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    NewGlobals.reserve(STy->getNumElements());
    const StructLayout &Layout = *TD.getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Constant *In = getAggregateConstantElement(Init,
                     ConstantInt::get(Type::getInt32Ty(STy->getContext()), i));
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->isThreadLocal(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = 0;
    if (ArrayType *ATy = dyn_cast<ArrayType>(STy))
      NumElements = ATy->getNumElements();
    else
      NumElements = cast<VectorType>(STy)->getNumElements();

    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return 0; // It's not worth it.
    NewGlobals.reserve(NumElements);

    uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
    unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = getAggregateConstantElement(Init,
                    ConstantInt::get(Type::getInt32Ty(Init->getContext()), i));
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->isThreadLocal(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
    }
  }

  if (NewGlobals.empty())
    return 0;

  DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV);

  Constant *NullInt = Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->use_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore operand 1, which has to be zero or else the program is quite
    // broken (undefined).  Get operand 2, which is the structure or array
    // index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr),
                                                &Idxs[0], Idxs.size());
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(NewPtr, Idxs.begin(), Idxs.end(),
                                           GEPI->getName()+"."+Twine(Val),GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead.  This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0;
}

/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
/// value will trap if the value is dynamically null.  PHIs keeps track of any
/// phi nodes we've seen to avoid reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                         SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const User *U = *UI;

    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(UI->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }
  }
  return true;
}

/// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads
/// from GV will trap if the loaded value is null.  Note that this also permits
/// comparisons of the loaded value against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI) {
    const User *U = *UI;

    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  }
  return true;
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer!  Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate UI!
          UI = V->use_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
                          ConstantExpr::getGetElementPtr(NewV, &Idxs[0],
                                                         Idxs.size()));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}


/// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null
/// value stored into it.  If there are uses of the loaded value that would trap
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null value, so we can optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of uses of the stored value.
  for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV);
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
    CleanupConstantGlobalUsers(GV, 0);
    if (GV->use_empty()) {
      GV->eraseFromParent();
      ++NumDeleted;
    }
    Changed = true;
  }
  return Changed;
}

/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        I->eraseFromParent();
      }
}

/// OptimizeGlobalAddressOfMalloc - This function takes the specified global
/// variable, and transforms the program as if it always contained the result of
/// the specified malloc.  Because it is always the result of the specified
/// malloc, there is no reason to actually DO the malloc.  Instead, turn the
/// malloc into a global, and rewrite any loads of GV as uses of the new global.
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
                                                     CallInst *CI,
                                                     Type *AllocTy,
                                                     ConstantInt *NElements,
                                                     TargetData *TD) {
  DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory are
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
                                             GlobalType, false,
                                             GlobalValue::InternalLinkage,
                                             UndefValue::get(GlobalType),
                                             GV->getName()+".body",
                                             GV,
                                             GV->isThreadLocal());

  // If there are bitcast users of the malloc (which is typical, usually we have
  // a malloc + bitcast) then replace them with uses of the new global.  Update
  // other users to use the global as well.
  BitCastInst *TheBC = 0;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->use_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (TheBC == 0)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getType()->getElementType())
    RepValue = ConstantExpr::getBitCast(RepValue,
                                        GV->getType()->getElementType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
    new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                       GlobalValue::InternalLinkage,
                       ConstantInt::getFalse(GV->getContext()),
                       GV->getName()+".init", GV->isThreadLocal());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->use_back());
    while (!LI->use_empty()) {
      Use &LoadUse = LI->use_begin().getUse();
      if (!isa<ICmpInst>(LoadUse.getUser())) {
        LoadUse = RepValue;
        continue;
      }

      ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
      // Replace the cmp X, 0 with a use of the bool value.
      Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", ICI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->use_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV, InitBool);

  // Now the GV is dead, nuke it and the malloc.
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue);

  return NewGV;
}

/// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking
/// to make sure that there are no complex uses of V.  We permit simple things
/// like dereferencing the pointer, but not storing through the address, unless
/// it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                                      const GlobalVariable *GV,
                                         SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    const Instruction *Inst = cast<Instruction>(*UI);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN))
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV
/// somewhere.  Transform all uses of the allocation into loads from the
/// global and uses of the resultant pointer.  Further, delete the store into
/// GV.
/// This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->use_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi
/// of a load) are simple enough to perform heap SRA on.  This permits GEP's
/// that index through the array and struct field, icmps of null, and PHIs.
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                        SmallPtrSet<const PHINode*, 32> &LoadUsingPHIs,
                        SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const Instruction *User = cast<Instruction>(*UI);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(User)) {
      if (!LoadUsingPHIsPerLoad.insert(PN))
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN))
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}


/// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from
/// GV are simple enough to perform HeapSRA, return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI)
    if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or MI itself.
  for (SmallPtrSet<const PHINode*, 32>::const_iterator I = LoadUsingPHIs.begin()
       , E = LoadUsingPHIs.end(); I != E; ++I) {
    const PHINode *PN = *I;
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // UNDEF? NULL?

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}

static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
              DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                  std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite),
                          LI->getName()+".f"+Twine(FieldNo), LI);
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.
    StructType *ST =
      cast<StructType>(cast<PointerType>(PN->getType())->getElementType());

    PHINode *NewPN =
      PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)),
                      PN->getNumIncomingValues(),
                      PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  } else {
    llvm_unreachable("Unknown usable value");
    Result = 0;
  }

  return FieldVals[FieldNo] = Result;
}

/// RewriteHeapSROALoadUser - Given a load instruction and a value derived from
/// the load, rewrite the derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
              DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                  std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(NewPtr,
                                             GEPIdx.begin(), GEPIdx.end(),
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  bool Inserted;
  DenseMap<Value*, std::vector<Value*> >::iterator InsertPos;
  tie(InsertPos, Inserted) =
    InsertedScalarizedValues.insert(std::make_pair(PN, std::vector<Value*>()));
  if (!Inserted) return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}

/// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global.  Ptr
/// is a value loaded from the global.
/// Eliminate all uses of Ptr, making them use FieldGlobals instead.  All uses
/// of loaded values satisfy AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
              DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                  std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
       UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// PerformHeapAllocSRoA - CI is an allocation of an array of structures.  Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, TargetData *TD) {
  DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI << '\n');
  Type *MAT = getMallocAllocatedType(CI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value*> FieldGlobals;
  std::vector<Value*> FieldMallocs;

  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::getUnqual(FieldTy);

    GlobalVariable *NGV =
      new GlobalVariable(*GV->getParent(),
                         PFieldTy, false, GlobalValue::InternalLinkage,
                         Constant::getNullValue(PFieldTy),
                         GV->getName() + ".f" + Twine(FieldNo), GV,
                         GV->isThreadLocal());
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, 0,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at the
  // end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()),
                              "tmp");
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  /// InsertedScalarizedLoads - As we process loads, if we can't immediately
  /// update all uses of the load, keep track of what scalarized loads are
  /// inserted for a given load.
  DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;

  // Okay, the malloc site is completely handled.  All of the uses of GV are now
  // loads, and all uses of those loads are simple.  Rewrite them to use loads
  // of the per-field globals instead.
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
      Constant *Null = Constant::getNullValue(PT->getElementType());
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");

    // Add all the incoming values.  This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}

/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
/// pointer global variable with a single value stored into it that is a malloc
/// or cast of malloc.
static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
                                               CallInst *CI,
                                               Type *AllocTy,
                                               Module::global_iterator &GVI,
                                               TargetData *TD) {
  if (!TD)
    return false;

  // If this is a malloc of an abstract type, don't touch it.
  if (!AllocTy->isSized())
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached).  To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals.  This allows the
  // malloc to be stored into the specified global, loaded, setcc'd, and
  // GEP'd.  These are all things we could transform to using the global
  // for.
1512 SmallPtrSet<const PHINode*, 8> PHIs;
1513 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
1514 return false;
1515
1516 // If we have a global that is only initialized with a fixed size malloc,
1517 // transform the program to use global memory instead of malloc'd memory.
1518 // This eliminates dynamic allocation, avoids an indirection accessing the
1519 // data, and exposes the resultant global to further GlobalOpt.
1520 // We cannot optimize the malloc if we cannot determine the malloc array size.
1521 Value *NElems = getMallocArraySize(CI, TD, true);
1522 if (!NElems)
1523 return false;
1524
1525 if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
1526 // Restrict this transformation to only working on small allocations
1527 // (2048 bytes currently), as we don't want to introduce a 16M global or
1528 // something.
1529 if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
1530 GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD);
1531 return true;
1532 }
1533
1534 // If the allocation is an array of structures, consider transforming this
1535 // into multiple malloc'd arrays, one for each field. This is basically
1536 // SRoA for malloc'd memory.
1537
1538 // If this is an allocation of a fixed size array of structs, analyze as a
1539 // variable size array. malloc [100 x struct],1 -> malloc struct, 100
1540 if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
1541 if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
1542 AllocTy = AT->getElementType();
1543
1544 StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
1545 if (!AllocSTy)
1546 return false;
1547
1548 // If the structure has an unreasonable number of fields, leave it
1549 // alone.
1550 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
1551 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {
1552
1553 // If this is a fixed size array, transform the malloc to be a malloc of
1554 // structs. malloc [100 x struct],1 -> malloc struct, 100
1555 if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
1556 Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
1557 unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
1558 Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
1559 Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
1560 Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
1561 AllocSize, NumElements,
1562 0, CI->getName());
1563 Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
1564 CI->replaceAllUsesWith(Cast);
1565 CI->eraseFromParent();
1566 CI = dyn_cast<BitCastInst>(Malloc) ?
1567 extractMallocCallFromBitCast(Malloc) : cast<CallInst>(Malloc);
1568 }
1569
1570 GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true), TD);
1571 return true;
1572 }
1573
1574 return false;
1575 }
1576
1577 // OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
1578 // that only one value (besides its initializer) is ever stored to the global.
1579 static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
1580 Module::global_iterator &GVI,
1581 TargetData *TD) {
1582 // Ignore no-op GEPs and bitcasts.
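// stripPointerCasts looks through pointer bitcasts and all-zero GEPs, so a
// value stored as e.g. 'bitcast (i8* X to i32*)' is treated simply as X.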
1583 StoredOnceVal = StoredOnceVal->stripPointerCasts(); 1584 1585 // If we are dealing with a pointer global that is initialized to null and 1586 // only has one (non-null) value stored into it, then we can optimize any 1587 // users of the loaded value (often calls and loads) that would trap if the 1588 // value was null. 1589 if (GV->getInitializer()->getType()->isPointerTy() && 1590 GV->getInitializer()->isNullValue()) { 1591 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) { 1592 if (GV->getInitializer()->getType() != SOVC->getType()) 1593 SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType()); 1594 1595 // Optimize away any trapping uses of the loaded value. 1596 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC)) 1597 return true; 1598 } else if (CallInst *CI = extractMallocCall(StoredOnceVal)) { 1599 Type* MallocType = getMallocAllocatedType(CI); 1600 if (MallocType && TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, 1601 GVI, TD)) 1602 return true; 1603 } 1604 } 1605 1606 return false; 1607 } 1608 1609 /// TryToShrinkGlobalToBoolean - At this point, we have learned that the only 1610 /// two values ever stored into GV are its initializer and OtherVal. See if we 1611 /// can shrink the global into a boolean and select between the two values 1612 /// whenever it is used. This exposes the values to other scalar optimizations. 1613 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) { 1614 Type *GVElType = GV->getType()->getElementType(); 1615 1616 // If GVElType is already i1, it is already shrunk. If the type of the GV is 1617 // an FP value, pointer or vector, don't do this optimization because a select 1618 // between them is very expensive and unlikely to lead to later 1619 // simplification. In these cases, we typically end up with "cond ? v1 : v2" 1620 // where v1 and v2 both require constant pool loads, a big loss. 1621 if (GVElType == Type::getInt1Ty(GV->getContext()) || 1622 GVElType->isFloatingPointTy() || 1623 GVElType->isPointerTy() || GVElType->isVectorTy()) 1624 return false; 1625 1626 // Walk the use list of the global seeing if all the uses are load or store. 1627 // If there is anything else, bail out. 1628 for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){ 1629 User *U = *I; 1630 if (!isa<LoadInst>(U) && !isa<StoreInst>(U)) 1631 return false; 1632 } 1633 1634 DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV); 1635 1636 // Create the new global, initializing it to false. 1637 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()), 1638 false, 1639 GlobalValue::InternalLinkage, 1640 ConstantInt::getFalse(GV->getContext()), 1641 GV->getName()+".b", 1642 GV->isThreadLocal()); 1643 GV->getParent()->getGlobalList().insert(GV, NewGV); 1644 1645 Constant *InitVal = GV->getInitializer(); 1646 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) && 1647 "No reason to shrink to bool!"); 1648 1649 // If initialized to zero and storing one into the global, we can use a cast 1650 // instead of a select to synthesize the desired value. 1651 bool IsOneZero = false; 1652 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) 1653 IsOneZero = InitVal->isNullValue() && CI->isOne(); 1654 1655 while (!GV->use_empty()) { 1656 Instruction *UI = cast<Instruction>(GV->use_back()); 1657 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { 1658 // Change the store into a boolean store. 
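// Stores of the initializer become 'store i1 false', stores of OtherVal
// become 'store i1 true', and stores of a value previously loaded from GV
// are rewritten below in terms of the corresponding boolean load.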
1659 bool StoringOther = SI->getOperand(0) == OtherVal;
1660 // Only do this if we weren't storing a loaded value.
1661 Value *StoreVal;
1662 if (StoringOther || SI->getOperand(0) == InitVal)
1663 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
1664 StoringOther);
1665 else {
1666 // Otherwise, we are storing a previously loaded copy. To do this,
1667 // change the copy from copying the original value to just copying the
1668 // bool.
1669 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
1670
1671 // If we've already replaced the input, StoredVal will be a cast or
1672 // select instruction. If not, it will be a load of the original
1673 // global.
1674 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
1675 assert(LI->getOperand(0) == GV && "Not a copy!");
1676 // Insert a new load, to preserve the saved value.
1677 StoreVal = new LoadInst(NewGV, LI->getName()+".b", LI);
1678 } else {
1679 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
1680 "This is not a form that we understand!");
1681 StoreVal = StoredVal->getOperand(0);
1682 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
1683 }
1684 }
1685 new StoreInst(StoreVal, NewGV, SI);
1686 } else {
1687 // Change the load into a load of bool then a select.
1688 LoadInst *LI = cast<LoadInst>(UI);
1689 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", LI);
1690 Value *NSI;
1691 if (IsOneZero)
1692 NSI = new ZExtInst(NLI, LI->getType(), "", LI);
1693 else
1694 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
1695 NSI->takeName(LI);
1696 LI->replaceAllUsesWith(NSI);
1697 }
1698 UI->eraseFromParent();
1699 }
1700
1701 GV->eraseFromParent();
1702 return true;
1703 }
1704
1705
1706 /// ProcessGlobal - Analyze the specified global variable and optimize
1707 /// it if possible. If we make a change, return true.
1708 bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
1709 Module::global_iterator &GVI) {
1710 if (!GV->hasLocalLinkage())
1711 return false;
1712
1713 // Do more involved optimizations if the global is internal.
1714 GV->removeDeadConstantUsers();
1715
1716 if (GV->use_empty()) {
1717 DEBUG(dbgs() << "GLOBAL DEAD: " << *GV);
1718 GV->eraseFromParent();
1719 ++NumDeleted;
1720 return true;
1721 }
1722
1723 SmallPtrSet<const PHINode*, 16> PHIUsers;
1724 GlobalStatus GS;
1725
1726 if (AnalyzeGlobal(GV, GS, PHIUsers))
1727 return false;
1728
1729 if (!GS.isCompared && !GV->hasUnnamedAddr()) {
1730 GV->setUnnamedAddr(true);
1731 NumUnnamed++;
1732 }
1733
1734 if (GV->isConstant() || !GV->hasInitializer())
1735 return false;
1736
1737 return ProcessInternalGlobal(GV, GVI, PHIUsers, GS);
1738 }
1739
1740 /// ProcessInternalGlobal - Analyze the specified global variable and optimize
1741 /// it if possible. If we make a change, return true.
1742 bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
1743 Module::global_iterator &GVI,
1744 const SmallPtrSet<const PHINode*, 16> &PHIUsers,
1745 const GlobalStatus &GS) {
1746 // If this is a first class global and it has only one accessing function,
1747 // and that accessing function is main (which we know is not recursive),
1748 // then we replace the global with a local alloca in main, turning the
1749 // global into a local variable.
1750 //
1751 // NOTE: It doesn't make sense to promote non single-value types since we
1752 // are just replacing static memory with stack memory.
1753 //
1754 // If the global is in a different address space, don't bring it onto the stack.
1755 if (!GS.HasMultipleAccessingFunctions && 1756 GS.AccessingFunction && !GS.HasNonInstructionUser && 1757 GV->getType()->getElementType()->isSingleValueType() && 1758 GS.AccessingFunction->getName() == "main" && 1759 GS.AccessingFunction->hasExternalLinkage() && 1760 GV->getType()->getAddressSpace() == 0) { 1761 DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV); 1762 Instruction& FirstI = const_cast<Instruction&>(*GS.AccessingFunction 1763 ->getEntryBlock().begin()); 1764 Type* ElemTy = GV->getType()->getElementType(); 1765 // FIXME: Pass Global's alignment when globals have alignment 1766 AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI); 1767 if (!isa<UndefValue>(GV->getInitializer())) 1768 new StoreInst(GV->getInitializer(), Alloca, &FirstI); 1769 1770 GV->replaceAllUsesWith(Alloca); 1771 GV->eraseFromParent(); 1772 ++NumLocalized; 1773 return true; 1774 } 1775 1776 // If the global is never loaded (but may be stored to), it is dead. 1777 // Delete it now. 1778 if (!GS.isLoaded) { 1779 DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV); 1780 1781 // Delete any stores we can find to the global. We may not be able to 1782 // make it completely dead though. 1783 bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer()); 1784 1785 // If the global is dead now, delete it. 1786 if (GV->use_empty()) { 1787 GV->eraseFromParent(); 1788 ++NumDeleted; 1789 Changed = true; 1790 } 1791 return Changed; 1792 1793 } else if (GS.StoredType <= GlobalStatus::isInitializerStored) { 1794 DEBUG(dbgs() << "MARKING CONSTANT: " << *GV); 1795 GV->setConstant(true); 1796 1797 // Clean up any obviously simplifiable users now. 1798 CleanupConstantGlobalUsers(GV, GV->getInitializer()); 1799 1800 // If the global is dead now, just nuke it. 1801 if (GV->use_empty()) { 1802 DEBUG(dbgs() << " *** Marking constant allowed us to simplify " 1803 << "all users and delete global!\n"); 1804 GV->eraseFromParent(); 1805 ++NumDeleted; 1806 } 1807 1808 ++NumMarked; 1809 return true; 1810 } else if (!GV->getInitializer()->getType()->isSingleValueType()) { 1811 if (TargetData *TD = getAnalysisIfAvailable<TargetData>()) 1812 if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) { 1813 GVI = FirstNewGV; // Don't skip the newly produced globals! 1814 return true; 1815 } 1816 } else if (GS.StoredType == GlobalStatus::isStoredOnce) { 1817 // If the initial value for the global was an undef value, and if only 1818 // one other value was stored into it, we can just change the 1819 // initializer to be the stored value, then delete all stores to the 1820 // global. This allows us to mark it constant. 1821 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) 1822 if (isa<UndefValue>(GV->getInitializer())) { 1823 // Change the initial value here. 1824 GV->setInitializer(SOVConstant); 1825 1826 // Clean up any obviously simplifiable users now. 1827 CleanupConstantGlobalUsers(GV, GV->getInitializer()); 1828 1829 if (GV->use_empty()) { 1830 DEBUG(dbgs() << " *** Substituting initializer allowed us to " 1831 << "simplify all users and delete global!\n"); 1832 GV->eraseFromParent(); 1833 ++NumDeleted; 1834 } else { 1835 GVI = GV; 1836 } 1837 ++NumSubstitute; 1838 return true; 1839 } 1840 1841 // Try to optimize globals based on the knowledge that only one value 1842 // (besides its initializer) is ever stored to the global. 
1843 if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI, 1844 getAnalysisIfAvailable<TargetData>())) 1845 return true; 1846 1847 // Otherwise, if the global was not a boolean, we can shrink it to be a 1848 // boolean. 1849 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) 1850 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) { 1851 ++NumShrunkToBool; 1852 return true; 1853 } 1854 } 1855 1856 return false; 1857 } 1858 1859 /// ChangeCalleesToFastCall - Walk all of the direct calls of the specified 1860 /// function, changing them to FastCC. 1861 static void ChangeCalleesToFastCall(Function *F) { 1862 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){ 1863 CallSite User(cast<Instruction>(*UI)); 1864 User.setCallingConv(CallingConv::Fast); 1865 } 1866 } 1867 1868 static AttrListPtr StripNest(const AttrListPtr &Attrs) { 1869 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { 1870 if ((Attrs.getSlot(i).Attrs & Attribute::Nest) == 0) 1871 continue; 1872 1873 // There can be only one. 1874 return Attrs.removeAttr(Attrs.getSlot(i).Index, Attribute::Nest); 1875 } 1876 1877 return Attrs; 1878 } 1879 1880 static void RemoveNestAttribute(Function *F) { 1881 F->setAttributes(StripNest(F->getAttributes())); 1882 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){ 1883 CallSite User(cast<Instruction>(*UI)); 1884 User.setAttributes(StripNest(User.getAttributes())); 1885 } 1886 } 1887 1888 bool GlobalOpt::OptimizeFunctions(Module &M) { 1889 bool Changed = false; 1890 // Optimize functions. 1891 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) { 1892 Function *F = FI++; 1893 // Functions without names cannot be referenced outside this module. 1894 if (!F->hasName() && !F->isDeclaration()) 1895 F->setLinkage(GlobalValue::InternalLinkage); 1896 F->removeDeadConstantUsers(); 1897 if (F->use_empty() && (F->hasLocalLinkage() || F->hasLinkOnceLinkage())) { 1898 F->eraseFromParent(); 1899 Changed = true; 1900 ++NumFnDeleted; 1901 } else if (F->hasLocalLinkage()) { 1902 if (F->getCallingConv() == CallingConv::C && !F->isVarArg() && 1903 !F->hasAddressTaken()) { 1904 // If this function has C calling conventions, is not a varargs 1905 // function, and is only called directly, promote it to use the Fast 1906 // calling convention. 1907 F->setCallingConv(CallingConv::Fast); 1908 ChangeCalleesToFastCall(F); 1909 ++NumFastCallFns; 1910 Changed = true; 1911 } 1912 1913 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) && 1914 !F->hasAddressTaken()) { 1915 // The function is not used by a trampoline intrinsic, so it is safe 1916 // to remove the 'nest' attribute. 1917 RemoveNestAttribute(F); 1918 ++NumNestRemoved; 1919 Changed = true; 1920 } 1921 } 1922 } 1923 return Changed; 1924 } 1925 1926 bool GlobalOpt::OptimizeGlobalVars(Module &M) { 1927 bool Changed = false; 1928 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); 1929 GVI != E; ) { 1930 GlobalVariable *GV = GVI++; 1931 // Global variables without names cannot be referenced outside this module. 1932 if (!GV->hasName() && !GV->isDeclaration()) 1933 GV->setLinkage(GlobalValue::InternalLinkage); 1934 // Simplify the initializer. 
1935 if (GV->hasInitializer()) 1936 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) { 1937 TargetData *TD = getAnalysisIfAvailable<TargetData>(); 1938 Constant *New = ConstantFoldConstantExpression(CE, TD); 1939 if (New && New != CE) 1940 GV->setInitializer(New); 1941 } 1942 1943 Changed |= ProcessGlobal(GV, GVI); 1944 } 1945 return Changed; 1946 } 1947 1948 /// FindGlobalCtors - Find the llvm.global_ctors list, verifying that all 1949 /// initializers have an init priority of 65535. 1950 GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) { 1951 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); 1952 if (GV == 0) return 0; 1953 1954 // Verify that the initializer is simple enough for us to handle. We are 1955 // only allowed to optimize the initializer if it is unique. 1956 if (!GV->hasUniqueInitializer()) return 0; 1957 1958 if (isa<ConstantAggregateZero>(GV->getInitializer())) 1959 return GV; 1960 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); 1961 1962 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { 1963 if (isa<ConstantAggregateZero>(*i)) 1964 continue; 1965 ConstantStruct *CS = cast<ConstantStruct>(*i); 1966 if (isa<ConstantPointerNull>(CS->getOperand(1))) 1967 continue; 1968 1969 // Must have a function or null ptr. 1970 if (!isa<Function>(CS->getOperand(1))) 1971 return 0; 1972 1973 // Init priority must be standard. 1974 ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0)); 1975 if (CI->getZExtValue() != 65535) 1976 return 0; 1977 } 1978 1979 return GV; 1980 } 1981 1982 /// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand, 1983 /// return a list of the functions and null terminator as a vector. 1984 static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) { 1985 if (GV->getInitializer()->isNullValue()) 1986 return std::vector<Function*>(); 1987 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); 1988 std::vector<Function*> Result; 1989 Result.reserve(CA->getNumOperands()); 1990 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { 1991 ConstantStruct *CS = cast<ConstantStruct>(*i); 1992 Result.push_back(dyn_cast<Function>(CS->getOperand(1))); 1993 } 1994 return Result; 1995 } 1996 1997 /// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the 1998 /// specified array, returning the new global to use. 1999 static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL, 2000 const std::vector<Function*> &Ctors) { 2001 // If we made a change, reassemble the initializer list. 2002 Constant *CSVals[2]; 2003 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535); 2004 CSVals[1] = 0; 2005 2006 StructType *StructTy = 2007 cast <StructType>( 2008 cast<ArrayType>(GCL->getType()->getElementType())->getElementType()); 2009 2010 // Create the new init list. 2011 std::vector<Constant*> CAList; 2012 for (unsigned i = 0, e = Ctors.size(); i != e; ++i) { 2013 if (Ctors[i]) { 2014 CSVals[1] = Ctors[i]; 2015 } else { 2016 Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()), 2017 false); 2018 PointerType *PFTy = PointerType::getUnqual(FTy); 2019 CSVals[1] = Constant::getNullValue(PFTy); 2020 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 2021 0x7fffffff); 2022 } 2023 CAList.push_back(ConstantStruct::get(StructTy, CSVals)); 2024 } 2025 2026 // Create the array initializer. 
2027 Constant *CA = ConstantArray::get(ArrayType::get(StructTy, 2028 CAList.size()), CAList); 2029 2030 // If we didn't change the number of elements, don't create a new GV. 2031 if (CA->getType() == GCL->getInitializer()->getType()) { 2032 GCL->setInitializer(CA); 2033 return GCL; 2034 } 2035 2036 // Create the new global and insert it next to the existing list. 2037 GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(), 2038 GCL->getLinkage(), CA, "", 2039 GCL->isThreadLocal()); 2040 GCL->getParent()->getGlobalList().insert(GCL, NGV); 2041 NGV->takeName(GCL); 2042 2043 // Nuke the old list, replacing any uses with the new one. 2044 if (!GCL->use_empty()) { 2045 Constant *V = NGV; 2046 if (V->getType() != GCL->getType()) 2047 V = ConstantExpr::getBitCast(V, GCL->getType()); 2048 GCL->replaceAllUsesWith(V); 2049 } 2050 GCL->eraseFromParent(); 2051 2052 if (Ctors.size()) 2053 return NGV; 2054 else 2055 return 0; 2056 } 2057 2058 2059 static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues, Value *V) { 2060 if (Constant *CV = dyn_cast<Constant>(V)) return CV; 2061 Constant *R = ComputedValues[V]; 2062 assert(R && "Reference to an uncomputed value!"); 2063 return R; 2064 } 2065 2066 static inline bool 2067 isSimpleEnoughValueToCommit(Constant *C, 2068 SmallPtrSet<Constant*, 8> &SimpleConstants); 2069 2070 2071 /// isSimpleEnoughValueToCommit - Return true if the specified constant can be 2072 /// handled by the code generator. We don't want to generate something like: 2073 /// void *X = &X/42; 2074 /// because the code generator doesn't have a relocation that can handle that. 2075 /// 2076 /// This function should be called if C was not found (but just got inserted) 2077 /// in SimpleConstants to avoid having to rescan the same constants all the 2078 /// time. 2079 static bool isSimpleEnoughValueToCommitHelper(Constant *C, 2080 SmallPtrSet<Constant*, 8> &SimpleConstants) { 2081 // Simple integer, undef, constant aggregate zero, global addresses, etc are 2082 // all supported. 2083 if (C->getNumOperands() == 0 || isa<BlockAddress>(C) || 2084 isa<GlobalValue>(C)) 2085 return true; 2086 2087 // Aggregate values are safe if all their elements are. 2088 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) || 2089 isa<ConstantVector>(C)) { 2090 for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) { 2091 Constant *Op = cast<Constant>(C->getOperand(i)); 2092 if (!isSimpleEnoughValueToCommit(Op, SimpleConstants)) 2093 return false; 2094 } 2095 return true; 2096 } 2097 2098 // We don't know exactly what relocations are allowed in constant expressions, 2099 // so we allow &global+constantoffset, which is safe and uniformly supported 2100 // across targets. 2101 ConstantExpr *CE = cast<ConstantExpr>(C); 2102 switch (CE->getOpcode()) { 2103 case Instruction::BitCast: 2104 case Instruction::IntToPtr: 2105 case Instruction::PtrToInt: 2106 // These casts are always fine if the casted value is. 2107 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants); 2108 2109 // GEP is fine if it is simple + constant offset. 2110 case Instruction::GetElementPtr: 2111 for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i) 2112 if (!isa<ConstantInt>(CE->getOperand(i))) 2113 return false; 2114 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants); 2115 2116 case Instruction::Add: 2117 // We allow simple+cst. 
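// e.g. 'add (i64 ptrtoint (i32* @G to i64), i64 8)' is just a global address
// plus a constant offset, the form the comment above calls out as safe.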
2118 if (!isa<ConstantInt>(CE->getOperand(1))) 2119 return false; 2120 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants); 2121 } 2122 return false; 2123 } 2124 2125 static inline bool 2126 isSimpleEnoughValueToCommit(Constant *C, 2127 SmallPtrSet<Constant*, 8> &SimpleConstants) { 2128 // If we already checked this constant, we win. 2129 if (!SimpleConstants.insert(C)) return true; 2130 // Check the constant. 2131 return isSimpleEnoughValueToCommitHelper(C, SimpleConstants); 2132 } 2133 2134 2135 /// isSimpleEnoughPointerToCommit - Return true if this constant is simple 2136 /// enough for us to understand. In particular, if it is a cast to anything 2137 /// other than from one pointer type to another pointer type, we punt. 2138 /// We basically just support direct accesses to globals and GEP's of 2139 /// globals. This should be kept up to date with CommitValueTo. 2140 static bool isSimpleEnoughPointerToCommit(Constant *C) { 2141 // Conservatively, avoid aggregate types. This is because we don't 2142 // want to worry about them partially overlapping other stores. 2143 if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType()) 2144 return false; 2145 2146 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) 2147 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 2148 // external globals. 2149 return GV->hasUniqueInitializer(); 2150 2151 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 2152 // Handle a constantexpr gep. 2153 if (CE->getOpcode() == Instruction::GetElementPtr && 2154 isa<GlobalVariable>(CE->getOperand(0)) && 2155 cast<GEPOperator>(CE)->isInBounds()) { 2156 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2157 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 2158 // external globals. 2159 if (!GV->hasUniqueInitializer()) 2160 return false; 2161 2162 // The first index must be zero. 2163 ConstantInt *CI = dyn_cast<ConstantInt>(*llvm::next(CE->op_begin())); 2164 if (!CI || !CI->isZero()) return false; 2165 2166 // The remaining indices must be compile-time known integers within the 2167 // notional bounds of the corresponding static array types. 2168 if (!CE->isGEPWithNoNotionalOverIndexing()) 2169 return false; 2170 2171 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); 2172 2173 // A constantexpr bitcast from a pointer to another pointer is a no-op, 2174 // and we know how to evaluate it by moving the bitcast from the pointer 2175 // operand to the value operand. 2176 } else if (CE->getOpcode() == Instruction::BitCast && 2177 isa<GlobalVariable>(CE->getOperand(0))) { 2178 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 2179 // external globals. 2180 return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer(); 2181 } 2182 } 2183 2184 return false; 2185 } 2186 2187 /// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global 2188 /// initializer. This returns 'Init' modified to reflect 'Val' stored into it. 2189 /// At this point, the GEP operands of Addr [0, OpNo) have been stepped into. 2190 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val, 2191 ConstantExpr *Addr, unsigned OpNo) { 2192 // Base case of the recursion. 
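// Once every GEP index has been stepped through, 'Init' is exactly the
// element being stored to, so the new value simply replaces it.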
2193 if (OpNo == Addr->getNumOperands()) { 2194 assert(Val->getType() == Init->getType() && "Type mismatch!"); 2195 return Val; 2196 } 2197 2198 std::vector<Constant*> Elts; 2199 if (StructType *STy = dyn_cast<StructType>(Init->getType())) { 2200 2201 // Break up the constant into its elements. 2202 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) { 2203 for (User::op_iterator i = CS->op_begin(), e = CS->op_end(); i != e; ++i) 2204 Elts.push_back(cast<Constant>(*i)); 2205 } else if (isa<ConstantAggregateZero>(Init)) { 2206 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 2207 Elts.push_back(Constant::getNullValue(STy->getElementType(i))); 2208 } else if (isa<UndefValue>(Init)) { 2209 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 2210 Elts.push_back(UndefValue::get(STy->getElementType(i))); 2211 } else { 2212 llvm_unreachable("This code is out of sync with " 2213 " ConstantFoldLoadThroughGEPConstantExpr"); 2214 } 2215 2216 // Replace the element that we are supposed to. 2217 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo)); 2218 unsigned Idx = CU->getZExtValue(); 2219 assert(Idx < STy->getNumElements() && "Struct index out of range!"); 2220 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1); 2221 2222 // Return the modified struct. 2223 return ConstantStruct::get(STy, Elts); 2224 } 2225 2226 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo)); 2227 SequentialType *InitTy = cast<SequentialType>(Init->getType()); 2228 2229 uint64_t NumElts; 2230 if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy)) 2231 NumElts = ATy->getNumElements(); 2232 else 2233 NumElts = cast<VectorType>(InitTy)->getNumElements(); 2234 2235 // Break up the array into elements. 2236 if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) { 2237 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) 2238 Elts.push_back(cast<Constant>(*i)); 2239 } else if (ConstantVector *CV = dyn_cast<ConstantVector>(Init)) { 2240 for (User::op_iterator i = CV->op_begin(), e = CV->op_end(); i != e; ++i) 2241 Elts.push_back(cast<Constant>(*i)); 2242 } else if (isa<ConstantAggregateZero>(Init)) { 2243 Elts.assign(NumElts, Constant::getNullValue(InitTy->getElementType())); 2244 } else { 2245 assert(isa<UndefValue>(Init) && "This code is out of sync with " 2246 " ConstantFoldLoadThroughGEPConstantExpr"); 2247 Elts.assign(NumElts, UndefValue::get(InitTy->getElementType())); 2248 } 2249 2250 assert(CI->getZExtValue() < NumElts); 2251 Elts[CI->getZExtValue()] = 2252 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1); 2253 2254 if (Init->getType()->isArrayTy()) 2255 return ConstantArray::get(cast<ArrayType>(InitTy), Elts); 2256 return ConstantVector::get(Elts); 2257 } 2258 2259 /// CommitValueTo - We have decided that Addr (which satisfies the predicate 2260 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen. 2261 static void CommitValueTo(Constant *Val, Constant *Addr) { 2262 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) { 2263 assert(GV->hasInitializer()); 2264 GV->setInitializer(Val); 2265 return; 2266 } 2267 2268 ConstantExpr *CE = cast<ConstantExpr>(Addr); 2269 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2270 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2)); 2271 } 2272 2273 /// ComputeLoadResult - Return the value that would be computed by a load from 2274 /// P after the stores reflected by 'memory' have been performed. If we can't 2275 /// decide, return null. 
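/// Only plain global addresses and constantexpr GEPs into globals with
/// definitive initializers are handled here.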
2276 static Constant *ComputeLoadResult(Constant *P, 2277 const DenseMap<Constant*, Constant*> &Memory) { 2278 // If this memory location has been recently stored, use the stored value: it 2279 // is the most up-to-date. 2280 DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P); 2281 if (I != Memory.end()) return I->second; 2282 2283 // Access it. 2284 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) { 2285 if (GV->hasDefinitiveInitializer()) 2286 return GV->getInitializer(); 2287 return 0; 2288 } 2289 2290 // Handle a constantexpr getelementptr. 2291 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P)) 2292 if (CE->getOpcode() == Instruction::GetElementPtr && 2293 isa<GlobalVariable>(CE->getOperand(0))) { 2294 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2295 if (GV->hasDefinitiveInitializer()) 2296 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); 2297 } 2298 2299 return 0; // don't know how to evaluate. 2300 } 2301 2302 /// EvaluateFunction - Evaluate a call to function F, returning true if 2303 /// successful, false if we can't evaluate it. ActualArgs contains the formal 2304 /// arguments for the function. 2305 static bool EvaluateFunction(Function *F, Constant *&RetVal, 2306 const SmallVectorImpl<Constant*> &ActualArgs, 2307 std::vector<Function*> &CallStack, 2308 DenseMap<Constant*, Constant*> &MutatedMemory, 2309 std::vector<GlobalVariable*> &AllocaTmps, 2310 SmallPtrSet<Constant*, 8> &SimpleConstants, 2311 const TargetData *TD) { 2312 // Check to see if this function is already executing (recursion). If so, 2313 // bail out. TODO: we might want to accept limited recursion. 2314 if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end()) 2315 return false; 2316 2317 CallStack.push_back(F); 2318 2319 /// Values - As we compute SSA register values, we store their contents here. 2320 DenseMap<Value*, Constant*> Values; 2321 2322 // Initialize arguments to the incoming values specified. 2323 unsigned ArgNo = 0; 2324 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E; 2325 ++AI, ++ArgNo) 2326 Values[AI] = ActualArgs[ArgNo]; 2327 2328 /// ExecutedBlocks - We only handle non-looping, non-recursive code. As such, 2329 /// we can only evaluate any one basic block at most once. This set keeps 2330 /// track of what we have executed so we can detect recursive cases etc. 2331 SmallPtrSet<BasicBlock*, 32> ExecutedBlocks; 2332 2333 // CurInst - The current instruction we're evaluating. 2334 BasicBlock::iterator CurInst = F->begin()->begin(); 2335 2336 // This is the main evaluation loop. 2337 while (1) { 2338 Constant *InstResult = 0; 2339 2340 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) { 2341 if (SI->isVolatile()) return false; // no volatile accesses. 2342 Constant *Ptr = getVal(Values, SI->getOperand(1)); 2343 if (!isSimpleEnoughPointerToCommit(Ptr)) 2344 // If this is too complex for us to commit, reject it. 2345 return false; 2346 2347 Constant *Val = getVal(Values, SI->getOperand(0)); 2348 2349 // If this might be too difficult for the backend to handle (e.g. the addr 2350 // of one global variable divided by another) then we can't commit it. 2351 if (!isSimpleEnoughValueToCommit(Val, SimpleConstants)) 2352 return false; 2353 2354 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) 2355 if (CE->getOpcode() == Instruction::BitCast) { 2356 // If we're evaluating a store through a bitcast, then we need 2357 // to pull the bitcast off the pointer type and push it onto the 2358 // stored value. 
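// e.g. a 'store i8* %v, i8** bitcast (%T** @G to i8**)' is treated as
// 'store %T* bitcast(%v), %T** @G' (possibly stepping into the first struct
// member below until the types line up).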
2359 Ptr = CE->getOperand(0); 2360 2361 Type *NewTy=cast<PointerType>(Ptr->getType())->getElementType(); 2362 2363 // In order to push the bitcast onto the stored value, a bitcast 2364 // from NewTy to Val's type must be legal. If it's not, we can try 2365 // introspecting NewTy to find a legal conversion. 2366 while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) { 2367 // If NewTy is a struct, we can convert the pointer to the struct 2368 // into a pointer to its first member. 2369 // FIXME: This could be extended to support arrays as well. 2370 if (StructType *STy = dyn_cast<StructType>(NewTy)) { 2371 NewTy = STy->getTypeAtIndex(0U); 2372 2373 IntegerType *IdxTy =IntegerType::get(NewTy->getContext(), 32); 2374 Constant *IdxZero = ConstantInt::get(IdxTy, 0, false); 2375 Constant * const IdxList[] = {IdxZero, IdxZero}; 2376 2377 Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList, 2); 2378 2379 // If we can't improve the situation by introspecting NewTy, 2380 // we have to give up. 2381 } else { 2382 return 0; 2383 } 2384 } 2385 2386 // If we found compatible types, go ahead and push the bitcast 2387 // onto the stored value. 2388 Val = ConstantExpr::getBitCast(Val, NewTy); 2389 } 2390 2391 MutatedMemory[Ptr] = Val; 2392 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) { 2393 InstResult = ConstantExpr::get(BO->getOpcode(), 2394 getVal(Values, BO->getOperand(0)), 2395 getVal(Values, BO->getOperand(1))); 2396 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) { 2397 InstResult = ConstantExpr::getCompare(CI->getPredicate(), 2398 getVal(Values, CI->getOperand(0)), 2399 getVal(Values, CI->getOperand(1))); 2400 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) { 2401 InstResult = ConstantExpr::getCast(CI->getOpcode(), 2402 getVal(Values, CI->getOperand(0)), 2403 CI->getType()); 2404 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) { 2405 InstResult = ConstantExpr::getSelect(getVal(Values, SI->getOperand(0)), 2406 getVal(Values, SI->getOperand(1)), 2407 getVal(Values, SI->getOperand(2))); 2408 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) { 2409 Constant *P = getVal(Values, GEP->getOperand(0)); 2410 SmallVector<Constant*, 8> GEPOps; 2411 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); 2412 i != e; ++i) 2413 GEPOps.push_back(getVal(Values, *i)); 2414 InstResult = cast<GEPOperator>(GEP)->isInBounds() ? 2415 ConstantExpr::getInBoundsGetElementPtr(P, &GEPOps[0], GEPOps.size()) : 2416 ConstantExpr::getGetElementPtr(P, &GEPOps[0], GEPOps.size()); 2417 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) { 2418 if (LI->isVolatile()) return false; // no volatile accesses. 2419 InstResult = ComputeLoadResult(getVal(Values, LI->getOperand(0)), 2420 MutatedMemory); 2421 if (InstResult == 0) return false; // Could not evaluate load. 2422 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) { 2423 if (AI->isArrayAllocation()) return false; // Cannot handle array allocs. 2424 Type *Ty = AI->getType()->getElementType(); 2425 AllocaTmps.push_back(new GlobalVariable(Ty, false, 2426 GlobalValue::InternalLinkage, 2427 UndefValue::get(Ty), 2428 AI->getName())); 2429 InstResult = AllocaTmps.back(); 2430 } else if (CallInst *CI = dyn_cast<CallInst>(CurInst)) { 2431 2432 // Debug info can safely be ignored here. 2433 if (isa<DbgInfoIntrinsic>(CI)) { 2434 ++CurInst; 2435 continue; 2436 } 2437 2438 // Cannot handle inline asm. 
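// (The remaining call handling below skips no-op memsets, constant-folds
// declarations when possible, and recursively evaluates defined non-vararg
// callees.)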
2439 if (isa<InlineAsm>(CI->getCalledValue())) return false; 2440 2441 if (MemSetInst *MSI = dyn_cast<MemSetInst>(CI)) { 2442 if (MSI->isVolatile()) return false; 2443 Constant *Ptr = getVal(Values, MSI->getDest()); 2444 Constant *Val = getVal(Values, MSI->getValue()); 2445 Constant *DestVal = ComputeLoadResult(getVal(Values, Ptr), 2446 MutatedMemory); 2447 if (Val->isNullValue() && DestVal && DestVal->isNullValue()) { 2448 // This memset is a no-op. 2449 ++CurInst; 2450 continue; 2451 } 2452 return false; 2453 } 2454 2455 // Resolve function pointers. 2456 Function *Callee = dyn_cast<Function>(getVal(Values, 2457 CI->getCalledValue())); 2458 if (!Callee) return false; // Cannot resolve. 2459 2460 SmallVector<Constant*, 8> Formals; 2461 CallSite CS(CI); 2462 for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); 2463 i != e; ++i) 2464 Formals.push_back(getVal(Values, *i)); 2465 2466 if (Callee->isDeclaration()) { 2467 // If this is a function we can constant fold, do it. 2468 if (Constant *C = ConstantFoldCall(Callee, Formals)) { 2469 InstResult = C; 2470 } else { 2471 return false; 2472 } 2473 } else { 2474 if (Callee->getFunctionType()->isVarArg()) 2475 return false; 2476 2477 Constant *RetVal; 2478 // Execute the call, if successful, use the return value. 2479 if (!EvaluateFunction(Callee, RetVal, Formals, CallStack, 2480 MutatedMemory, AllocaTmps, SimpleConstants, TD)) 2481 return false; 2482 InstResult = RetVal; 2483 } 2484 } else if (isa<TerminatorInst>(CurInst)) { 2485 BasicBlock *NewBB = 0; 2486 if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) { 2487 if (BI->isUnconditional()) { 2488 NewBB = BI->getSuccessor(0); 2489 } else { 2490 ConstantInt *Cond = 2491 dyn_cast<ConstantInt>(getVal(Values, BI->getCondition())); 2492 if (!Cond) return false; // Cannot determine. 2493 2494 NewBB = BI->getSuccessor(!Cond->getZExtValue()); 2495 } 2496 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) { 2497 ConstantInt *Val = 2498 dyn_cast<ConstantInt>(getVal(Values, SI->getCondition())); 2499 if (!Val) return false; // Cannot determine. 2500 NewBB = SI->getSuccessor(SI->findCaseValue(Val)); 2501 } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) { 2502 Value *Val = getVal(Values, IBI->getAddress())->stripPointerCasts(); 2503 if (BlockAddress *BA = dyn_cast<BlockAddress>(Val)) 2504 NewBB = BA->getBasicBlock(); 2505 else 2506 return false; // Cannot determine. 2507 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(CurInst)) { 2508 if (RI->getNumOperands()) 2509 RetVal = getVal(Values, RI->getOperand(0)); 2510 2511 CallStack.pop_back(); // return from fn. 2512 return true; // We succeeded at evaluating this ctor! 2513 } else { 2514 // invoke, unwind, unreachable. 2515 return false; // Cannot handle this terminator. 2516 } 2517 2518 // Okay, we succeeded in evaluating this control flow. See if we have 2519 // executed the new block before. If so, we have a looping function, 2520 // which we cannot evaluate in reasonable time. 2521 if (!ExecutedBlocks.insert(NewBB)) 2522 return false; // looped! 2523 2524 // Okay, we have never been in this block before. Check to see if there 2525 // are any PHI nodes. If so, evaluate them with information about where 2526 // we came from. 2527 BasicBlock *OldBB = CurInst->getParent(); 2528 CurInst = NewBB->begin(); 2529 PHINode *PN; 2530 for (; (PN = dyn_cast<PHINode>(CurInst)); ++CurInst) 2531 Values[PN] = getVal(Values, PN->getIncomingValueForBlock(OldBB)); 2532 2533 // Do NOT increment CurInst. 
We know that the terminator had no value.
2534 continue;
2535 } else {
2536 // Did not know how to evaluate this!
2537 return false;
2538 }
2539
2540 if (!CurInst->use_empty()) {
2541 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
2542 InstResult = ConstantFoldConstantExpression(CE, TD);
2543
2544 Values[CurInst] = InstResult;
2545 }
2546
2547 // Advance program counter.
2548 ++CurInst;
2549 }
2550 }
2551
2552 /// EvaluateStaticConstructor - Evaluate the given static constructor function,
2553 /// if we can. Return true if we can, false otherwise.
2554 static bool EvaluateStaticConstructor(Function *F, const TargetData *TD) {
2555 /// MutatedMemory - For each store we execute, we update this map. Loads
2556 /// check this to get the most up-to-date value. If evaluation is successful,
2557 /// this state is committed to the process.
2558 DenseMap<Constant*, Constant*> MutatedMemory;
2559
2560 /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
2561 /// to represent its body. This vector is needed so we can delete the
2562 /// temporary globals when we are done.
2563 std::vector<GlobalVariable*> AllocaTmps;
2564
2565 /// CallStack - This is used to detect recursion. In pathological situations
2566 /// we could hit exponential behavior, but at least there is nothing
2567 /// unbounded.
2568 std::vector<Function*> CallStack;
2569
2570 /// SimpleConstants - These are constants we have checked and know to be
2571 /// simple enough to live in a static initializer of a global.
2572 SmallPtrSet<Constant*, 8> SimpleConstants;
2573
2574 // Call the function.
2575 Constant *RetValDummy;
2576 bool EvalSuccess = EvaluateFunction(F, RetValDummy,
2577 SmallVector<Constant*, 0>(), CallStack,
2578 MutatedMemory, AllocaTmps,
2579 SimpleConstants, TD);
2580
2581 if (EvalSuccess) {
2582 // We succeeded at evaluation: commit the result.
2583 DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
2584 << F->getName() << "' to " << MutatedMemory.size()
2585 << " stores.\n");
2586 for (DenseMap<Constant*, Constant*>::iterator I = MutatedMemory.begin(),
2587 E = MutatedMemory.end(); I != E; ++I)
2588 CommitValueTo(I->second, I->first);
2589 }
2590
2591 // At this point, we are done interpreting. If we created any 'alloca'
2592 // temporaries, release them now.
2593 while (!AllocaTmps.empty()) {
2594 GlobalVariable *Tmp = AllocaTmps.back();
2595 AllocaTmps.pop_back();
2596
2597 // If there are still users of the alloca, the program is doing something
2598 // silly, e.g. storing the address of the alloca somewhere and using it
2599 // later. Since this is undefined, we'll just make it be null.
2600 if (!Tmp->use_empty())
2601 Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
2602 delete Tmp;
2603 }
2604
2605 return EvalSuccess;
2606 }
2607
2608
2609
2610 /// OptimizeGlobalCtorsList - Simplify and evaluate global ctors if possible.
2611 /// Return true if anything changed.
2612 bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
2613 std::vector<Function*> Ctors = ParseGlobalCtors(GCL);
2614 bool MadeChange = false;
2615 if (Ctors.empty()) return false;
2616
2617 const TargetData *TD = getAnalysisIfAvailable<TargetData>();
2618 // Loop over global ctors, optimizing them when we can.
2619 for (unsigned i = 0; i != Ctors.size(); ++i) {
2620 Function *F = Ctors[i];
2621 // Found a null terminator in the middle of the list, prune off the rest of
2622 // the list.
2623 if (F == 0) { 2624 if (i != Ctors.size()-1) { 2625 Ctors.resize(i+1); 2626 MadeChange = true; 2627 } 2628 break; 2629 } 2630 2631 // We cannot simplify external ctor functions. 2632 if (F->empty()) continue; 2633 2634 // If we can evaluate the ctor at compile time, do. 2635 if (EvaluateStaticConstructor(F, TD)) { 2636 Ctors.erase(Ctors.begin()+i); 2637 MadeChange = true; 2638 --i; 2639 ++NumCtorsEvaluated; 2640 continue; 2641 } 2642 } 2643 2644 if (!MadeChange) return false; 2645 2646 GCL = InstallGlobalCtors(GCL, Ctors); 2647 return true; 2648 } 2649 2650 bool GlobalOpt::OptimizeGlobalAliases(Module &M) { 2651 bool Changed = false; 2652 2653 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); 2654 I != E;) { 2655 Module::alias_iterator J = I++; 2656 // Aliases without names cannot be referenced outside this module. 2657 if (!J->hasName() && !J->isDeclaration()) 2658 J->setLinkage(GlobalValue::InternalLinkage); 2659 // If the aliasee may change at link time, nothing can be done - bail out. 2660 if (J->mayBeOverridden()) 2661 continue; 2662 2663 Constant *Aliasee = J->getAliasee(); 2664 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); 2665 Target->removeDeadConstantUsers(); 2666 bool hasOneUse = Target->hasOneUse() && Aliasee->hasOneUse(); 2667 2668 // Make all users of the alias use the aliasee instead. 2669 if (!J->use_empty()) { 2670 J->replaceAllUsesWith(Aliasee); 2671 ++NumAliasesResolved; 2672 Changed = true; 2673 } 2674 2675 // If the alias is externally visible, we may still be able to simplify it. 2676 if (!J->hasLocalLinkage()) { 2677 // If the aliasee has internal linkage, give it the name and linkage 2678 // of the alias, and delete the alias. This turns: 2679 // define internal ... @f(...) 2680 // @a = alias ... @f 2681 // into: 2682 // define ... @a(...) 2683 if (!Target->hasLocalLinkage()) 2684 continue; 2685 2686 // Do not perform the transform if multiple aliases potentially target the 2687 // aliasee. This check also ensures that it is safe to replace the section 2688 // and other attributes of the aliasee with those of the alias. 2689 if (!hasOneUse) 2690 continue; 2691 2692 // Give the aliasee the name, linkage and other attributes of the alias. 2693 Target->takeName(J); 2694 Target->setLinkage(J->getLinkage()); 2695 Target->GlobalValue::copyAttributesFrom(J); 2696 } 2697 2698 // Delete the alias. 2699 M.getAliasList().erase(J); 2700 ++NumAliasesRemoved; 2701 Changed = true; 2702 } 2703 2704 return Changed; 2705 } 2706 2707 static Function *FindCXAAtExit(Module &M) { 2708 Function *Fn = M.getFunction("__cxa_atexit"); 2709 2710 if (!Fn) 2711 return 0; 2712 2713 FunctionType *FTy = Fn->getFunctionType(); 2714 2715 // Checking that the function has the right return type, the right number of 2716 // parameters and that they all have pointer types should be enough. 2717 if (!FTy->getReturnType()->isIntegerTy() || 2718 FTy->getNumParams() != 3 || 2719 !FTy->getParamType(0)->isPointerTy() || 2720 !FTy->getParamType(1)->isPointerTy() || 2721 !FTy->getParamType(2)->isPointerTy()) 2722 return 0; 2723 2724 return Fn; 2725 } 2726 2727 /// cxxDtorIsEmpty - Returns whether the given function is an empty C++ 2728 /// destructor and can therefore be eliminated. 2729 /// Note that we assume that other optimization passes have already simplified 2730 /// the code so we only look for a function with a single basic block, where 2731 /// the only allowed instructions are 'ret' or 'call' to empty C++ dtor. 
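/// CalledFunctions records the destructors already visited on the current
/// path, so mutually recursive destructors are not mistaken for empty ones.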
2732 static bool cxxDtorIsEmpty(const Function &Fn, 2733 SmallPtrSet<const Function *, 8> &CalledFunctions) { 2734 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and 2735 // nounwind, but that doesn't seem worth doing. 2736 if (Fn.isDeclaration()) 2737 return false; 2738 2739 if (++Fn.begin() != Fn.end()) 2740 return false; 2741 2742 const BasicBlock &EntryBlock = Fn.getEntryBlock(); 2743 for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end(); 2744 I != E; ++I) { 2745 if (const CallInst *CI = dyn_cast<CallInst>(I)) { 2746 // Ignore debug intrinsics. 2747 if (isa<DbgInfoIntrinsic>(CI)) 2748 continue; 2749 2750 const Function *CalledFn = CI->getCalledFunction(); 2751 2752 if (!CalledFn) 2753 return false; 2754 2755 SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions); 2756 2757 // Don't treat recursive functions as empty. 2758 if (!NewCalledFunctions.insert(CalledFn)) 2759 return false; 2760 2761 if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions)) 2762 return false; 2763 } else if (isa<ReturnInst>(*I)) 2764 return true; 2765 else 2766 return false; 2767 } 2768 2769 return false; 2770 } 2771 2772 bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) { 2773 /// Itanium C++ ABI p3.3.5: 2774 /// 2775 /// After constructing a global (or local static) object, that will require 2776 /// destruction on exit, a termination function is registered as follows: 2777 /// 2778 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d ); 2779 /// 2780 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the 2781 /// call f(p) when DSO d is unloaded, before all such termination calls 2782 /// registered before this one. It returns zero if registration is 2783 /// successful, nonzero on failure. 2784 2785 // This pass will look for calls to __cxa_atexit where the function is trivial 2786 // and remove them. 2787 bool Changed = false; 2788 2789 for (Function::use_iterator I = CXAAtExitFn->use_begin(), 2790 E = CXAAtExitFn->use_end(); I != E;) { 2791 // We're only interested in calls. Theoretically, we could handle invoke 2792 // instructions as well, but neither llvm-gcc nor clang generate invokes 2793 // to __cxa_atexit. 2794 CallInst *CI = dyn_cast<CallInst>(*I++); 2795 if (!CI) 2796 continue; 2797 2798 Function *DtorFn = 2799 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts()); 2800 if (!DtorFn) 2801 continue; 2802 2803 SmallPtrSet<const Function *, 8> CalledFunctions; 2804 if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions)) 2805 continue; 2806 2807 // Just remove the call. 2808 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType())); 2809 CI->eraseFromParent(); 2810 2811 ++NumCXXDtorsRemoved; 2812 2813 Changed |= true; 2814 } 2815 2816 return Changed; 2817 } 2818 2819 bool GlobalOpt::runOnModule(Module &M) { 2820 bool Changed = false; 2821 2822 // Try to find the llvm.globalctors list. 2823 GlobalVariable *GlobalCtors = FindGlobalCtors(M); 2824 2825 Function *CXAAtExitFn = FindCXAAtExit(M); 2826 2827 bool LocalChange = true; 2828 while (LocalChange) { 2829 LocalChange = false; 2830 2831 // Delete functions that are trivially dead, ccc -> fastcc 2832 LocalChange |= OptimizeFunctions(M); 2833 2834 // Optimize global_ctors list. 2835 if (GlobalCtors) 2836 LocalChange |= OptimizeGlobalCtorsList(GlobalCtors); 2837 2838 // Optimize non-address-taken globals. 2839 LocalChange |= OptimizeGlobalVars(M); 2840 2841 // Resolve aliases, when possible. 
2842 LocalChange |= OptimizeGlobalAliases(M); 2843 2844 // Try to remove trivial global destructors. 2845 if (CXAAtExitFn) 2846 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn); 2847 2848 Changed |= LocalChange; 2849 } 2850 2851 // TODO: Move all global ctors functions to the end of the module for code 2852 // layout. 2853 2854 return Changed; 2855 } 2856