//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/MDBuilder.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
  : CodeGenTypeCache(cgm), CGM(cgm),
    Target(CGM.getContext().getTargetInfo()),
    Builder(cgm.getModule().getContext()),
    AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
    LambdaThisCaptureField(0), NormalCleanupDest(0), NextCleanupDestIndex(1),
    FirstBlockInfo(0), EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
    DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
    IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
    CXXABIThisDecl(0), CXXABIThisValue(0), CXXThisValue(0), CXXVTTDecl(0),
    CXXVTTValue(0), OutermostConditional(0), TerminateLandingPad(0),
    TerminateHandler(0), TrapBB(0) {

  CatchUndefined = getContext().getLangOpts().CatchUndefined;
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
}

CodeGenFunction::~CodeGenFunction() {
  // If there are any unclaimed block infos, go ahead and destroy them
  // now.  This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);
}


llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

bool CodeGenFunction::hasAggregateLLVMType(QualType type) {
  switch (type.getCanonicalType()->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("non-canonical or dependent type in IR-generation");

  case Type::Builtin:
  case Type::Pointer:
  case Type::BlockPointer:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
  case Type::Vector:
  case Type::ExtVector:
  case Type::FunctionProto:
  case Type::FunctionNoProto:
  case Type::Enum:
  case Type::ObjCObjectPointer:
    return false;

  // Complexes, arrays, records, and Objective-C objects.
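  // (These are the classes IRGen does not treat as single scalar SSA
  // values; e.g. a struct returned by value is generally emitted through
  // memory, often an sret slot, rather than as one LLVM value.)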
  case Type::Complex:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::Record:
  case Type::ObjCObject:
  case Type::ObjCInterface:
    return true;

  // In IRGen, atomic types are just the underlying type.
  case Type::Atomic:
    return hasAggregateLLVMType(type->getAs<AtomicType>()->getValueType());
  }
  llvm_unreachable("unknown type kind!");
}

void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead.  This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Reset insertion point, including debug location, and delete the
      // branch.
      Builder.SetCurrentDebugLocation(BI->getDebugLoc());
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses.  However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  if (EHStack.stable_begin() != PrologueCleanupDepth)
    PopCleanupBlocks(PrologueCleanupDepth);

  // Emit function epilog (to return).
  EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitFunctionEnd(Builder);
  }

  EmitFunctionEpilog(*CurFnInfo);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
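  // (It is the dead bitcast-of-undef marker created in StartFunction; it
  // exists only so allocas can be inserted before it, so once the body has
  // been emitted it can simply be erased.)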
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  llvm::PointerType *PointerTy = Int8PtrTy;
  llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  llvm::FunctionType *FunctionTy =
    llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  Builder.CreateCall2(F,
                      llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
                      CallSite);
}

void CodeGenFunction::EmitMCountInstrumentation() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
                                                       Target.getMCountName());
  Builder.CreateCall(MCountFn);
}

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
// FIXME: Add type, address, and access qualifiers.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 llvm::SmallVector<llvm::Value*, 5> &kernelMDArgs) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list of the form "key", followed by one value per
  // kernel argument.

  // MDNode for the kernel argument names.
  SmallVector<llvm::Value*, 8> argNames;
  argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  // Add MDNode to the list of all metadata.
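  // For "kernel void k(int a, int b)" this node is, roughly:
  //   !{!"kernel_arg_name", !"a", !"b"}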
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
}

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  llvm::SmallVector<llvm::Value*, 5> kernelMDArgs;
  kernelMDArgs.push_back(Fn);

  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs);

  if (FD->hasAttr<WorkGroupSizeHintAttr>()) {
    llvm::SmallVector<llvm::Value*, 5> attrMDArgs;
    attrMDArgs.push_back(llvm::MDString::get(Context, "work_group_size_hint"));
    WorkGroupSizeHintAttr *attr = FD->getAttr<WorkGroupSizeHintAttr>();
    llvm::Type *iTy = llvm::IntegerType::get(Context, 32);
    attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
       llvm::APInt(32, (uint64_t)attr->getXDim())));
    attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
       llvm::APInt(32, (uint64_t)attr->getYDim())));
    attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
       llvm::APInt(32, (uint64_t)attr->getZDim())));
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::SmallVector<llvm::Value*, 5> attrMDArgs;
    attrMDArgs.push_back(llvm::MDString::get(Context, "reqd_work_group_size"));
    ReqdWorkGroupSizeAttr *attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
    llvm::Type *iTy = llvm::IntegerType::get(Context, 32);
    attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
       llvm::APInt(32, (uint64_t)attr->getXDim())));
    attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
       llvm::APInt(32, (uint64_t)attr->getYDim())));
    attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
       llvm::APInt(32, (uint64_t)attr->getZDim())));
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
  llvm::NamedMDNode *OpenCLKernelMetadata =
    CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
  OpenCLKernelMetadata->addOperand(kernelMDNode);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (!CGM.getCodeGenOpts().NoInline)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
             RE = FD->redecls_end(); RI != RE; ++RI)
        if (RI->isInlineSpecified()) {
          Fn->addFnAttr(llvm::Attribute::InlineHint);
          break;
        }

  if (getContext().getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
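  // (A dead bitcast of undef works well as such a marker: allocas are
  // inserted before it, so they all land in the entry block no matter
  // which block the builder is currently emitting into.)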
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    unsigned NumArgs = 0;
    QualType *ArgsArray = new QualType[Args.size()];
    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      ArgsArray[NumArgs++] = (*i)->getType();
    }

    QualType FnType =
      getContext().getFunctionType(RetTy, ArgsArray, NumArgs,
                                   FunctionProtoType::ExtProtoInfo());

    delete[] ArgsArray;

    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If this lambda captures 'this', load it.
        QualType LambdaTagType =
          getContext().getTagDeclType(LambdaThisCaptureField->getParent());
        LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
                                                     LambdaTagType);
        LValue ThisLValue = EmitLValueForField(LambdaLV,
                                               LambdaThisCaptureField);
        CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = (*i)->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }

  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  EmitStmt(FD->getBody());
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
        if (!Call->doesNotThrow())
          return;
      } else if (isa<llvm::ResumeInst>(&*BI)) {
        return;
      }
  F->setDoesNotThrow(true);
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getModuleDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getModuleDebugInfo();

  FunctionArgList Args;
  QualType ResTy = FD->getResultType();

  CurGD = GD;
  if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
    CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
    Args.push_back(FD->getParamDecl(i));

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getContext().getLangOpts().CUDA &&
           !CGM.getCodeGenOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
  else if (isa<CXXConversionDecl>(FD) &&
           cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
    // The lambda conversion to block pointer is special; the semantics can't
    // be expressed in the AST, so IRGen needs to special-case it.
    EmitLambdaToBlockPointerBody(Args);
  } else if (isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda "__invoke" function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
  } else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, it not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, no break!
  if (S == 0) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (containsBreak(*I))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the boolean result in ResultBool.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the form of the condition,
/// this tries to simplify the emitted branches.
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);

    cond.begin(*this);
    EmitBlock(LHSBlock);
    EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
    cond.end(*this);

    return;
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param dest - a pointer to the storage to initialize
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               llvm::Value *dest, llvm::Value *src,
                               llvm::Value *sizeInChars) {
  std::pair<CharUnits,CharUnits> baseSizeAndAlign
    = CGF.getContext().getTypeInfoInChars(baseType);

  CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());

  llvm::Type *i8p = Builder.getInt8PtrTy();

  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
  llvm::Value *end = Builder.CreateInBoundsGEP(begin, sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
  cur->addIncoming(begin, originBB);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(cur, src, baseSizeInChars,
                       baseSizeAndAlign.second.getQuantity(),
                       /*volatile*/ false);

  // Go to the next element, advancing by the size of one base element.
  llvm::Value *next = Builder.CreateInBoundsGEP(cur, baseSizeInChars,
                                                "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getContext().getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP);

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);
  CharUnits Size = TypeInfo.first;
  CharUnits Align = TypeInfo.second;

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (Size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
          dyn_cast_or_null<VariableArrayType>(
                                        getContext().getAsArrayType(Ty))) {
      QualType eltType;
      llvm::Value *numElts;
      llvm::tie(numElts, eltType) = getVLASize(vlaType);

      SizeVal = numElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(Size);
    vla = 0;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    llvm::Value *SrcPtr =
      Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
                       Align.getQuantity(), false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              llvm::Value *&addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that this is the
  // number of elements in the VLA, not its size in bytes.
  llvm::Value *numVLAElements = 0;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(
      cast<llvm::PointerType>(addr->getType())->getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out-of-sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct).  Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
        cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    unsigned AddressSpace =
      cast<llvm::PointerType>(addr->getType())->getAddressSpace();
    llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
    addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = 0;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return std::pair<llvm::Value*,QualType>(numElements, elementType);
}

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(EmitScalarExpr(size), SizeTy,
                                        /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getResultType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert(Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 llvm::StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (specific_attr_iterator<AnnotateAttr>
         ai = D->specific_attr_begin<AnnotateAttr>(),
         ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       (*ai)->getAnnotation(), D->getLocation());
}

llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                                   llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (specific_attr_iterator<AnnotateAttr>
         ai = D->specific_attr_begin<AnnotateAttr>(),
         ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, (*ai)->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return V;
}