//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
#include <sstream>
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
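/// (In C, a declaration such as 'int f();' has no prototype and is
/// represented as a FunctionNoProtoType; note the variadic treatment in the
/// body below.)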
84 const CGFunctionInfo & 85 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) { 86 // When translating an unprototyped function type, always use a 87 // variadic type. 88 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), 89 /*instanceMethod=*/false, 90 /*chainCall=*/false, None, 91 FTNP->getExtInfo(), RequiredArgs(0)); 92 } 93 94 /// Arrange the LLVM function layout for a value of the given function 95 /// type, on top of any implicit parameters already stored. 96 static const CGFunctionInfo & 97 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, 98 SmallVectorImpl<CanQualType> &prefix, 99 CanQual<FunctionProtoType> FTP) { 100 RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); 101 // FIXME: Kill copy. 102 prefix.append(FTP->param_type_begin(), FTP->param_type_end()); 103 CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); 104 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod, 105 /*chainCall=*/false, prefix, 106 FTP->getExtInfo(), required); 107 } 108 109 /// Arrange the argument and result information for a value of the 110 /// given freestanding function type. 111 const CGFunctionInfo & 112 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) { 113 SmallVector<CanQualType, 16> argTypes; 114 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes, 115 FTP); 116 } 117 118 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) { 119 // Set the appropriate calling convention for the Function. 120 if (D->hasAttr<StdCallAttr>()) 121 return CC_X86StdCall; 122 123 if (D->hasAttr<FastCallAttr>()) 124 return CC_X86FastCall; 125 126 if (D->hasAttr<ThisCallAttr>()) 127 return CC_X86ThisCall; 128 129 if (D->hasAttr<VectorCallAttr>()) 130 return CC_X86VectorCall; 131 132 if (D->hasAttr<PascalAttr>()) 133 return CC_X86Pascal; 134 135 if (PcsAttr *PCS = D->getAttr<PcsAttr>()) 136 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP); 137 138 if (D->hasAttr<IntelOclBiccAttr>()) 139 return CC_IntelOclBicc; 140 141 if (D->hasAttr<MSABIAttr>()) 142 return IsWindows ? CC_C : CC_X86_64Win64; 143 144 if (D->hasAttr<SysVABIAttr>()) 145 return IsWindows ? CC_X86_64SysV : CC_C; 146 147 return CC_C; 148 } 149 150 /// Arrange the argument and result information for a call to an 151 /// unknown C++ non-static member function of the given abstract type. 152 /// (Zero value of RD means we don't have any meaningful "this" argument type, 153 /// so fall back to a generic pointer type). 154 /// The member function must be an ordinary function, i.e. not a 155 /// constructor or destructor. 156 const CGFunctionInfo & 157 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, 158 const FunctionProtoType *FTP) { 159 SmallVector<CanQualType, 16> argTypes; 160 161 // Add the 'this' pointer. 162 if (RD) 163 argTypes.push_back(GetThisType(Context, RD)); 164 else 165 argTypes.push_back(Context.VoidPtrTy); 166 167 return ::arrangeLLVMFunctionInfo( 168 *this, true, argTypes, 169 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>()); 170 } 171 172 /// Arrange the argument and result information for a declaration or 173 /// definition of the given C++ non-static member function. The 174 /// member function must be an ordinary function, i.e. not a 175 /// constructor or destructor. 
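/// (Constructors and destructors are instead handled by
/// arrangeCXXStructorDeclaration below.)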
176 const CGFunctionInfo & 177 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { 178 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!"); 179 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!"); 180 181 CanQual<FunctionProtoType> prototype = GetFormalType(MD); 182 183 if (MD->isInstance()) { 184 // The abstract case is perfectly fine. 185 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD); 186 return arrangeCXXMethodType(ThisType, prototype.getTypePtr()); 187 } 188 189 return arrangeFreeFunctionType(prototype); 190 } 191 192 const CGFunctionInfo & 193 CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, 194 StructorType Type) { 195 196 SmallVector<CanQualType, 16> argTypes; 197 argTypes.push_back(GetThisType(Context, MD->getParent())); 198 199 GlobalDecl GD; 200 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) { 201 GD = GlobalDecl(CD, toCXXCtorType(Type)); 202 } else { 203 auto *DD = dyn_cast<CXXDestructorDecl>(MD); 204 GD = GlobalDecl(DD, toCXXDtorType(Type)); 205 } 206 207 CanQual<FunctionProtoType> FTP = GetFormalType(MD); 208 209 // Add the formal parameters. 210 argTypes.append(FTP->param_type_begin(), FTP->param_type_end()); 211 212 TheCXXABI.buildStructorSignature(MD, Type, argTypes); 213 214 RequiredArgs required = 215 (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All); 216 217 FunctionType::ExtInfo extInfo = FTP->getExtInfo(); 218 CanQualType resultType = TheCXXABI.HasThisReturn(GD) 219 ? argTypes.front() 220 : TheCXXABI.hasMostDerivedReturn(GD) 221 ? CGM.getContext().VoidPtrTy 222 : Context.VoidTy; 223 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true, 224 /*chainCall=*/false, argTypes, extInfo, 225 required); 226 } 227 228 /// Arrange a call to a C++ method, passing the given arguments. 229 const CGFunctionInfo & 230 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args, 231 const CXXConstructorDecl *D, 232 CXXCtorType CtorKind, 233 unsigned ExtraArgs) { 234 // FIXME: Kill copy. 235 SmallVector<CanQualType, 16> ArgTypes; 236 for (const auto &Arg : args) 237 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 238 239 CanQual<FunctionProtoType> FPT = GetFormalType(D); 240 RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs); 241 GlobalDecl GD(D, CtorKind); 242 CanQualType ResultType = TheCXXABI.HasThisReturn(GD) 243 ? ArgTypes.front() 244 : TheCXXABI.hasMostDerivedReturn(GD) 245 ? CGM.getContext().VoidPtrTy 246 : Context.VoidTy; 247 248 FunctionType::ExtInfo Info = FPT->getExtInfo(); 249 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true, 250 /*chainCall=*/false, ArgTypes, Info, 251 Required); 252 } 253 254 /// Arrange the argument and result information for the declaration or 255 /// definition of the given function. 256 const CGFunctionInfo & 257 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { 258 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) 259 if (MD->isInstance()) 260 return arrangeCXXMethodDeclaration(MD); 261 262 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); 263 264 assert(isa<FunctionType>(FTy)); 265 266 // When declaring a function without a prototype, always use a 267 // non-variadic type. 
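  // (Note the asymmetry with arrangeFreeFunctionType above, which treats an
  // unprototyped function type used for a call as variadic.)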
268 if (isa<FunctionNoProtoType>(FTy)) { 269 CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>(); 270 return arrangeLLVMFunctionInfo( 271 noProto->getReturnType(), /*instanceMethod=*/false, 272 /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All); 273 } 274 275 assert(isa<FunctionProtoType>(FTy)); 276 return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>()); 277 } 278 279 /// Arrange the argument and result information for the declaration or 280 /// definition of an Objective-C method. 281 const CGFunctionInfo & 282 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) { 283 // It happens that this is the same as a call with no optional 284 // arguments, except also using the formal 'self' type. 285 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType()); 286 } 287 288 /// Arrange the argument and result information for the function type 289 /// through which to perform a send to the given Objective-C method, 290 /// using the given receiver type. The receiver type is not always 291 /// the 'self' type of the method or even an Objective-C pointer type. 292 /// This is *not* the right method for actually performing such a 293 /// message send, due to the possibility of optional arguments. 294 const CGFunctionInfo & 295 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, 296 QualType receiverType) { 297 SmallVector<CanQualType, 16> argTys; 298 argTys.push_back(Context.getCanonicalParamType(receiverType)); 299 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType())); 300 // FIXME: Kill copy? 301 for (const auto *I : MD->params()) { 302 argTys.push_back(Context.getCanonicalParamType(I->getType())); 303 } 304 305 FunctionType::ExtInfo einfo; 306 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows(); 307 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows)); 308 309 if (getContext().getLangOpts().ObjCAutoRefCount && 310 MD->hasAttr<NSReturnsRetainedAttr>()) 311 einfo = einfo.withProducesResult(true); 312 313 RequiredArgs required = 314 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All); 315 316 return arrangeLLVMFunctionInfo( 317 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false, 318 /*chainCall=*/false, argTys, einfo, required); 319 } 320 321 const CGFunctionInfo & 322 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { 323 // FIXME: Do we need to handle ObjCMethodDecl? 324 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); 325 326 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) 327 return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType())); 328 329 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) 330 return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType())); 331 332 return arrangeFunctionDeclaration(FD); 333 } 334 335 /// Arrange a thunk that takes 'this' as the first parameter followed by 336 /// varargs. Return a void pointer, regardless of the actual return type. 337 /// The body of the thunk will end in a musttail call to a function of the 338 /// correct type, and the caller will bitcast the function to the correct 339 /// prototype. 
340 const CGFunctionInfo & 341 CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) { 342 assert(MD->isVirtual() && "only virtual memptrs have thunks"); 343 CanQual<FunctionProtoType> FTP = GetFormalType(MD); 344 CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) }; 345 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false, 346 /*chainCall=*/false, ArgTys, 347 FTP->getExtInfo(), RequiredArgs(1)); 348 } 349 350 const CGFunctionInfo & 351 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD, 352 CXXCtorType CT) { 353 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); 354 355 CanQual<FunctionProtoType> FTP = GetFormalType(CD); 356 SmallVector<CanQualType, 2> ArgTys; 357 const CXXRecordDecl *RD = CD->getParent(); 358 ArgTys.push_back(GetThisType(Context, RD)); 359 if (CT == Ctor_CopyingClosure) 360 ArgTys.push_back(*FTP->param_type_begin()); 361 if (RD->getNumVBases() > 0) 362 ArgTys.push_back(Context.IntTy); 363 CallingConv CC = Context.getDefaultCallingConvention( 364 /*IsVariadic=*/false, /*IsCXXMethod=*/true); 365 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true, 366 /*chainCall=*/false, ArgTys, 367 FunctionType::ExtInfo(CC), RequiredArgs::All); 368 } 369 370 /// Arrange a call as unto a free function, except possibly with an 371 /// additional number of formal parameters considered required. 372 static const CGFunctionInfo & 373 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, 374 CodeGenModule &CGM, 375 const CallArgList &args, 376 const FunctionType *fnType, 377 unsigned numExtraRequiredArgs, 378 bool chainCall) { 379 assert(args.size() >= numExtraRequiredArgs); 380 381 // In most cases, there are no optional arguments. 382 RequiredArgs required = RequiredArgs::All; 383 384 // If we have a variadic prototype, the required arguments are the 385 // extra prefix plus the arguments in the prototype. 386 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) { 387 if (proto->isVariadic()) 388 required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs); 389 390 // If we don't have a prototype at all, but we're supposed to 391 // explicitly use the variadic convention for unprototyped calls, 392 // treat all of the arguments as required but preserve the nominal 393 // possibility of variadics. 394 } else if (CGM.getTargetCodeGenInfo() 395 .isNoProtoCallVariadic(args, 396 cast<FunctionNoProtoType>(fnType))) { 397 required = RequiredArgs(args.size()); 398 } 399 400 // FIXME: Kill copy. 401 SmallVector<CanQualType, 16> argTypes; 402 for (const auto &arg : args) 403 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); 404 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()), 405 /*instanceMethod=*/false, chainCall, 406 argTypes, fnType->getExtInfo(), required); 407 } 408 409 /// Figure out the rules for calling a function with the given formal 410 /// type using the given arguments. The arguments are necessary 411 /// because the function might be unprototyped, in which case it's 412 /// target-dependent in crazy ways. 413 const CGFunctionInfo & 414 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args, 415 const FunctionType *fnType, 416 bool chainCall) { 417 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 418 chainCall ? 1 : 0, chainCall); 419 } 420 421 /// A block function call is essentially a free-function call with an 422 /// extra implicit argument. 
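/// (The extra implicit argument is the block literal itself, which is passed
/// first; that is why one extra required argument is requested below.)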
423 const CGFunctionInfo & 424 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args, 425 const FunctionType *fnType) { 426 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1, 427 /*chainCall=*/false); 428 } 429 430 const CGFunctionInfo & 431 CodeGenTypes::arrangeFreeFunctionCall(QualType resultType, 432 const CallArgList &args, 433 FunctionType::ExtInfo info, 434 RequiredArgs required) { 435 // FIXME: Kill copy. 436 SmallVector<CanQualType, 16> argTypes; 437 for (const auto &Arg : args) 438 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 439 return arrangeLLVMFunctionInfo( 440 GetReturnType(resultType), /*instanceMethod=*/false, 441 /*chainCall=*/false, argTypes, info, required); 442 } 443 444 /// Arrange a call to a C++ method, passing the given arguments. 445 const CGFunctionInfo & 446 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args, 447 const FunctionProtoType *FPT, 448 RequiredArgs required) { 449 // FIXME: Kill copy. 450 SmallVector<CanQualType, 16> argTypes; 451 for (const auto &Arg : args) 452 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 453 454 FunctionType::ExtInfo info = FPT->getExtInfo(); 455 return arrangeLLVMFunctionInfo( 456 GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true, 457 /*chainCall=*/false, argTypes, info, required); 458 } 459 460 const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration( 461 QualType resultType, const FunctionArgList &args, 462 const FunctionType::ExtInfo &info, bool isVariadic) { 463 // FIXME: Kill copy. 464 SmallVector<CanQualType, 16> argTypes; 465 for (auto Arg : args) 466 argTypes.push_back(Context.getCanonicalParamType(Arg->getType())); 467 468 RequiredArgs required = 469 (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All); 470 return arrangeLLVMFunctionInfo( 471 GetReturnType(resultType), /*instanceMethod=*/false, 472 /*chainCall=*/false, argTypes, info, required); 473 } 474 475 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() { 476 return arrangeLLVMFunctionInfo( 477 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false, 478 None, FunctionType::ExtInfo(), RequiredArgs::All); 479 } 480 481 /// Arrange the argument and result information for an abstract value 482 /// of a given function type. This is the method which all of the 483 /// above functions ultimately defer to. 484 const CGFunctionInfo & 485 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType, 486 bool instanceMethod, 487 bool chainCall, 488 ArrayRef<CanQualType> argTypes, 489 FunctionType::ExtInfo info, 490 RequiredArgs required) { 491 assert(std::all_of(argTypes.begin(), argTypes.end(), 492 std::mem_fun_ref(&CanQualType::isCanonicalAsParam))); 493 494 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC()); 495 496 // Lookup or create unique function info. 497 llvm::FoldingSetNodeID ID; 498 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required, 499 resultType, argTypes); 500 501 void *insertPos = nullptr; 502 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos); 503 if (FI) 504 return *FI; 505 506 // Construct the function info. We co-allocate the ArgInfos. 507 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info, 508 resultType, argTypes, required); 509 FunctionInfos.InsertNode(FI, insertPos); 510 511 bool inserted = FunctionsBeingProcessed.insert(FI).second; 512 (void)inserted; 513 assert(inserted && "Recursively being processed?"); 514 515 // Compute ABI information. 
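  // The target's ABIInfo classifies the return value and each argument with
  // one of the ABIArgInfo kinds handled below (Direct, Extend, Indirect,
  // Ignore, Expand, InAlloca) and, where applicable, a coercion type.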
516 getABIInfo().computeInfo(*FI); 517 518 // Loop over all of the computed argument and return value info. If any of 519 // them are direct or extend without a specified coerce type, specify the 520 // default now. 521 ABIArgInfo &retInfo = FI->getReturnInfo(); 522 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) 523 retInfo.setCoerceToType(ConvertType(FI->getReturnType())); 524 525 for (auto &I : FI->arguments()) 526 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) 527 I.info.setCoerceToType(ConvertType(I.type)); 528 529 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased; 530 assert(erased && "Not in set?"); 531 532 return *FI; 533 } 534 535 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, 536 bool instanceMethod, 537 bool chainCall, 538 const FunctionType::ExtInfo &info, 539 CanQualType resultType, 540 ArrayRef<CanQualType> argTypes, 541 RequiredArgs required) { 542 void *buffer = operator new(sizeof(CGFunctionInfo) + 543 sizeof(ArgInfo) * (argTypes.size() + 1)); 544 CGFunctionInfo *FI = new(buffer) CGFunctionInfo(); 545 FI->CallingConvention = llvmCC; 546 FI->EffectiveCallingConvention = llvmCC; 547 FI->ASTCallingConvention = info.getCC(); 548 FI->InstanceMethod = instanceMethod; 549 FI->ChainCall = chainCall; 550 FI->NoReturn = info.getNoReturn(); 551 FI->ReturnsRetained = info.getProducesResult(); 552 FI->Required = required; 553 FI->HasRegParm = info.getHasRegParm(); 554 FI->RegParm = info.getRegParm(); 555 FI->ArgStruct = nullptr; 556 FI->NumArgs = argTypes.size(); 557 FI->getArgsBuffer()[0].type = resultType; 558 for (unsigned i = 0, e = argTypes.size(); i != e; ++i) 559 FI->getArgsBuffer()[i + 1].type = argTypes[i]; 560 return FI; 561 } 562 563 /***/ 564 565 namespace { 566 // ABIArgInfo::Expand implementation. 567 568 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded. 569 struct TypeExpansion { 570 enum TypeExpansionKind { 571 // Elements of constant arrays are expanded recursively. 572 TEK_ConstantArray, 573 // Record fields are expanded recursively (but if record is a union, only 574 // the field with the largest size is expanded). 575 TEK_Record, 576 // For complex types, real and imaginary parts are expanded recursively. 577 TEK_Complex, 578 // All other types are not expandable. 
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
671 if (FD->isBitField() && FD->getBitWidthValue(Context) == 0) 672 continue; 673 assert(!FD->isBitField() && 674 "Cannot expand structure with bit-field members."); 675 Fields.push_back(FD); 676 } 677 } 678 return llvm::make_unique<RecordExpansion>(std::move(Bases), 679 std::move(Fields)); 680 } 681 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 682 return llvm::make_unique<ComplexExpansion>(CT->getElementType()); 683 } 684 return llvm::make_unique<NoExpansion>(); 685 } 686 687 static int getExpansionSize(QualType Ty, const ASTContext &Context) { 688 auto Exp = getTypeExpansion(Ty, Context); 689 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 690 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context); 691 } 692 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 693 int Res = 0; 694 for (auto BS : RExp->Bases) 695 Res += getExpansionSize(BS->getType(), Context); 696 for (auto FD : RExp->Fields) 697 Res += getExpansionSize(FD->getType(), Context); 698 return Res; 699 } 700 if (isa<ComplexExpansion>(Exp.get())) 701 return 2; 702 assert(isa<NoExpansion>(Exp.get())); 703 return 1; 704 } 705 706 void 707 CodeGenTypes::getExpandedTypes(QualType Ty, 708 SmallVectorImpl<llvm::Type *>::iterator &TI) { 709 auto Exp = getTypeExpansion(Ty, Context); 710 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 711 for (int i = 0, n = CAExp->NumElts; i < n; i++) { 712 getExpandedTypes(CAExp->EltTy, TI); 713 } 714 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 715 for (auto BS : RExp->Bases) 716 getExpandedTypes(BS->getType(), TI); 717 for (auto FD : RExp->Fields) 718 getExpandedTypes(FD->getType(), TI); 719 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) { 720 llvm::Type *EltTy = ConvertType(CExp->EltTy); 721 *TI++ = EltTy; 722 *TI++ = EltTy; 723 } else { 724 assert(isa<NoExpansion>(Exp.get())); 725 *TI++ = ConvertType(Ty); 726 } 727 } 728 729 void CodeGenFunction::ExpandTypeFromArgs( 730 QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) { 731 assert(LV.isSimple() && 732 "Unexpected non-simple lvalue during struct expansion."); 733 734 auto Exp = getTypeExpansion(Ty, getContext()); 735 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 736 for (int i = 0, n = CAExp->NumElts; i < n; i++) { 737 llvm::Value *EltAddr = 738 Builder.CreateConstGEP2_32(nullptr, LV.getAddress(), 0, i); 739 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); 740 ExpandTypeFromArgs(CAExp->EltTy, LV, AI); 741 } 742 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 743 llvm::Value *This = LV.getAddress(); 744 for (const CXXBaseSpecifier *BS : RExp->Bases) { 745 // Perform a single step derived-to-base conversion. 746 llvm::Value *Base = 747 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, 748 /*NullCheckValue=*/false, SourceLocation()); 749 LValue SubLV = MakeAddrLValue(Base, BS->getType()); 750 751 // Recurse onto bases. 752 ExpandTypeFromArgs(BS->getType(), SubLV, AI); 753 } 754 for (auto FD : RExp->Fields) { 755 // FIXME: What are the right qualifiers here? 
756 LValue SubLV = EmitLValueForField(LV, FD); 757 ExpandTypeFromArgs(FD->getType(), SubLV, AI); 758 } 759 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) { 760 llvm::Value *RealAddr = 761 Builder.CreateStructGEP(nullptr, LV.getAddress(), 0, "real"); 762 EmitStoreThroughLValue(RValue::get(*AI++), 763 MakeAddrLValue(RealAddr, CExp->EltTy)); 764 llvm::Value *ImagAddr = 765 Builder.CreateStructGEP(nullptr, LV.getAddress(), 1, "imag"); 766 EmitStoreThroughLValue(RValue::get(*AI++), 767 MakeAddrLValue(ImagAddr, CExp->EltTy)); 768 } else { 769 assert(isa<NoExpansion>(Exp.get())); 770 EmitStoreThroughLValue(RValue::get(*AI++), LV); 771 } 772 } 773 774 void CodeGenFunction::ExpandTypeToArgs( 775 QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy, 776 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) { 777 auto Exp = getTypeExpansion(Ty, getContext()); 778 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 779 llvm::Value *Addr = RV.getAggregateAddr(); 780 for (int i = 0, n = CAExp->NumElts; i < n; i++) { 781 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(nullptr, Addr, 0, i); 782 RValue EltRV = 783 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()); 784 ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos); 785 } 786 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 787 llvm::Value *This = RV.getAggregateAddr(); 788 for (const CXXBaseSpecifier *BS : RExp->Bases) { 789 // Perform a single step derived-to-base conversion. 790 llvm::Value *Base = 791 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, 792 /*NullCheckValue=*/false, SourceLocation()); 793 RValue BaseRV = RValue::getAggregate(Base); 794 795 // Recurse onto bases. 796 ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs, 797 IRCallArgPos); 798 } 799 800 LValue LV = MakeAddrLValue(This, Ty); 801 for (auto FD : RExp->Fields) { 802 RValue FldRV = EmitRValueForField(LV, FD, SourceLocation()); 803 ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs, 804 IRCallArgPos); 805 } 806 } else if (isa<ComplexExpansion>(Exp.get())) { 807 ComplexPairTy CV = RV.getComplexVal(); 808 IRCallArgs[IRCallArgPos++] = CV.first; 809 IRCallArgs[IRCallArgPos++] = CV.second; 810 } else { 811 assert(isa<NoExpansion>(Exp.get())); 812 assert(RV.isScalar() && 813 "Unexpected non-scalar rvalue during struct expansion."); 814 815 // Insert a bitcast as needed. 816 llvm::Value *V = RV.getScalarVal(); 817 if (IRCallArgPos < IRFuncTy->getNumParams() && 818 V->getType() != IRFuncTy->getParamType(IRCallArgPos)) 819 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos)); 820 821 IRCallArgs[IRCallArgPos++] = V; 822 } 823 } 824 825 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are 826 /// accessing some number of bytes out of it, try to gep into the struct to get 827 /// at its inner goodness. Dive as deep as possible without entering an element 828 /// with an in-memory size smaller than DstSize. 829 static llvm::Value * 830 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr, 831 llvm::StructType *SrcSTy, 832 uint64_t DstSize, CodeGenFunction &CGF) { 833 // We can't dive into a zero-element struct. 834 if (SrcSTy->getNumElements() == 0) return SrcPtr; 835 836 llvm::Type *FirstElt = SrcSTy->getElementType(0); 837 838 // If the first elt is at least as large as what we're looking for, or if the 839 // first element is the same size as the whole struct, we can enter it. 
// The comparison must be made on the store size and not the alloca size.
// Using the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcSTy, SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}


/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
927 if (SrcTy == Ty) 928 return CGF.Builder.CreateLoad(SrcPtr); 929 930 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty); 931 932 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) { 933 SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); 934 SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 935 } 936 937 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); 938 939 // If the source and destination are integer or pointer types, just do an 940 // extension or truncation to the desired type. 941 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) && 942 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) { 943 llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr); 944 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF); 945 } 946 947 // If load is legal, just bitcast the src pointer. 948 if (SrcSize >= DstSize) { 949 // Generally SrcSize is never greater than DstSize, since this means we are 950 // losing bits. However, this can happen in cases where the structure has 951 // additional padding, for example due to a user specified alignment. 952 // 953 // FIXME: Assert that we aren't truncating non-padding bits when have access 954 // to that information. 955 llvm::Value *Casted = 956 CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty)); 957 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted); 958 // FIXME: Use better alignment / avoid requiring aligned load. 959 Load->setAlignment(1); 960 return Load; 961 } 962 963 // Otherwise do coercion through memory. This is stupid, but 964 // simple. 965 llvm::Value *Tmp = CGF.CreateTempAlloca(Ty); 966 llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy(); 967 llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy); 968 llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy); 969 // FIXME: Use better alignment. 970 CGF.Builder.CreateMemCpy(Casted, SrcCasted, 971 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize), 972 1, false); 973 return CGF.Builder.CreateLoad(Tmp); 974 } 975 976 // Function to store a first-class aggregate into memory. We prefer to 977 // store the elements rather than the aggregate to be more friendly to 978 // fast-isel. 979 // FIXME: Do we need to recurse here? 980 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, 981 llvm::Value *DestPtr, bool DestIsVolatile, 982 bool LowAlignment) { 983 // Prefer scalar stores to first-class aggregate stores. 984 if (llvm::StructType *STy = 985 dyn_cast<llvm::StructType>(Val->getType())) { 986 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 987 llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(STy, DestPtr, 0, i); 988 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i); 989 llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr, 990 DestIsVolatile); 991 if (LowAlignment) 992 SI->setAlignment(1); 993 } 994 } else { 995 llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile); 996 if (LowAlignment) 997 SI->setAlignment(1); 998 } 999 } 1000 1001 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, 1002 /// where the source and destination may have different types. 1003 /// 1004 /// This safely handles the case when the src type is larger than the 1005 /// destination type; the upper bits of the src will be lost. 
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    // FIXME: Use better alignment.
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             1, false);
  }
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ?
FI.getNumRequiredArgs() : FI.arg_size()) { 1094 construct(Context, FI, OnlyRequiredArgs); 1095 } 1096 1097 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } 1098 unsigned getInallocaArgNo() const { 1099 assert(hasInallocaArg()); 1100 return InallocaArgNo; 1101 } 1102 1103 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } 1104 unsigned getSRetArgNo() const { 1105 assert(hasSRetArg()); 1106 return SRetArgNo; 1107 } 1108 1109 unsigned totalIRArgs() const { return TotalIRArgs; } 1110 1111 bool hasPaddingArg(unsigned ArgNo) const { 1112 assert(ArgNo < ArgInfo.size()); 1113 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; 1114 } 1115 unsigned getPaddingArgNo(unsigned ArgNo) const { 1116 assert(hasPaddingArg(ArgNo)); 1117 return ArgInfo[ArgNo].PaddingArgIndex; 1118 } 1119 1120 /// Returns index of first IR argument corresponding to ArgNo, and their 1121 /// quantity. 1122 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const { 1123 assert(ArgNo < ArgInfo.size()); 1124 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, 1125 ArgInfo[ArgNo].NumberOfArgs); 1126 } 1127 1128 private: 1129 void construct(const ASTContext &Context, const CGFunctionInfo &FI, 1130 bool OnlyRequiredArgs); 1131 }; 1132 1133 void ClangToLLVMArgMapping::construct(const ASTContext &Context, 1134 const CGFunctionInfo &FI, 1135 bool OnlyRequiredArgs) { 1136 unsigned IRArgNo = 0; 1137 bool SwapThisWithSRet = false; 1138 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1139 1140 if (RetAI.getKind() == ABIArgInfo::Indirect) { 1141 SwapThisWithSRet = RetAI.isSRetAfterThis(); 1142 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; 1143 } 1144 1145 unsigned ArgNo = 0; 1146 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); 1147 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; 1148 ++I, ++ArgNo) { 1149 assert(I != FI.arg_end()); 1150 QualType ArgType = I->type; 1151 const ABIArgInfo &AI = I->info; 1152 // Collect data about IR arguments corresponding to Clang argument ArgNo. 1153 auto &IRArgs = ArgInfo[ArgNo]; 1154 1155 if (AI.getPaddingType()) 1156 IRArgs.PaddingArgIndex = IRArgNo++; 1157 1158 switch (AI.getKind()) { 1159 case ABIArgInfo::Extend: 1160 case ABIArgInfo::Direct: { 1161 // FIXME: handle sseregparm someday... 1162 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType()); 1163 if (AI.isDirect() && AI.getCanBeFlattened() && STy) { 1164 IRArgs.NumberOfArgs = STy->getNumElements(); 1165 } else { 1166 IRArgs.NumberOfArgs = 1; 1167 } 1168 break; 1169 } 1170 case ABIArgInfo::Indirect: 1171 IRArgs.NumberOfArgs = 1; 1172 break; 1173 case ABIArgInfo::Ignore: 1174 case ABIArgInfo::InAlloca: 1175 // ignore and inalloca doesn't have matching LLVM parameters. 1176 IRArgs.NumberOfArgs = 0; 1177 break; 1178 case ABIArgInfo::Expand: { 1179 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context); 1180 break; 1181 } 1182 } 1183 1184 if (IRArgs.NumberOfArgs > 0) { 1185 IRArgs.FirstArgIndex = IRArgNo; 1186 IRArgNo += IRArgs.NumberOfArgs; 1187 } 1188 1189 // Skip over the sret parameter when it comes second. We already handled it 1190 // above. 
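    // (The sret parameter comes second when isSRetAfterThis() is set, i.e.
    // when the ABI passes the sret pointer after 'this', as the Microsoft C++
    // ABI does for instance methods.)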
1191 if (IRArgNo == 1 && SwapThisWithSRet) 1192 IRArgNo++; 1193 } 1194 assert(ArgNo == ArgInfo.size()); 1195 1196 if (FI.usesInAlloca()) 1197 InallocaArgNo = IRArgNo++; 1198 1199 TotalIRArgs = IRArgNo; 1200 } 1201 } // namespace 1202 1203 /***/ 1204 1205 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { 1206 return FI.getReturnInfo().isIndirect(); 1207 } 1208 1209 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { 1210 return ReturnTypeUsesSRet(FI) && 1211 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); 1212 } 1213 1214 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { 1215 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { 1216 switch (BT->getKind()) { 1217 default: 1218 return false; 1219 case BuiltinType::Float: 1220 return getTarget().useObjCFPRetForRealType(TargetInfo::Float); 1221 case BuiltinType::Double: 1222 return getTarget().useObjCFPRetForRealType(TargetInfo::Double); 1223 case BuiltinType::LongDouble: 1224 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble); 1225 } 1226 } 1227 1228 return false; 1229 } 1230 1231 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { 1232 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { 1233 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { 1234 if (BT->getKind() == BuiltinType::LongDouble) 1235 return getTarget().useObjCFP2RetForComplexLongDouble(); 1236 } 1237 } 1238 1239 return false; 1240 } 1241 1242 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { 1243 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); 1244 return GetFunctionType(FI); 1245 } 1246 1247 llvm::FunctionType * 1248 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { 1249 1250 bool Inserted = FunctionsBeingProcessed.insert(&FI).second; 1251 (void)Inserted; 1252 assert(Inserted && "Recursively being processed?"); 1253 1254 llvm::Type *resultType = nullptr; 1255 const ABIArgInfo &retAI = FI.getReturnInfo(); 1256 switch (retAI.getKind()) { 1257 case ABIArgInfo::Expand: 1258 llvm_unreachable("Invalid ABI kind for return argument"); 1259 1260 case ABIArgInfo::Extend: 1261 case ABIArgInfo::Direct: 1262 resultType = retAI.getCoerceToType(); 1263 break; 1264 1265 case ABIArgInfo::InAlloca: 1266 if (retAI.getInAllocaSRet()) { 1267 // sret things on win32 aren't void, they return the sret pointer. 1268 QualType ret = FI.getReturnType(); 1269 llvm::Type *ty = ConvertType(ret); 1270 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1271 resultType = llvm::PointerType::get(ty, addressSpace); 1272 } else { 1273 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1274 } 1275 break; 1276 1277 case ABIArgInfo::Indirect: { 1278 assert(!retAI.getIndirectAlign() && "Align unused on indirect return."); 1279 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1280 break; 1281 } 1282 1283 case ABIArgInfo::Ignore: 1284 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1285 break; 1286 } 1287 1288 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1289 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1290 1291 // Add type for sret argument. 1292 if (IRFunctionArgs.hasSRetArg()) { 1293 QualType Ret = FI.getReturnType(); 1294 llvm::Type *Ty = ConvertType(Ret); 1295 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1296 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1297 llvm::PointerType::get(Ty, AddressSpace); 1298 } 1299 1300 // Add type for inalloca argument. 
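  // (If present, the inalloca argument is a pointer to the packed argument
  // struct in FI.getArgStruct(); this mechanism is used by the 32-bit
  // Microsoft ABI.)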
1301 if (IRFunctionArgs.hasInallocaArg()) { 1302 auto ArgStruct = FI.getArgStruct(); 1303 assert(ArgStruct); 1304 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1305 } 1306 1307 // Add in all of the required arguments. 1308 unsigned ArgNo = 0; 1309 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1310 ie = it + FI.getNumRequiredArgs(); 1311 for (; it != ie; ++it, ++ArgNo) { 1312 const ABIArgInfo &ArgInfo = it->info; 1313 1314 // Insert a padding type to ensure proper alignment. 1315 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1316 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1317 ArgInfo.getPaddingType(); 1318 1319 unsigned FirstIRArg, NumIRArgs; 1320 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1321 1322 switch (ArgInfo.getKind()) { 1323 case ABIArgInfo::Ignore: 1324 case ABIArgInfo::InAlloca: 1325 assert(NumIRArgs == 0); 1326 break; 1327 1328 case ABIArgInfo::Indirect: { 1329 assert(NumIRArgs == 1); 1330 // indirect arguments are always on the stack, which is addr space #0. 1331 llvm::Type *LTy = ConvertTypeForMem(it->type); 1332 ArgTypes[FirstIRArg] = LTy->getPointerTo(); 1333 break; 1334 } 1335 1336 case ABIArgInfo::Extend: 1337 case ABIArgInfo::Direct: { 1338 // Fast-isel and the optimizer generally like scalar values better than 1339 // FCAs, so we flatten them if this is safe to do for this argument. 1340 llvm::Type *argType = ArgInfo.getCoerceToType(); 1341 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 1342 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 1343 assert(NumIRArgs == st->getNumElements()); 1344 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1345 ArgTypes[FirstIRArg + i] = st->getElementType(i); 1346 } else { 1347 assert(NumIRArgs == 1); 1348 ArgTypes[FirstIRArg] = argType; 1349 } 1350 break; 1351 } 1352 1353 case ABIArgInfo::Expand: 1354 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1355 getExpandedTypes(it->type, ArgTypesIter); 1356 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1357 break; 1358 } 1359 } 1360 1361 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1362 assert(Erased && "Not in set?"); 1363 1364 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); 1365 } 1366 1367 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1368 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1369 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1370 1371 if (!isFuncTypeConvertible(FPT)) 1372 return llvm::StructType::get(getLLVMContext()); 1373 1374 const CGFunctionInfo *Info; 1375 if (isa<CXXDestructorDecl>(MD)) 1376 Info = 1377 &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType())); 1378 else 1379 Info = &arrangeCXXMethodDeclaration(MD); 1380 return GetFunctionType(*Info); 1381 } 1382 1383 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, 1384 const Decl *TargetDecl, 1385 AttributeListType &PAL, 1386 unsigned &CallingConv, 1387 bool AttrOnCallSite) { 1388 llvm::AttrBuilder FuncAttrs; 1389 llvm::AttrBuilder RetAttrs; 1390 bool HasOptnone = false; 1391 1392 CallingConv = FI.getEffectiveCallingConvention(); 1393 1394 if (FI.isNoReturn()) 1395 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1396 1397 // FIXME: handle sseregparm someday... 
1398 if (TargetDecl) { 1399 if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) 1400 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); 1401 if (TargetDecl->hasAttr<NoThrowAttr>()) 1402 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1403 if (TargetDecl->hasAttr<NoReturnAttr>()) 1404 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1405 if (TargetDecl->hasAttr<NoDuplicateAttr>()) 1406 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); 1407 1408 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 1409 const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>(); 1410 if (FPT && FPT->isNothrow(getContext())) 1411 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1412 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function. 1413 // These attributes are not inherited by overloads. 1414 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn); 1415 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual())) 1416 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1417 } 1418 1419 // 'const' and 'pure' attribute functions are also nounwind. 1420 if (TargetDecl->hasAttr<ConstAttr>()) { 1421 FuncAttrs.addAttribute(llvm::Attribute::ReadNone); 1422 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1423 } else if (TargetDecl->hasAttr<PureAttr>()) { 1424 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly); 1425 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1426 } 1427 if (TargetDecl->hasAttr<RestrictAttr>()) 1428 RetAttrs.addAttribute(llvm::Attribute::NoAlias); 1429 if (TargetDecl->hasAttr<ReturnsNonNullAttr>()) 1430 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1431 1432 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>(); 1433 } 1434 1435 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 1436 if (!HasOptnone) { 1437 if (CodeGenOpts.OptimizeSize) 1438 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1439 if (CodeGenOpts.OptimizeSize == 2) 1440 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1441 } 1442 1443 if (CodeGenOpts.DisableRedZone) 1444 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1445 if (CodeGenOpts.NoImplicitFloat) 1446 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1447 if (CodeGenOpts.EnableSegmentedStacks && 1448 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>())) 1449 FuncAttrs.addAttribute("split-stack"); 1450 1451 if (AttrOnCallSite) { 1452 // Attributes that should go on the call site only. 1453 if (!CodeGenOpts.SimplifyLibCalls) 1454 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); 1455 } else { 1456 // Attributes that should go on the function, but not the call site. 
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (!CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("no-realign-stack");

    // Add target-cpu and target-features attributes if they differ from the
    // defaults.
    std::string &CPU = getTarget().getTargetOpts().CPU;
    if (CPU != "" && CPU != getTarget().getTriple().getArchName())
      FuncAttrs.addAttribute("target-cpu", getTarget().getTargetOpts().CPU);

    // TODO: FeaturesAsWritten gets us the features on the command line,
    // for canonicalization purposes we might want to avoid putting features
    // in the target-features set if we know it'll be one of the default
    // features in the backend, e.g. corei7-avx and +avx.
    std::vector<std::string> &Features =
        getTarget().getTargetOpts().FeaturesAsWritten;
    if (!Features.empty()) {
      std::stringstream S;
      std::copy(Features.begin(), Features.end(),
                std::ostream_iterator<std::string>(S, ","));
      // The drop_back gets rid of the trailing comma.
      FuncAttrs.addAttribute("target-features",
                             StringRef(S.str()).drop_back(1));
    }
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    // FALL THROUGH
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect: {
    // inalloca and sret disable readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
    QualType PTy = RefTy->getPointeeType();
    if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
      RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                          .getQuantity());
    else if (getContext().getTargetAddressSpace(PTy) == 0)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
  }

  // Attach return attributes.
1544 if (RetAttrs.hasAttributes()) { 1545 PAL.push_back(llvm::AttributeSet::get( 1546 getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs)); 1547 } 1548 1549 // Attach attributes to sret. 1550 if (IRFunctionArgs.hasSRetArg()) { 1551 llvm::AttrBuilder SRETAttrs; 1552 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 1553 if (RetAI.getInReg()) 1554 SRETAttrs.addAttribute(llvm::Attribute::InReg); 1555 PAL.push_back(llvm::AttributeSet::get( 1556 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs)); 1557 } 1558 1559 // Attach attributes to inalloca argument. 1560 if (IRFunctionArgs.hasInallocaArg()) { 1561 llvm::AttrBuilder Attrs; 1562 Attrs.addAttribute(llvm::Attribute::InAlloca); 1563 PAL.push_back(llvm::AttributeSet::get( 1564 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs)); 1565 } 1566 1567 unsigned ArgNo = 0; 1568 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 1569 E = FI.arg_end(); 1570 I != E; ++I, ++ArgNo) { 1571 QualType ParamType = I->type; 1572 const ABIArgInfo &AI = I->info; 1573 llvm::AttrBuilder Attrs; 1574 1575 // Add attribute for padding argument, if necessary. 1576 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 1577 if (AI.getPaddingInReg()) 1578 PAL.push_back(llvm::AttributeSet::get( 1579 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1, 1580 llvm::Attribute::InReg)); 1581 } 1582 1583 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 1584 // have the corresponding parameter variable. It doesn't make 1585 // sense to do it here because parameters are so messed up. 1586 switch (AI.getKind()) { 1587 case ABIArgInfo::Extend: 1588 if (ParamType->isSignedIntegerOrEnumerationType()) 1589 Attrs.addAttribute(llvm::Attribute::SExt); 1590 else if (ParamType->isUnsignedIntegerOrEnumerationType()) 1591 Attrs.addAttribute(llvm::Attribute::ZExt); 1592 // FALL THROUGH 1593 case ABIArgInfo::Direct: 1594 if (ArgNo == 0 && FI.isChainCall()) 1595 Attrs.addAttribute(llvm::Attribute::Nest); 1596 else if (AI.getInReg()) 1597 Attrs.addAttribute(llvm::Attribute::InReg); 1598 break; 1599 1600 case ABIArgInfo::Indirect: 1601 if (AI.getInReg()) 1602 Attrs.addAttribute(llvm::Attribute::InReg); 1603 1604 if (AI.getIndirectByVal()) 1605 Attrs.addAttribute(llvm::Attribute::ByVal); 1606 1607 Attrs.addAlignmentAttr(AI.getIndirectAlign()); 1608 1609 // byval disables readnone and readonly. 1610 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1611 .removeAttribute(llvm::Attribute::ReadNone); 1612 break; 1613 1614 case ABIArgInfo::Ignore: 1615 case ABIArgInfo::Expand: 1616 continue; 1617 1618 case ABIArgInfo::InAlloca: 1619 // inalloca disables readnone and readonly. 
1620 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1621 .removeAttribute(llvm::Attribute::ReadNone); 1622 continue; 1623 } 1624 1625 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 1626 QualType PTy = RefTy->getPointeeType(); 1627 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1628 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1629 .getQuantity()); 1630 else if (getContext().getTargetAddressSpace(PTy) == 0) 1631 Attrs.addAttribute(llvm::Attribute::NonNull); 1632 } 1633 1634 if (Attrs.hasAttributes()) { 1635 unsigned FirstIRArg, NumIRArgs; 1636 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1637 for (unsigned i = 0; i < NumIRArgs; i++) 1638 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), 1639 FirstIRArg + i + 1, Attrs)); 1640 } 1641 } 1642 assert(ArgNo == FI.arg_size()); 1643 1644 if (FuncAttrs.hasAttributes()) 1645 PAL.push_back(llvm:: 1646 AttributeSet::get(getLLVMContext(), 1647 llvm::AttributeSet::FunctionIndex, 1648 FuncAttrs)); 1649 } 1650 1651 /// An argument came in as a promoted argument; demote it back to its 1652 /// declared type. 1653 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 1654 const VarDecl *var, 1655 llvm::Value *value) { 1656 llvm::Type *varType = CGF.ConvertType(var->getType()); 1657 1658 // This can happen with promotions that actually don't change the 1659 // underlying type, like the enum promotions. 1660 if (value->getType() == varType) return value; 1661 1662 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 1663 && "unexpected promotion type"); 1664 1665 if (isa<llvm::IntegerType>(varType)) 1666 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 1667 1668 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 1669 } 1670 1671 /// Returns the attribute (either parameter attribute, or function 1672 /// attribute), which declares argument ArgNo to be non-null. 1673 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 1674 QualType ArgType, unsigned ArgNo) { 1675 // FIXME: __attribute__((nonnull)) can also be applied to: 1676 // - references to pointers, where the pointee is known to be 1677 // nonnull (apparently a Clang extension) 1678 // - transparent unions containing pointers 1679 // In the former case, LLVM IR cannot represent the constraint. In 1680 // the latter case, we have no guarantee that the transparent union 1681 // is in fact passed as a pointer. 1682 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 1683 return nullptr; 1684 // First, check attribute on parameter itself. 1685 if (PVD) { 1686 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 1687 return ParmNNAttr; 1688 } 1689 // Check function attributes. 1690 if (!FD) 1691 return nullptr; 1692 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { 1693 if (NNAttr->isNonNull(ArgNo)) 1694 return NNAttr; 1695 } 1696 return nullptr; 1697 } 1698 1699 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 1700 llvm::Function *Fn, 1701 const FunctionArgList &Args) { 1702 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) 1703 // Naked functions don't have prologues. 1704 return; 1705 1706 // If this is an implicit-return-zero function, go ahead and 1707 // initialize the return value. TODO: it might be nice to have 1708 // a more general mechanism for this that didn't require synthesized 1709 // return statements. 
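// (The canonical example is C++'s 'int main()': flowing off the end must
// behave as 'return 0;', so hasImplicitReturnZero() is true for it and the
// store below seeds the return slot with zero.)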
1710 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
1711 if (FD->hasImplicitReturnZero()) {
1712 QualType RetTy = FD->getReturnType().getUnqualifiedType();
1713 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1714 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1715 Builder.CreateStore(Zero, ReturnValue);
1716 }
1717 }
1718
1719 // FIXME: We no longer need the types from FunctionArgList; lift up and
1720 // simplify.
1721
1722 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
1723 // Flattened function arguments.
1724 SmallVector<llvm::Argument *, 16> FnArgs;
1725 FnArgs.reserve(IRFunctionArgs.totalIRArgs());
1726 for (auto &Arg : Fn->args()) {
1727 FnArgs.push_back(&Arg);
1728 }
1729 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
1730
1731 // If we're using inalloca, all the memory arguments are GEPs off of the last
1732 // parameter, which is a pointer to the complete memory area.
1733 llvm::Value *ArgStruct = nullptr;
1734 if (IRFunctionArgs.hasInallocaArg()) {
1735 ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
1736 assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
1737 }
1738
1739 // Name the struct return parameter.
1740 if (IRFunctionArgs.hasSRetArg()) {
1741 auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
1742 AI->setName("agg.result");
1743 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
1744 llvm::Attribute::NoAlias));
1745 }
1746
1747 // Track if we received the parameter as a pointer (indirect, byval, or
1748 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
1749 // into a local alloca for us.
1750 enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
1751 typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
1752 SmallVector<ValueAndIsPtr, 16> ArgVals;
1753 ArgVals.reserve(Args.size());
1754
1755 // Create a pointer value for every parameter declaration. This usually
1756 // entails copying one or more LLVM IR arguments into an alloca. Don't push
1757 // any cleanups or do anything that might unwind. We do that separately, so
1758 // we can push the cleanups in the correct order for the ABI.
1759 assert(FI.arg_size() == Args.size() &&
1760 "Mismatch between function signature & arguments.");
1761 unsigned ArgNo = 0;
1762 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1763 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1764 i != e; ++i, ++info_it, ++ArgNo) {
1765 const VarDecl *Arg = *i;
1766 QualType Ty = info_it->type;
1767 const ABIArgInfo &ArgI = info_it->info;
1768
1769 bool isPromoted =
1770 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1771
1772 unsigned FirstIRArg, NumIRArgs;
1773 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1774
1775 switch (ArgI.getKind()) {
1776 case ABIArgInfo::InAlloca: {
1777 assert(NumIRArgs == 0);
1778 llvm::Value *V =
1779 Builder.CreateStructGEP(FI.getArgStruct(), ArgStruct,
1780 ArgI.getInAllocaFieldIndex(), Arg->getName());
1781 ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
1782 break;
1783 }
1784
1785 case ABIArgInfo::Indirect: {
1786 assert(NumIRArgs == 1);
1787 llvm::Value *V = FnArgs[FirstIRArg];
1788
1789 if (!hasScalarEvaluationKind(Ty)) {
1790 // Aggregates and complex variables are accessed by reference.
All we 1791 // need to do is realign the value, if requested 1792 if (ArgI.getIndirectRealign()) { 1793 llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce"); 1794 1795 // Copy from the incoming argument pointer to the temporary with the 1796 // appropriate alignment. 1797 // 1798 // FIXME: We should have a common utility for generating an aggregate 1799 // copy. 1800 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(); 1801 CharUnits Size = getContext().getTypeSizeInChars(Ty); 1802 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy); 1803 llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy); 1804 Builder.CreateMemCpy(Dst, 1805 Src, 1806 llvm::ConstantInt::get(IntPtrTy, 1807 Size.getQuantity()), 1808 ArgI.getIndirectAlign(), 1809 false); 1810 V = AlignedTemp; 1811 } 1812 ArgVals.push_back(ValueAndIsPtr(V, HavePointer)); 1813 } else { 1814 // Load scalar value from indirect argument. 1815 CharUnits Alignment = getContext().getTypeAlignInChars(Ty); 1816 V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty, 1817 Arg->getLocStart()); 1818 1819 if (isPromoted) 1820 V = emitArgumentDemotion(*this, Arg, V); 1821 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 1822 } 1823 break; 1824 } 1825 1826 case ABIArgInfo::Extend: 1827 case ABIArgInfo::Direct: { 1828 1829 // If we have the trivial case, handle it with no muss and fuss. 1830 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 1831 ArgI.getCoerceToType() == ConvertType(Ty) && 1832 ArgI.getDirectOffset() == 0) { 1833 assert(NumIRArgs == 1); 1834 auto AI = FnArgs[FirstIRArg]; 1835 llvm::Value *V = AI; 1836 1837 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { 1838 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), 1839 PVD->getFunctionScopeIndex())) 1840 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1841 AI->getArgNo() + 1, 1842 llvm::Attribute::NonNull)); 1843 1844 QualType OTy = PVD->getOriginalType(); 1845 if (const auto *ArrTy = 1846 getContext().getAsConstantArrayType(OTy)) { 1847 // A C99 array parameter declaration with the static keyword also 1848 // indicates dereferenceability, and if the size is constant we can 1849 // use the dereferenceable attribute (which requires the size in 1850 // bytes). 1851 if (ArrTy->getSizeModifier() == ArrayType::Static) { 1852 QualType ETy = ArrTy->getElementType(); 1853 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 1854 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 1855 ArrSize) { 1856 llvm::AttrBuilder Attrs; 1857 Attrs.addDereferenceableAttr( 1858 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize); 1859 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1860 AI->getArgNo() + 1, Attrs)); 1861 } else if (getContext().getTargetAddressSpace(ETy) == 0) { 1862 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1863 AI->getArgNo() + 1, 1864 llvm::Attribute::NonNull)); 1865 } 1866 } 1867 } else if (const auto *ArrTy = 1868 getContext().getAsVariableArrayType(OTy)) { 1869 // For C99 VLAs with the static keyword, we don't know the size so 1870 // we can't use the dereferenceable attribute, but in addrspace(0) 1871 // we know that it must be nonnull. 
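// (Illustration, assuming a 4-byte int: 'void f(int a[static 4])' hits the
// constant-array case above and marks %a dereferenceable(16), while
// 'void g(int n, int a[static n])' only gets the nonnull below.)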
1872 if (ArrTy->getSizeModifier() == VariableArrayType::Static && 1873 !getContext().getTargetAddressSpace(ArrTy->getElementType())) 1874 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1875 AI->getArgNo() + 1, 1876 llvm::Attribute::NonNull)); 1877 } 1878 1879 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 1880 if (!AVAttr) 1881 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 1882 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 1883 if (AVAttr) { 1884 llvm::Value *AlignmentValue = 1885 EmitScalarExpr(AVAttr->getAlignment()); 1886 llvm::ConstantInt *AlignmentCI = 1887 cast<llvm::ConstantInt>(AlignmentValue); 1888 unsigned Alignment = 1889 std::min((unsigned) AlignmentCI->getZExtValue(), 1890 +llvm::Value::MaximumAlignment); 1891 1892 llvm::AttrBuilder Attrs; 1893 Attrs.addAlignmentAttr(Alignment); 1894 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1895 AI->getArgNo() + 1, Attrs)); 1896 } 1897 } 1898 1899 if (Arg->getType().isRestrictQualified()) 1900 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1901 AI->getArgNo() + 1, 1902 llvm::Attribute::NoAlias)); 1903 1904 // Ensure the argument is the correct type. 1905 if (V->getType() != ArgI.getCoerceToType()) 1906 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 1907 1908 if (isPromoted) 1909 V = emitArgumentDemotion(*this, Arg, V); 1910 1911 if (const CXXMethodDecl *MD = 1912 dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) { 1913 if (MD->isVirtual() && Arg == CXXABIThisDecl) 1914 V = CGM.getCXXABI(). 1915 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V); 1916 } 1917 1918 // Because of merging of function types from multiple decls it is 1919 // possible for the type of an argument to not match the corresponding 1920 // type in the function type. Since we are codegening the callee 1921 // in here, add a cast to the argument type. 1922 llvm::Type *LTy = ConvertType(Arg->getType()); 1923 if (V->getType() != LTy) 1924 V = Builder.CreateBitCast(V, LTy); 1925 1926 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 1927 break; 1928 } 1929 1930 llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName()); 1931 1932 // The alignment we need to use is the max of the requested alignment for 1933 // the argument plus the alignment required by our access code below. 1934 unsigned AlignmentToUse = 1935 CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType()); 1936 AlignmentToUse = std::max(AlignmentToUse, 1937 (unsigned)getContext().getDeclAlign(Arg).getQuantity()); 1938 1939 Alloca->setAlignment(AlignmentToUse); 1940 llvm::Value *V = Alloca; 1941 llvm::Value *Ptr = V; // Pointer to store into. 1942 1943 // If the value is offset in memory, apply the offset now. 1944 if (unsigned Offs = ArgI.getDirectOffset()) { 1945 Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy()); 1946 Ptr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), Ptr, Offs); 1947 Ptr = Builder.CreateBitCast(Ptr, 1948 llvm::PointerType::getUnqual(ArgI.getCoerceToType())); 1949 } 1950 1951 // Fast-isel and the optimizer generally like scalar values better than 1952 // FCAs, so we flatten them if this is safe to do for this argument. 
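// (Sketch, with a hypothetical parameter named 'x' coerced to { i64, i64 }:
// the two i64 IR arguments are renamed x.coerce0 and x.coerce1 below and
// stored element-by-element into the parameter's memory.)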
1953 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 1954 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 1955 STy->getNumElements() > 1) { 1956 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 1957 llvm::Type *DstTy = 1958 cast<llvm::PointerType>(Ptr->getType())->getElementType(); 1959 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 1960 1961 if (SrcSize <= DstSize) { 1962 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy)); 1963 1964 assert(STy->getNumElements() == NumIRArgs); 1965 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1966 auto AI = FnArgs[FirstIRArg + i]; 1967 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 1968 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, Ptr, 0, i); 1969 Builder.CreateStore(AI, EltPtr); 1970 } 1971 } else { 1972 llvm::AllocaInst *TempAlloca = 1973 CreateTempAlloca(ArgI.getCoerceToType(), "coerce"); 1974 TempAlloca->setAlignment(AlignmentToUse); 1975 llvm::Value *TempV = TempAlloca; 1976 1977 assert(STy->getNumElements() == NumIRArgs); 1978 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1979 auto AI = FnArgs[FirstIRArg + i]; 1980 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 1981 llvm::Value *EltPtr = 1982 Builder.CreateConstGEP2_32(ArgI.getCoerceToType(), TempV, 0, i); 1983 Builder.CreateStore(AI, EltPtr); 1984 } 1985 1986 Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse); 1987 } 1988 } else { 1989 // Simple case, just do a coerced store of the argument into the alloca. 1990 assert(NumIRArgs == 1); 1991 auto AI = FnArgs[FirstIRArg]; 1992 AI->setName(Arg->getName() + ".coerce"); 1993 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this); 1994 } 1995 1996 1997 // Match to what EmitParmDecl is expecting for this type. 1998 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 1999 V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart()); 2000 if (isPromoted) 2001 V = emitArgumentDemotion(*this, Arg, V); 2002 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 2003 } else { 2004 ArgVals.push_back(ValueAndIsPtr(V, HavePointer)); 2005 } 2006 break; 2007 } 2008 2009 case ABIArgInfo::Expand: { 2010 // If this structure was expanded into multiple arguments then 2011 // we need to create a temporary and reconstruct it from the 2012 // arguments. 2013 llvm::AllocaInst *Alloca = CreateMemTemp(Ty); 2014 CharUnits Align = getContext().getDeclAlign(Arg); 2015 Alloca->setAlignment(Align.getQuantity()); 2016 LValue LV = MakeAddrLValue(Alloca, Ty, Align); 2017 ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer)); 2018 2019 auto FnArgIter = FnArgs.begin() + FirstIRArg; 2020 ExpandTypeFromArgs(Ty, LV, FnArgIter); 2021 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs); 2022 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 2023 auto AI = FnArgs[FirstIRArg + i]; 2024 AI->setName(Arg->getName() + "." + Twine(i)); 2025 } 2026 break; 2027 } 2028 2029 case ABIArgInfo::Ignore: 2030 assert(NumIRArgs == 0); 2031 // Initialize the local variable appropriately. 
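// (Ignore means no IR value was passed for this parameter at all; we still
// hand EmitParmDecl either a fresh temporary (aggregates) or an undef value
// (scalars) so that later references to the declaration have something to
// bind to.)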
2032 if (!hasScalarEvaluationKind(Ty)) {
2033 ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
2034 } else {
2035 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2036 ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
2037 }
2038 break;
2039 }
2040 }
2041
2042 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2043 for (int I = Args.size() - 1; I >= 0; --I)
2044 EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
2045 I + 1);
2046 } else {
2047 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2048 EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
2049 I + 1);
2050 }
2051 }
2052
2053 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2054 while (insn->use_empty()) {
2055 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2056 if (!bitcast) return;
2057
2058 // This is "safe" because we would have used a ConstantExpr otherwise.
2059 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2060 bitcast->eraseFromParent();
2061 }
2062 }
2063
2064 /// Try to emit a fused autorelease of a return result.
2065 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2066 llvm::Value *result) {
2067 // The insertion point must immediately follow the result.
2068 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2069 if (BB->empty()) return nullptr;
2070 if (&BB->back() != result) return nullptr;
2071
2072 llvm::Type *resultType = result->getType();
2073
2074 // result is in a BasicBlock and is therefore an Instruction.
2075 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2076
2077 SmallVector<llvm::Instruction*,4> insnsToKill;
2078
2079 // Look for:
2080 // %generator = bitcast %type1* %generator2 to %type2*
2081 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2082 // We would have emitted this as a constant if the operand weren't
2083 // an Instruction.
2084 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2085
2086 // Require the generator to be immediately followed by the cast.
2087 if (generator->getNextNode() != bitcast)
2088 return nullptr;
2089
2090 insnsToKill.push_back(bitcast);
2091 }
2092
2093 // Look for:
2094 // %generator = call i8* @objc_retain(i8* %originalResult)
2095 // or
2096 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2097 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2098 if (!call) return nullptr;
2099
2100 bool doRetainAutorelease;
2101
2102 if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
2103 doRetainAutorelease = true;
2104 } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
2105 .objc_retainAutoreleasedReturnValue) {
2106 doRetainAutorelease = false;
2107
2108 // If we emitted an assembly marker for this call (and the
2109 // ARCEntrypoints field should have been set if so), go looking
2110 // for that call. If we can't find it, we can't do this
2111 // optimization. But it should always be the immediately previous
2112 // instruction, unless we needed bitcasts around the call.
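// A sketch of the IR shape being matched here (names illustrative):
//   %call = call i8* @foo()
//   call void asm sideeffect "<marker>", ""()   ; the marker, when present
//   %result = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)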
2113 if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) { 2114 llvm::Instruction *prev = call->getPrevNode(); 2115 assert(prev); 2116 if (isa<llvm::BitCastInst>(prev)) { 2117 prev = prev->getPrevNode(); 2118 assert(prev); 2119 } 2120 assert(isa<llvm::CallInst>(prev)); 2121 assert(cast<llvm::CallInst>(prev)->getCalledValue() == 2122 CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker); 2123 insnsToKill.push_back(prev); 2124 } 2125 } else { 2126 return nullptr; 2127 } 2128 2129 result = call->getArgOperand(0); 2130 insnsToKill.push_back(call); 2131 2132 // Keep killing bitcasts, for sanity. Note that we no longer care 2133 // about precise ordering as long as there's exactly one use. 2134 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 2135 if (!bitcast->hasOneUse()) break; 2136 insnsToKill.push_back(bitcast); 2137 result = bitcast->getOperand(0); 2138 } 2139 2140 // Delete all the unnecessary instructions, from latest to earliest. 2141 for (SmallVectorImpl<llvm::Instruction*>::iterator 2142 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i) 2143 (*i)->eraseFromParent(); 2144 2145 // Do the fused retain/autorelease if we were asked to. 2146 if (doRetainAutorelease) 2147 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 2148 2149 // Cast back to the result type. 2150 return CGF.Builder.CreateBitCast(result, resultType); 2151 } 2152 2153 /// If this is a +1 of the value of an immutable 'self', remove it. 2154 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 2155 llvm::Value *result) { 2156 // This is only applicable to a method with an immutable 'self'. 2157 const ObjCMethodDecl *method = 2158 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 2159 if (!method) return nullptr; 2160 const VarDecl *self = method->getSelfDecl(); 2161 if (!self->getType().isConstQualified()) return nullptr; 2162 2163 // Look for a retain call. 2164 llvm::CallInst *retainCall = 2165 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 2166 if (!retainCall || 2167 retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain) 2168 return nullptr; 2169 2170 // Look for an ordinary load of 'self'. 2171 llvm::Value *retainedValue = retainCall->getArgOperand(0); 2172 llvm::LoadInst *load = 2173 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 2174 if (!load || load->isAtomic() || load->isVolatile() || 2175 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self)) 2176 return nullptr; 2177 2178 // Okay! Burn it all down. This relies for correctness on the 2179 // assumption that the retain is emitted as part of the return and 2180 // that thereafter everything is used "linearly". 2181 llvm::Type *resultType = result->getType(); 2182 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 2183 assert(retainCall->use_empty()); 2184 retainCall->eraseFromParent(); 2185 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 2186 2187 return CGF.Builder.CreateBitCast(load, resultType); 2188 } 2189 2190 /// Emit an ARC autorelease of the result of a function. 2191 /// 2192 /// \return the value to actually return from the function 2193 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 2194 llvm::Value *result) { 2195 // If we're returning 'self', kill the initial retain. This is a 2196 // heuristic attempt to "encourage correctness" in the really unfortunate 2197 // case where we have a return of self during a dealloc and we desperately 2198 // need to avoid the possible autorelease. 
2199 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2200 return self;
2201
2202 // At -O0, try to emit a fused retain/autorelease.
2203 if (CGF.shouldUseFusedARCCalls())
2204 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2205 return fused;
2206
2207 return CGF.EmitARCAutoreleaseReturnValue(result);
2208 }
2209
2210 /// Heuristically search for a dominating store to the return-value slot.
2211 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2212 // If there are multiple uses of the return-value slot, just check
2213 // for something immediately preceding the IP. Sometimes this can
2214 // happen with how we generate implicit-returns; it can also happen
2215 // with noreturn cleanups.
2216 if (!CGF.ReturnValue->hasOneUse()) {
2217 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2218 if (IP->empty()) return nullptr;
2219 llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
2220 if (!store) return nullptr;
2221 if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
2222 assert(!store->isAtomic() && !store->isVolatile()); // see below
2223 return store;
2224 }
2225
2226 llvm::StoreInst *store =
2227 dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
2228 if (!store) return nullptr;
2229
2230 // These aren't actually possible for non-coerced returns, and we
2231 // only care about non-coerced returns on this code path.
2232 assert(!store->isAtomic() && !store->isVolatile());
2233
2234 // Now do a quick-and-dirty dominance check: just walk up the
2235 // single-predecessors chain from the current insertion point.
2236 llvm::BasicBlock *StoreBB = store->getParent();
2237 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2238 while (IP != StoreBB) {
2239 if (!(IP = IP->getSinglePredecessor()))
2240 return nullptr;
2241 }
2242
2243 // Okay, the store's basic block dominates the insertion point; we
2244 // can do our thing.
2245 return store;
2246 }
2247
2248 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2249 bool EmitRetDbgLoc,
2250 SourceLocation EndLoc) {
2251 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2252 // Naked functions don't have epilogues.
2253 Builder.CreateUnreachable();
2254 return;
2255 }
2256
2257 // Functions with no result always return void.
2258 if (!ReturnValue) {
2259 Builder.CreateRetVoid();
2260 return;
2261 }
2262
2263 llvm::DebugLoc RetDbgLoc;
2264 llvm::Value *RV = nullptr;
2265 QualType RetTy = FI.getReturnType();
2266 const ABIArgInfo &RetAI = FI.getReturnInfo();
2267
2268 switch (RetAI.getKind()) {
2269 case ABIArgInfo::InAlloca:
2270 // Aggregates get evaluated directly into the destination. Sometimes we
2271 // need to return the sret value in a register, though.
2272 assert(hasAggregateEvaluationKind(RetTy));
2273 if (RetAI.getInAllocaSRet()) {
2274 llvm::Function::arg_iterator EI = CurFn->arg_end();
2275 --EI;
2276 llvm::Value *ArgStruct = EI;
2277 llvm::Value *SRet = Builder.CreateStructGEP(
2278 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2279 RV = Builder.CreateLoad(SRet, "sret");
2280 }
2281 break;
2282
2283 case ABIArgInfo::Indirect: {
2284 auto AI = CurFn->arg_begin();
2285 if (RetAI.isSRetAfterThis())
2286 ++AI;
2287 switch (getEvaluationKind(RetTy)) {
2288 case TEK_Complex: {
2289 ComplexPairTy RT =
2290 EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
2291 EndLoc);
2292 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
2293 /*isInit*/ true);
2294 break;
2295 }
2296 case TEK_Aggregate:
2297 // Do nothing; aggregates get evaluated directly into the destination.
2298 break;
2299 case TEK_Scalar:
2300 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2301 MakeNaturalAlignAddrLValue(AI, RetTy),
2302 /*isInit*/ true);
2303 break;
2304 }
2305 break;
2306 }
2307
2308 case ABIArgInfo::Extend:
2309 case ABIArgInfo::Direct:
2310 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2311 RetAI.getDirectOffset() == 0) {
2312 // The internal return value temp will always have pointer-to-return-type
2313 // type; just do a load.
2314
2315 // If there is a dominating store to ReturnValue, we can elide
2316 // the load, zap the store, and usually zap the alloca.
2317 if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
2318 // Reuse the debug location from the store unless there is
2319 // cleanup code to be emitted between the store and return
2320 // instruction.
2321 if (EmitRetDbgLoc && !AutoreleaseResult)
2322 RetDbgLoc = SI->getDebugLoc();
2323 // Get the stored value and nuke the now-dead store.
2324 RV = SI->getValueOperand();
2325 SI->eraseFromParent();
2326
2327 // If that was the only use of the return value, nuke it as well now.
2328 if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
2329 cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
2330 ReturnValue = nullptr;
2331 }
2332
2333 // Otherwise, we have to do a simple load.
2334 } else {
2335 RV = Builder.CreateLoad(ReturnValue);
2336 }
2337 } else {
2338 llvm::Value *V = ReturnValue;
2339 // If the value is offset in memory, apply the offset now.
2340 if (unsigned Offs = RetAI.getDirectOffset()) {
2341 V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
2342 V = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), V, Offs);
2343 V = Builder.CreateBitCast(V,
2344 llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2345 }
2346
2347 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2348 }
2349
2350 // In ARC, end functions that return a retainable type with a call
2351 // to objc_autoreleaseReturnValue.
2352 if (AutoreleaseResult) { 2353 assert(getLangOpts().ObjCAutoRefCount && 2354 !FI.isReturnsRetained() && 2355 RetTy->isObjCRetainableType()); 2356 RV = emitAutoreleaseOfResult(*this, RV); 2357 } 2358 2359 break; 2360 2361 case ABIArgInfo::Ignore: 2362 break; 2363 2364 case ABIArgInfo::Expand: 2365 llvm_unreachable("Invalid ABI kind for return argument"); 2366 } 2367 2368 llvm::Instruction *Ret; 2369 if (RV) { 2370 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) { 2371 if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) { 2372 SanitizerScope SanScope(this); 2373 llvm::Value *Cond = Builder.CreateICmpNE( 2374 RV, llvm::Constant::getNullValue(RV->getType())); 2375 llvm::Constant *StaticData[] = { 2376 EmitCheckSourceLocation(EndLoc), 2377 EmitCheckSourceLocation(RetNNAttr->getLocation()), 2378 }; 2379 EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute), 2380 "nonnull_return", StaticData, None); 2381 } 2382 } 2383 Ret = Builder.CreateRet(RV); 2384 } else { 2385 Ret = Builder.CreateRetVoid(); 2386 } 2387 2388 if (RetDbgLoc) 2389 Ret->setDebugLoc(std::move(RetDbgLoc)); 2390 } 2391 2392 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 2393 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2394 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 2395 } 2396 2397 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) { 2398 // FIXME: Generate IR in one pass, rather than going back and fixing up these 2399 // placeholders. 2400 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 2401 llvm::Value *Placeholder = 2402 llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo()); 2403 Placeholder = CGF.Builder.CreateLoad(Placeholder); 2404 return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(), 2405 Ty.getQualifiers(), 2406 AggValueSlot::IsNotDestructed, 2407 AggValueSlot::DoesNotNeedGCBarriers, 2408 AggValueSlot::IsNotAliased); 2409 } 2410 2411 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 2412 const VarDecl *param, 2413 SourceLocation loc) { 2414 // StartFunction converted the ABI-lowered parameter(s) into a 2415 // local alloca. We need to turn that into an r-value suitable 2416 // for EmitCall. 2417 llvm::Value *local = GetAddrOfLocalVar(param); 2418 2419 QualType type = param->getType(); 2420 2421 // For the most part, we just need to load the alloca, except: 2422 // 1) aggregate r-values are actually pointers to temporaries, and 2423 // 2) references to non-scalars are pointers directly to the aggregate. 2424 // I don't know why references to scalars are different here. 2425 if (const ReferenceType *ref = type->getAs<ReferenceType>()) { 2426 if (!hasScalarEvaluationKind(ref->getPointeeType())) 2427 return args.add(RValue::getAggregate(local), type); 2428 2429 // Locals which are references to scalars are represented 2430 // with allocas holding the pointer. 2431 return args.add(RValue::get(Builder.CreateLoad(local)), type); 2432 } 2433 2434 assert(!isInAllocaArgument(CGM.getCXXABI(), type) && 2435 "cannot emit delegate call arguments for inalloca arguments!"); 2436 2437 args.add(convertTempToRValue(local, type, loc), type); 2438 } 2439 2440 static bool isProvablyNull(llvm::Value *addr) { 2441 return isa<llvm::ConstantPointerNull>(addr); 2442 } 2443 2444 static bool isProvablyNonNull(llvm::Value *addr) { 2445 return isa<llvm::AllocaInst>(addr); 2446 } 2447 2448 /// Emit the actual writing-back of a writeback. 
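// (This is the second half of the ObjC "writeback" convention sketched in
// emitWritebackArg below: we passed the callee the address of a temporary,
// and here we store the temporary's final value back through the original
// l-value, guarding the store with a null check when the destination address
// was not provably non-null.)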
2449 static void emitWriteback(CodeGenFunction &CGF, 2450 const CallArgList::Writeback &writeback) { 2451 const LValue &srcLV = writeback.Source; 2452 llvm::Value *srcAddr = srcLV.getAddress(); 2453 assert(!isProvablyNull(srcAddr) && 2454 "shouldn't have writeback for provably null argument"); 2455 2456 llvm::BasicBlock *contBB = nullptr; 2457 2458 // If the argument wasn't provably non-null, we need to null check 2459 // before doing the store. 2460 bool provablyNonNull = isProvablyNonNull(srcAddr); 2461 if (!provablyNonNull) { 2462 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 2463 contBB = CGF.createBasicBlock("icr.done"); 2464 2465 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); 2466 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 2467 CGF.EmitBlock(writebackBB); 2468 } 2469 2470 // Load the value to writeback. 2471 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 2472 2473 // Cast it back, in case we're writing an id to a Foo* or something. 2474 value = CGF.Builder.CreateBitCast(value, 2475 cast<llvm::PointerType>(srcAddr->getType())->getElementType(), 2476 "icr.writeback-cast"); 2477 2478 // Perform the writeback. 2479 2480 // If we have a "to use" value, it's something we need to emit a use 2481 // of. This has to be carefully threaded in: if it's done after the 2482 // release it's potentially undefined behavior (and the optimizer 2483 // will ignore it), and if it happens before the retain then the 2484 // optimizer could move the release there. 2485 if (writeback.ToUse) { 2486 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 2487 2488 // Retain the new value. No need to block-copy here: the block's 2489 // being passed up the stack. 2490 value = CGF.EmitARCRetainNonBlock(value); 2491 2492 // Emit the intrinsic use here. 2493 CGF.EmitARCIntrinsicUse(writeback.ToUse); 2494 2495 // Load the old value (primitively). 2496 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 2497 2498 // Put the new value in place (primitively). 2499 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 2500 2501 // Release the old value. 2502 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 2503 2504 // Otherwise, we can just do a normal lvalue store. 2505 } else { 2506 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 2507 } 2508 2509 // Jump to the continuation block. 2510 if (!provablyNonNull) 2511 CGF.EmitBlock(contBB); 2512 } 2513 2514 static void emitWritebacks(CodeGenFunction &CGF, 2515 const CallArgList &args) { 2516 for (const auto &I : args.writebacks()) 2517 emitWriteback(CGF, I); 2518 } 2519 2520 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 2521 const CallArgList &CallArgs) { 2522 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()); 2523 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 2524 CallArgs.getCleanupsToDeactivate(); 2525 // Iterate in reverse to increase the likelihood of popping the cleanup. 
2526 for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
2527 I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
2528 CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
2529 I->IsActiveIP->eraseFromParent();
2530 }
2531 }
2532
2533 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
2534 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
2535 if (uop->getOpcode() == UO_AddrOf)
2536 return uop->getSubExpr();
2537 return nullptr;
2538 }
2539
2540 /// Emit an argument that's being passed call-by-writeback. That is,
2541 /// we are passing the address of a temporary, which may be copy-initialized from the original l-value and whose value is written back to that l-value after the call.
2542 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
2543 const ObjCIndirectCopyRestoreExpr *CRE) {
2544 LValue srcLV;
2545
2546 // Make an optimistic effort to emit the address as an l-value.
2547 // This can fail if the argument expression is more complicated.
2548 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
2549 srcLV = CGF.EmitLValue(lvExpr);
2550
2551 // Otherwise, just emit it as a scalar.
2552 } else {
2553 llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
2554
2555 QualType srcAddrType =
2556 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
2557 srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
2558 }
2559 llvm::Value *srcAddr = srcLV.getAddress();
2560
2561 // The dest and src types don't necessarily match in LLVM terms
2562 // because of the crazy ObjC compatibility rules.
2563
2564 llvm::PointerType *destType =
2565 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
2566
2567 // If the address is a constant null, just pass the appropriate null.
2568 if (isProvablyNull(srcAddr)) {
2569 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
2570 CRE->getType());
2571 return;
2572 }
2573
2574 // Create the temporary.
2575 llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
2576 "icr.temp");
2577 // Loading an l-value can introduce a cleanup if the l-value is __weak,
2578 // and that cleanup will be conditional if we can't prove that the l-value
2579 // isn't null, so we need to register a dominating point so that the cleanups
2580 // system will make valid IR.
2581 CodeGenFunction::ConditionalEvaluation condEval(CGF);
2582
2583 // Zero-initialize it if we're not doing a copy-initialization.
2584 bool shouldCopy = CRE->shouldCopy();
2585 if (!shouldCopy) {
2586 llvm::Value *null =
2587 llvm::ConstantPointerNull::get(
2588 cast<llvm::PointerType>(destType->getElementType()));
2589 CGF.Builder.CreateStore(null, temp);
2590 }
2591
2592 llvm::BasicBlock *contBB = nullptr;
2593 llvm::BasicBlock *originBB = nullptr;
2594
2595 // If the address is *not* known to be non-null, we need to switch.
2596 llvm::Value *finalArgument;
2597
2598 bool provablyNonNull = isProvablyNonNull(srcAddr);
2599 if (provablyNonNull) {
2600 finalArgument = temp;
2601 } else {
2602 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
2603
2604 finalArgument = CGF.Builder.CreateSelect(isNull,
2605 llvm::ConstantPointerNull::get(destType),
2606 temp, "icr.argument");
2607
2608 // If we need to copy, then the load has to be conditional, which
2609 // means we need control flow.
2610 if (shouldCopy) { 2611 originBB = CGF.Builder.GetInsertBlock(); 2612 contBB = CGF.createBasicBlock("icr.cont"); 2613 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 2614 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 2615 CGF.EmitBlock(copyBB); 2616 condEval.begin(CGF); 2617 } 2618 } 2619 2620 llvm::Value *valueToUse = nullptr; 2621 2622 // Perform a copy if necessary. 2623 if (shouldCopy) { 2624 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 2625 assert(srcRV.isScalar()); 2626 2627 llvm::Value *src = srcRV.getScalarVal(); 2628 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 2629 "icr.cast"); 2630 2631 // Use an ordinary store, not a store-to-lvalue. 2632 CGF.Builder.CreateStore(src, temp); 2633 2634 // If optimization is enabled, and the value was held in a 2635 // __strong variable, we need to tell the optimizer that this 2636 // value has to stay alive until we're doing the store back. 2637 // This is because the temporary is effectively unretained, 2638 // and so otherwise we can violate the high-level semantics. 2639 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 2640 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 2641 valueToUse = src; 2642 } 2643 } 2644 2645 // Finish the control flow if we needed it. 2646 if (shouldCopy && !provablyNonNull) { 2647 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 2648 CGF.EmitBlock(contBB); 2649 2650 // Make a phi for the value to intrinsically use. 2651 if (valueToUse) { 2652 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 2653 "icr.to-use"); 2654 phiToUse->addIncoming(valueToUse, copyBB); 2655 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 2656 originBB); 2657 valueToUse = phiToUse; 2658 } 2659 2660 condEval.end(CGF); 2661 } 2662 2663 args.addWriteback(srcLV, temp, valueToUse); 2664 args.add(RValue::get(finalArgument), CRE->getType()); 2665 } 2666 2667 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 2668 assert(!StackBase && !StackCleanup.isValid()); 2669 2670 // Save the stack. 2671 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 2672 StackBase = CGF.Builder.CreateCall(F, "inalloca.save"); 2673 2674 // Control gets really tied up in landing pads, so we have to spill the 2675 // stacksave to an alloca to avoid violating SSA form. 2676 // TODO: This is dead if we never emit the cleanup. We should create the 2677 // alloca and store lazily on the first cleanup emission. 2678 StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem"); 2679 CGF.Builder.CreateStore(StackBase, StackBaseMem); 2680 CGF.pushStackRestore(EHCleanup, StackBaseMem); 2681 StackCleanup = CGF.EHStack.getInnermostEHScope(); 2682 assert(StackCleanup.isValid()); 2683 } 2684 2685 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 2686 if (StackBase) { 2687 CGF.DeactivateCleanupBlock(StackCleanup, StackBase); 2688 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 2689 // We could load StackBase from StackBaseMem, but in the non-exceptional 2690 // case we can skip it. 2691 CGF.Builder.CreateCall(F, StackBase); 2692 } 2693 } 2694 2695 static void emitNonNullArgCheck(CodeGenFunction &CGF, RValue RV, 2696 QualType ArgType, SourceLocation ArgLoc, 2697 const FunctionDecl *FD, unsigned ParmNum) { 2698 if (!CGF.SanOpts.has(SanitizerKind::NonnullAttribute) || !FD) 2699 return; 2700 auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr; 2701 unsigned ArgNo = PVD ? 
PVD->getFunctionScopeIndex() : ParmNum; 2702 auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo); 2703 if (!NNAttr) 2704 return; 2705 CodeGenFunction::SanitizerScope SanScope(&CGF); 2706 assert(RV.isScalar()); 2707 llvm::Value *V = RV.getScalarVal(); 2708 llvm::Value *Cond = 2709 CGF.Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); 2710 llvm::Constant *StaticData[] = { 2711 CGF.EmitCheckSourceLocation(ArgLoc), 2712 CGF.EmitCheckSourceLocation(NNAttr->getLocation()), 2713 llvm::ConstantInt::get(CGF.Int32Ty, ArgNo + 1), 2714 }; 2715 CGF.EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute), 2716 "nonnull_arg", StaticData, None); 2717 } 2718 2719 void CodeGenFunction::EmitCallArgs(CallArgList &Args, 2720 ArrayRef<QualType> ArgTypes, 2721 CallExpr::const_arg_iterator ArgBeg, 2722 CallExpr::const_arg_iterator ArgEnd, 2723 const FunctionDecl *CalleeDecl, 2724 unsigned ParamsToSkip) { 2725 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 2726 // because arguments are destroyed left to right in the callee. 2727 if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2728 // Insert a stack save if we're going to need any inalloca args. 2729 bool HasInAllocaArgs = false; 2730 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); 2731 I != E && !HasInAllocaArgs; ++I) 2732 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); 2733 if (HasInAllocaArgs) { 2734 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 2735 Args.allocateArgumentMemory(*this); 2736 } 2737 2738 // Evaluate each argument. 2739 size_t CallArgsStart = Args.size(); 2740 for (int I = ArgTypes.size() - 1; I >= 0; --I) { 2741 CallExpr::const_arg_iterator Arg = ArgBeg + I; 2742 EmitCallArg(Args, *Arg, ArgTypes[I]); 2743 emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(), 2744 CalleeDecl, ParamsToSkip + I); 2745 } 2746 2747 // Un-reverse the arguments we just evaluated so they match up with the LLVM 2748 // IR function. 
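// (e.g. for a call f(a, b, c) under this ABI the loop above evaluated c,
// then b, then a; reversing the sub-range just added to Args restores the
// a, b, c order of the LLVM IR function's parameters.)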
2749 std::reverse(Args.begin() + CallArgsStart, Args.end()); 2750 return; 2751 } 2752 2753 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { 2754 CallExpr::const_arg_iterator Arg = ArgBeg + I; 2755 assert(Arg != ArgEnd); 2756 EmitCallArg(Args, *Arg, ArgTypes[I]); 2757 emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(), 2758 CalleeDecl, ParamsToSkip + I); 2759 } 2760 } 2761 2762 namespace { 2763 2764 struct DestroyUnpassedArg : EHScopeStack::Cleanup { 2765 DestroyUnpassedArg(llvm::Value *Addr, QualType Ty) 2766 : Addr(Addr), Ty(Ty) {} 2767 2768 llvm::Value *Addr; 2769 QualType Ty; 2770 2771 void Emit(CodeGenFunction &CGF, Flags flags) override { 2772 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 2773 assert(!Dtor->isTrivial()); 2774 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 2775 /*Delegating=*/false, Addr); 2776 } 2777 }; 2778 2779 } 2780 2781 struct DisableDebugLocationUpdates { 2782 CodeGenFunction &CGF; 2783 bool disabledDebugInfo; 2784 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 2785 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 2786 CGF.disableDebugInfo(); 2787 } 2788 ~DisableDebugLocationUpdates() { 2789 if (disabledDebugInfo) 2790 CGF.enableDebugInfo(); 2791 } 2792 }; 2793 2794 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 2795 QualType type) { 2796 DisableDebugLocationUpdates Dis(*this, E); 2797 if (const ObjCIndirectCopyRestoreExpr *CRE 2798 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 2799 assert(getLangOpts().ObjCAutoRefCount); 2800 assert(getContext().hasSameType(E->getType(), type)); 2801 return emitWritebackArg(*this, args, CRE); 2802 } 2803 2804 assert(type->isReferenceType() == E->isGLValue() && 2805 "reference binding to unmaterialized r-value!"); 2806 2807 if (E->isGLValue()) { 2808 assert(E->getObjectKind() == OK_Ordinary); 2809 return args.add(EmitReferenceBindingToExpr(E), type); 2810 } 2811 2812 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 2813 2814 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 2815 // However, we still have to push an EH-only cleanup in case we unwind before 2816 // we make it to the call. 2817 if (HasAggregateEvalKind && 2818 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2819 // If we're using inalloca, use the argument memory. Otherwise, use a 2820 // temporary. 2821 AggValueSlot Slot; 2822 if (args.isUsingInAlloca()) 2823 Slot = createPlaceholderSlot(*this, type); 2824 else 2825 Slot = CreateAggTemp(type, "agg.tmp"); 2826 2827 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2828 bool DestroyedInCallee = 2829 RD && RD->hasNonTrivialDestructor() && 2830 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default; 2831 if (DestroyedInCallee) 2832 Slot.setExternallyDestructed(); 2833 2834 EmitAggExpr(E, Slot); 2835 RValue RV = Slot.asRValue(); 2836 args.add(RV, type); 2837 2838 if (DestroyedInCallee) { 2839 // Create a no-op GEP between the placeholder and the cleanup so we can 2840 // RAUW it successfully. It also serves as a marker of the first 2841 // instruction where the cleanup is active. 2842 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type); 2843 // This unreachable is a temporary marker which will be removed later. 
2844 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 2845 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 2846 } 2847 return; 2848 } 2849 2850 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 2851 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 2852 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 2853 assert(L.isSimple()); 2854 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) { 2855 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true); 2856 } else { 2857 // We can't represent a misaligned lvalue in the CallArgList, so copy 2858 // to an aligned temporary now. 2859 llvm::Value *tmp = CreateMemTemp(type); 2860 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(), 2861 L.getAlignment()); 2862 args.add(RValue::getAggregate(tmp), type); 2863 } 2864 return; 2865 } 2866 2867 args.add(EmitAnyExprToTemp(E), type); 2868 } 2869 2870 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 2871 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 2872 // implicitly widens null pointer constants that are arguments to varargs 2873 // functions to pointer-sized ints. 2874 if (!getTarget().getTriple().isOSWindows()) 2875 return Arg->getType(); 2876 2877 if (Arg->getType()->isIntegerType() && 2878 getContext().getTypeSize(Arg->getType()) < 2879 getContext().getTargetInfo().getPointerWidth(0) && 2880 Arg->isNullPointerConstant(getContext(), 2881 Expr::NPC_ValueDependentIsNotNull)) { 2882 return getContext().getIntPtrType(); 2883 } 2884 2885 return Arg->getType(); 2886 } 2887 2888 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 2889 // optimizer it can aggressively ignore unwind edges. 2890 void 2891 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 2892 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 2893 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 2894 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 2895 CGM.getNoObjCARCExceptionsMetadata()); 2896 } 2897 2898 /// Emits a call to the given no-arguments nounwind runtime function. 2899 llvm::CallInst * 2900 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 2901 const llvm::Twine &name) { 2902 return EmitNounwindRuntimeCall(callee, None, name); 2903 } 2904 2905 /// Emits a call to the given nounwind runtime function. 2906 llvm::CallInst * 2907 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 2908 ArrayRef<llvm::Value*> args, 2909 const llvm::Twine &name) { 2910 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 2911 call->setDoesNotThrow(); 2912 return call; 2913 } 2914 2915 /// Emits a simple call (never an invoke) to the given no-arguments 2916 /// runtime function. 2917 llvm::CallInst * 2918 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 2919 const llvm::Twine &name) { 2920 return EmitRuntimeCall(callee, None, name); 2921 } 2922 2923 /// Emits a simple call (never an invoke) to the given runtime 2924 /// function. 2925 llvm::CallInst * 2926 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 2927 ArrayRef<llvm::Value*> args, 2928 const llvm::Twine &name) { 2929 llvm::CallInst *call = Builder.CreateCall(callee, args, name); 2930 call->setCallingConv(getRuntimeCC()); 2931 return call; 2932 } 2933 2934 /// Emits a call or invoke to the given noreturn runtime function. 
2935 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 2936 ArrayRef<llvm::Value*> args) { 2937 if (getInvokeDest()) { 2938 llvm::InvokeInst *invoke = 2939 Builder.CreateInvoke(callee, 2940 getUnreachableBlock(), 2941 getInvokeDest(), 2942 args); 2943 invoke->setDoesNotReturn(); 2944 invoke->setCallingConv(getRuntimeCC()); 2945 } else { 2946 llvm::CallInst *call = Builder.CreateCall(callee, args); 2947 call->setDoesNotReturn(); 2948 call->setCallingConv(getRuntimeCC()); 2949 Builder.CreateUnreachable(); 2950 } 2951 PGO.setCurrentRegionUnreachable(); 2952 } 2953 2954 /// Emits a call or invoke instruction to the given nullary runtime 2955 /// function. 2956 llvm::CallSite 2957 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 2958 const Twine &name) { 2959 return EmitRuntimeCallOrInvoke(callee, None, name); 2960 } 2961 2962 /// Emits a call or invoke instruction to the given runtime function. 2963 llvm::CallSite 2964 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 2965 ArrayRef<llvm::Value*> args, 2966 const Twine &name) { 2967 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); 2968 callSite.setCallingConv(getRuntimeCC()); 2969 return callSite; 2970 } 2971 2972 llvm::CallSite 2973 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 2974 const Twine &Name) { 2975 return EmitCallOrInvoke(Callee, None, Name); 2976 } 2977 2978 /// Emits a call or invoke instruction to the given function, depending 2979 /// on the current state of the EH stack. 2980 llvm::CallSite 2981 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 2982 ArrayRef<llvm::Value *> Args, 2983 const Twine &Name) { 2984 llvm::BasicBlock *InvokeDest = getInvokeDest(); 2985 2986 llvm::Instruction *Inst; 2987 if (!InvokeDest) 2988 Inst = Builder.CreateCall(Callee, Args, Name); 2989 else { 2990 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 2991 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name); 2992 EmitBlock(ContBB); 2993 } 2994 2995 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 2996 // optimizer it can aggressively ignore unwind edges. 2997 if (CGM.getLangOpts().ObjCAutoRefCount) 2998 AddObjCARCExceptionMetadata(Inst); 2999 3000 return llvm::CallSite(Inst); 3001 } 3002 3003 /// \brief Store a non-aggregate value to an address to initialize it. For 3004 /// initialization, a non-atomic store will be used. 3005 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, 3006 LValue Dst) { 3007 if (Src.isScalar()) 3008 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true); 3009 else 3010 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true); 3011 } 3012 3013 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 3014 llvm::Value *New) { 3015 DeferredReplacements.push_back(std::make_pair(Old, New)); 3016 } 3017 3018 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 3019 llvm::Value *Callee, 3020 ReturnValueSlot ReturnValue, 3021 const CallArgList &CallArgs, 3022 const Decl *TargetDecl, 3023 llvm::Instruction **callOrInvoke) { 3024 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 3025 3026 // Handle struct-return functions by passing a pointer to the 3027 // location that we would like to return into. 
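// (For example, calling a function whose aggregate result is returned
// indirectly: below we reuse the caller-provided ReturnValue slot when there
// is one, otherwise create a temporary, and pass that pointer either as the
// sret IR argument or through the inalloca struct.)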
3028 QualType RetTy = CallInfo.getReturnType(); 3029 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 3030 3031 llvm::FunctionType *IRFuncTy = 3032 cast<llvm::FunctionType>( 3033 cast<llvm::PointerType>(Callee->getType())->getElementType()); 3034 3035 // If we're using inalloca, insert the allocation after the stack save. 3036 // FIXME: Do this earlier rather than hacking it in here! 3037 llvm::AllocaInst *ArgMemory = nullptr; 3038 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { 3039 llvm::Instruction *IP = CallArgs.getStackBase(); 3040 llvm::AllocaInst *AI; 3041 if (IP) { 3042 IP = IP->getNextNode(); 3043 AI = new llvm::AllocaInst(ArgStruct, "argmem", IP); 3044 } else { 3045 AI = CreateTempAlloca(ArgStruct, "argmem"); 3046 } 3047 AI->setUsedWithInAlloca(true); 3048 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); 3049 ArgMemory = AI; 3050 } 3051 3052 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); 3053 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); 3054 3055 // If the call returns a temporary with struct return, create a temporary 3056 // alloca to hold the result, unless one is given to us. 3057 llvm::Value *SRetPtr = nullptr; 3058 if (RetAI.isIndirect() || RetAI.isInAlloca()) { 3059 SRetPtr = ReturnValue.getValue(); 3060 if (!SRetPtr) 3061 SRetPtr = CreateMemTemp(RetTy); 3062 if (IRFunctionArgs.hasSRetArg()) { 3063 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr; 3064 } else { 3065 llvm::Value *Addr = 3066 Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory, 3067 RetAI.getInAllocaFieldIndex()); 3068 Builder.CreateStore(SRetPtr, Addr); 3069 } 3070 } 3071 3072 assert(CallInfo.arg_size() == CallArgs.size() && 3073 "Mismatch between function signature & arguments."); 3074 unsigned ArgNo = 0; 3075 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); 3076 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); 3077 I != E; ++I, ++info_it, ++ArgNo) { 3078 const ABIArgInfo &ArgInfo = info_it->info; 3079 RValue RV = I->RV; 3080 3081 CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty); 3082 3083 // Insert a padding argument to ensure proper alignment. 3084 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 3085 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 3086 llvm::UndefValue::get(ArgInfo.getPaddingType()); 3087 3088 unsigned FirstIRArg, NumIRArgs; 3089 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 3090 3091 switch (ArgInfo.getKind()) { 3092 case ABIArgInfo::InAlloca: { 3093 assert(NumIRArgs == 0); 3094 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 3095 if (RV.isAggregate()) { 3096 // Replace the placeholder with the appropriate argument slot GEP. 3097 llvm::Instruction *Placeholder = 3098 cast<llvm::Instruction>(RV.getAggregateAddr()); 3099 CGBuilderTy::InsertPoint IP = Builder.saveIP(); 3100 Builder.SetInsertPoint(Placeholder); 3101 llvm::Value *Addr = 3102 Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory, 3103 ArgInfo.getInAllocaFieldIndex()); 3104 Builder.restoreIP(IP); 3105 deferPlaceholderReplacement(Placeholder, Addr); 3106 } else { 3107 // Store the RValue into the argument struct. 
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                    ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr->getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable.  The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr->getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        IRCallArgs[FirstIRArg] = AI;

        LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different than that of the argument (0).
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
        const unsigned ArgAddrSpace =
            (FirstIRArg < IRFuncTy->getNumParams()
                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
                 : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, *TD) < Align) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          IRCallArgs[FirstIRArg] = AI;
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
        } else {
          // Skip the extra memcpy call.
          IRCallArgs[FirstIRArg] = Addr;
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
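        // (Illustrative example: a bool evaluates to an i1 scalar but is
        // usually coerced to i8 for passing, which the zext above handles;
        // the bitcast below covers cases such as pointers whose pointee type
        // was only completed later in the translation unit.)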
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar() || RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
            llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy =
            cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        IRCallArgs[FirstIRArg] =
            CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this);
      }

      break;
    }

    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }

  if (ArgMemory) {
    llvm::Value *Arg = ArgMemory;
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site.  In such
      // cases, we can't do any parameter mismatch checks.  Give up and bitcast
      // the callee.
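      // (E.g. passing a non-trivially-copyable object by value to a
      // printf-style variadic function under the MSVC x86 ABI ends up here;
      // the callee is simply recast to the inalloca-using function type
      // derived from CallInfo.)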
      unsigned CalleeAS =
          cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
      Callee = Builder.CreateBitCast(
          Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                 DE = DeclaredTy->element_end(),
                                                 FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
          cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == IRCallArgs.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // The inalloca argument can have a different type.
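    // (Its packet type may legitimately differ from the declared parameter
    // type, e.g. after the variadic-callee bitcast above, so it is exempted
    // from the type-equality check below.)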
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
                             CallingConv, true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  llvm::BasicBlock *InvokeDest = nullptr;
  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                          llvm::Attribute::NoUnwind) ||
      currentFunctionUsesSEHTry())
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, IRCallArgs);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !CS.hasFnAttr(llvm::Attribute::NoInline))
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope())
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::NoInline);

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect:
      return convertTempToRValue(SRetPtr, RetTy, SourceLocation());

    case ABIArgInfo::Ignore:
      // If the ABI is ignoring a result the call actually produced, make sure
      // to construct an appropriate return value for our caller.
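      // (An ignored result is typically void or an empty record, so an undef
      // RValue of the right type is all the caller needs here.)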
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          llvm::Value *DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the return value's type doesn't match, perform a bitcast to
          // coerce it.  This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      llvm::Value *StorePtr = DestPtr;
      if (unsigned Offs = RetAI.getDirectOffset()) {
        StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
        StorePtr =
            Builder.CreateConstGEP1_32(Builder.getInt8Ty(), StorePtr, Offs);
        StorePtr = Builder.CreateBitCast(StorePtr,
            llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  if (Ret.isScalar() && TargetDecl) {
    if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
      llvm::Value *OffsetValue = nullptr;
      if (const auto *Offset = AA->getOffset())
        OffsetValue = EmitScalarExpr(Offset);

      llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
      llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    }
  }

  return Ret;
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}