//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGDebugInfo.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(getContext()));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.add(RValue::get(VTT), T);
  }

  // And the rest of the call args
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  QualType ResultType = FPT->getResultType();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
                                                 FPT->getExtInfo()),
                  Callee, ReturnValue, Args, MD);
}

static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
  const Expr *E = Base;

  while (true) {
    E = E->IgnoreParens();
    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_DerivedToBase ||
          CE->getCastKind() == CK_UncheckedDerivedToBase ||
          CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }

    break;
  }

  QualType DerivedType = E->getType();
  if (const PointerType *PTy = DerivedType->getAs<PointerType>())
    DerivedType = PTy->getPointeeType();

  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
/// expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
                                               const Expr *Base,
                                               const CXXMethodDecl *MD) {

  // When building with -fapple-kext, all calls must go through the vtable since
  // the kernel linker can do runtime patching of vtables.
  if (Context.getLangOptions().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  // struct A { virtual void f(); }
  // struct B final : A { };
  //
  // void f(B *b) {
  //   b->f();
  // }
  //
  const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final' it can't be overridden
  // and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a record decl. We know the type and can devirtualize it.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

// Note: This function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else
    This = EmitLValue(ME->getBase()).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator()) {
      // We don't like to generate the trivial copy assignment operator when
      // it isn't necessary; just produce the proper effect here.
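      // (e.g. for 'a = b' with a trivial assignment operator, this lowers to
      // an aggregate copy of the bytes of 'b' into 'a' rather than a call.)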
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateCopy(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
                                            Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
                                            Ctor_Complete);
  else
    FInfo = &CGM.getTypes().getFunctionInfo(MD);

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  llvm::Type *Ty
    = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
                        && !canDevirtualizeMemberFunctionCalls(getContext(),
                                                               ME->getBase(),
                                                               MD);
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      if (getContext().getLangOptions().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else
        Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    if (getContext().getLangOptions().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else
      Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
    cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  // Ask the ABI to load the callee.  Note that This is modified.
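  // (In the Itanium ABI, for example, the member function pointer encodes
  // whether it names a virtual function; the ABI either loads the real
  // callee from the vtable or uses the stored function pointer directly.)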
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().getFunctionInfo(Args, FPT), Callee,
                  ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if (MD->isCopyAssignmentOperator()) {
    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
    if (ClassDecl->hasTrivialCopyAssignment()) {
      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
      QualType Ty = E->getType();
      EmitAggregateCopy(This, Src, Ty);
      return RValue::get(This);
    }
  }

  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed())
    EmitNullInitialization(Dest.getAddr(), E->getType());

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ConstantArrayType *arrayType
        = getContext().getAsConstantArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
                               E->arg_begin(), E->arg_end());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
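  // e.g. for 'new int[n][3][7]' the allocated type is 'int[3][7]': the loop
  // below peels the constant dimensions so arraySizeMultiplier becomes 21,
  // 'type' becomes 'int', and typeSizeMultiplier becomes 21 * sizeof(int).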
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
           = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to four conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 4) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = 0;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
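    // (If the count is signed and wider than size_t, a negative value viewed
    // as unsigned has its high bits set, so it is necessarily >= the
    // 2^sizeWidth threshold and fails the same check.)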
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                                     llvm::ConstantInt::get(CGF.SizeTy, 0));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
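    // (The cookie is where the element count is stashed for 'delete[]'; in
    // the Itanium ABI, for example, it is a size_t stored immediately before
    // the first array element.)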
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
                                    llvm::Value *NewPtr) {

  assert(E->getNumConstructorArgs() == 1 &&
         "Can only have one argument to initializer of POD type.");

  const Expr *Init = E->getConstructorArg(0);
  QualType AllocType = E->getAllocatedType();

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
  if (!CGF.hasAggregateLLVMType(AllocType))
    CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType, Alignment),
                       false);
  else if (AllocType->isAnyComplexType())
    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                AllocType.isVolatileQualified());
  else {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(), true);
    CGF.EmitAggExpr(Init, Slot);
  }
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         llvm::Value *NewPtr,
                                         llvm::Value *NumElements) {
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, emit the constructor call on the array element.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
                                                   "arrayidx");
  StoreAnyExprIntoOneUnit(*this, E, Address);

  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}

static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  CGF.EmitCastToVoidPtr(NewPtr);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
                           Alignment.getQuantity(), false);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialDefaultConstructor()) {
        // If the new expression did not specify value-initialization, then
        // there is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
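  ///
  /// (The saved placement args live in trailing storage allocated by
  /// pushCleanupWithExtra, hence the 'this+1' arithmetic in
  /// getPlacementArgs().)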
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;

    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(RValue::get(Ptr), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.add(RValue::get(AllocSize), *AI++);

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.add(getPlacementArgs()[I], *AI++);

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                                   DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
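      // (e.g. 'void operator delete(void*, size_t)'; per the assert above,
      // this case only arises when there are no placement args.)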
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                       DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, numElements, allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
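  // e.g. for 'new (buf) T' with 'void *operator new(size_t, void*)', the one
  // placement arg 'buf' is emitted here with its declared 'void*' type.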
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  RValue RV;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size
    // argument.
  } else {
    RV = EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
                  CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
                  allocatorArgs, allocator);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    !(allocType.isPODType(getContext()) && !E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS =
    cast<llvm::PointerType>(allocation->getType())->getAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
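  // (Per C++ [expr.new], if initialization exits with an exception, a
  // matching deallocation function is called to free the storage; the
  // cleanup below implements exactly that.)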
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
  }

  llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  if (E->isArray()) {
    EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);

    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  } else {
    EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid())
    DeactivateCleanupBlock(operatorDeleteCleanup);

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  if (Size)
    DeleteArgs.add(RValue::get(Size), SizeTy);

  // Emit the call to delete.
  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
           DeleteArgs, DeleteFD);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

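// (A note on EmitObjectDelete below: when the destructor is virtual, it emits
// one virtual call to the vtable's deleting destructor, which both destroys
// and frees the object; for a global '::delete' it instead calls the
// complete-object destructor and frees via the CallObjectDelete cleanup.)
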
/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType,
                             bool UseGlobalDelete) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        if (UseGlobalDelete) {
          // If we're supposed to call the global delete, make sure we do so
          // even if the destructor throws.
          CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                                    Ptr, OperatorDelete,
                                                    ElementType);
        }

        llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                                                Dtor_Complete),
                                         /*isVariadic=*/false);

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor,
                                 UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
                                 Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        if (UseGlobalDelete) {
          CGF.PopCleanupBlock();
        }

        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);
  else if (CGF.getLangOptions().ObjCAutoRefCount &&
           ElementType->isObjCLifetimeType()) {
    switch (ElementType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong: {
      // Load the pointer value.
      llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
                                            ElementType.isVolatileQualified());

      CGF.EmitARCRelease(PtrValue, /*precise*/ true);
      break;
    }

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.add(RValue::get(DeletePtr), VoidPtrTy);

      // Pass the original requested size as the second argument.
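      // e.g. deleting 5 elements of a 12-byte class with an 8-byte cookie
      // passes 5 * 12 + 8 = 68 as the size argument.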
      if (DeleteFTy->getNumArgs() == 2) {
        QualType size_t = DeleteFTy->getArgType(1);
        llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.add(RValue::get(Size), size_t);
      }

      // Emit the call to delete.
      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), Args, OperatorDelete);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = 0;
  llvm::Value *allocatedPtr = 0;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
                     E->isGlobalDelete());
  }

  EmitBlock(DeleteEnd);
}

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();

  llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info.
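  // (In the Itanium ABI the type_info pointer lives in the vtable slot just
  // before the address point, hence the index of -1 below.)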
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->getExprOperand()->isGLValue()) {
    if (const RecordType *RT =
          E->getExprOperand()->getType()->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->isPolymorphic())
        return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                    StdTypeInfoPtrTy);
    }
  }

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(Int8PtrTy, Args, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();

  llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
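      // (offset-to-top is the displacement from this subobject back to the
      // most derived object; in the Itanium ABI it sits in vtable slot -2
      // before the address point, which is why the GEP above used -2.)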
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  EmitBadCastCall(CGF);

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
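  // e.g. 'dynamic_cast<T*>(p)' with a null 'p' must yield a null 'T*'
  // without calling the runtime, so pointer casts get an explicit null check.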
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}