//===--- ASTContext.cpp - Context to hold long-lived AST nodes -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Mangle.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Capacity.h"
#include "CXXABI.h"
#include <map>

using namespace clang;

unsigned ASTContext::NumImplicitDefaultConstructors;
unsigned ASTContext::NumImplicitDefaultConstructorsDeclared;
unsigned ASTContext::NumImplicitCopyConstructors;
unsigned ASTContext::NumImplicitCopyConstructorsDeclared;
unsigned ASTContext::NumImplicitMoveConstructors;
unsigned ASTContext::NumImplicitMoveConstructorsDeclared;
unsigned ASTContext::NumImplicitCopyAssignmentOperators;
unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
unsigned ASTContext::NumImplicitMoveAssignmentOperators;
unsigned ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;
unsigned ASTContext::NumImplicitDestructors;
unsigned ASTContext::NumImplicitDestructorsDeclared;

enum FloatingRank {
  HalfRank, FloatRank, DoubleRank, LongDoubleRank
};

void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                               TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      continue;
    }

    if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(NTTP->getType().getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I)
          ID.AddPointer(NTTP->getExpansionType(I).getAsOpaquePtr());
      } else
        ID.AddBoolean(false);
      continue;
    }

    TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, TTP);
  }
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
                                          TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
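  // Profile this parameter into an ID and consult the folding set; on a
  // miss, InsertPos remembers where the new node should be inserted below.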
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, TTP);
  void *InsertPos = 0;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
      CanonParams.push_back(
                  TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
                                               SourceLocation(),
                                               SourceLocation(),
                                               TTP->getDepth(),
                                               TTP->getIndex(), 0, false,
                                               TTP->isParameterPack()));
    else if (NonTypeTemplateParmDecl *NTTP
             = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getCanonicalType(NTTP->getType());
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
                                getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), 0,
                                                T,
                                                TInfo,
                                                ExpandedTypes.data(),
                                                ExpandedTypes.size(),
                                                ExpandedTInfos.data());
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), 0,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);

    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
                                           cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP
    = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                       SourceLocation(), TTP->getDepth(),
                                       TTP->getPosition(),
                                       TTP->isParameterPack(),
                                       0,
                         TemplateParameterList::Create(*this, SourceLocation(),
                                                       SourceLocation(),
                                                       CanonParams.data(),
                                                       CanonParams.size(),
                                                       SourceLocation()));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(Canonical == 0 && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
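  // The placement new below allocates the entry from the ASTContext's own
  // allocator, so it lives for as long as the context does.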
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return 0;

  switch (T.getCXXABI()) {
  case CXXABI_ARM:
    return CreateARMCXXABI(*this);
  case CXXABI_Itanium:
    return CreateItaniumCXXABI(*this);
  case CXXABI_Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  return 0;
}

static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
                                             const LangOptions &LOpts) {
  if (LOpts.FakeAddressSpaceMap) {
    // The fake address space map must have a distinct entry for each
    // language-specific address space.
    static const unsigned FakeAddrSpaceMap[] = {
      1, // opencl_global
      2, // opencl_local
      3  // opencl_constant
    };
    return &FakeAddrSpaceMap;
  } else {
    return &T.getAddressSpaceMap();
  }
}

ASTContext::ASTContext(LangOptions& LOpts, SourceManager &SM,
                       const TargetInfo *t,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins,
                       unsigned size_reserve,
                       bool DelayInitialization)
  : FunctionProtoTypes(this_()),
    TemplateSpecializationTypes(this_()),
    DependentTemplateSpecializationTypes(this_()),
    SubstTemplateTemplateParmPacks(this_()),
    GlobalNestedNameSpecifier(0),
    Int128Decl(0), UInt128Decl(0),
    ObjCIdDecl(0), ObjCSelDecl(0), ObjCClassDecl(0),
    CFConstantStringTypeDecl(0), ObjCInstanceTypeDecl(0),
    FILEDecl(0),
    jmp_bufDecl(0), sigjmp_bufDecl(0), BlockDescriptorType(0),
    BlockDescriptorExtendedType(0), cudaConfigureCallDecl(0),
    NullTypeSourceInfo(QualType()),
    SourceMgr(SM), LangOpts(LOpts),
    AddrSpaceMap(0), Target(t), PrintingPolicy(LOpts),
    Idents(idents), Selectors(sels),
    BuiltinInfo(builtins),
    DeclarationNames(*this),
    ExternalSource(0), Listener(0),
    LastSDM(0, 0),
    UniqueBlockByRefTypeID(0)
{
  if (size_reserve > 0) Types.reserve(size_reserve);
  TUDecl = TranslationUnitDecl::Create(*this);

  if (!DelayInitialization) {
    assert(t && "No target supplied for ASTContext initialization");
    InitBuiltinTypes(*t);
  }
}

ASTContext::~ASTContext() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions.
  for (unsigned I = 0, N = Deallocations.size(); I != N; ++I)
    Deallocations[I].first(Deallocations[I].second);

  // Release all of the memory associated with overridden C++ methods.
  for (llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::iterator
         OM = OverriddenMethods.begin(), OMEnd = OverriddenMethods.end();
       OM != OMEnd; ++OM)
    OM->second.Destroy();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
                      const ASTRecordLayout*>::iterator
         I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
      R->Destroy(*this);

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
         I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
      R->Destroy(*this);
  }

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
         AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
}

void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
  Deallocations.push_back(std::make_pair(Callback, Data));
}

void
ASTContext::setExternalSource(llvm::OwningPtr<ExternalASTSource> &Source) {
  ExternalSource.reset(Source.take());
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.def"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types\n";                                         \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.def"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOptions().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOptions().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource.get()) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl) {
    TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(Int128Ty);
    Int128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
                                     getTranslationUnitDecl(),
                                     SourceLocation(),
                                     SourceLocation(),
                                     &Idents.get("__int128_t"),
                                     TInfo);
  }

  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl) {
    TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(UnsignedInt128Ty);
    UInt128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
                                      getTranslationUnitDecl(),
                                      SourceLocation(),
                                      SourceLocation(),
                                      &Idents.get("__uint128_t"),
                                      TInfo);
  }

  return UInt128Decl;
}

void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  BuiltinType *Ty = new (*this, TypeAlignment) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
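  // Every type the context creates is also recorded in Types, which
  // PrintStats() walks to report per-class counts.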
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  if (LangOpts.CPlusPlus) { // C++ 3.9.1p5
    if (TargetInfo::isTypeSigned(Target.getWCharType()))
      InitBuiltinType(WCharTy, BuiltinType::WChar_S);
    else  // -fshort-wchar makes wchar_t be unsigned.
      InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  } else // C99
    WCharTy = getFromTargetType(Target.getWCharType());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // C99 6.2.5p11.
  FloatComplexTy = getComplexType(FloatTy);
  DoubleComplexTy = getComplexType(DoubleTy);
  LongDoubleComplexTy = getComplexType(LongDoubleTy);

  BuiltinVaListType = QualType();

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  ObjCConstantStringType = QualType();

  // void * type
  VoidPtrTy = getPointerType(VoidTy);

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// \brief Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  llvm::DenseMap<const VarDecl *, MemberSpecializationInfo *>::iterator Pos
    = InstantiatedFromStaticDataMember.find(Var);
  if (Pos == InstantiatedFromStaticDataMember.end())
    return 0;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                          SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  assert(!InstantiatedFromStaticDataMember[Inst] &&
         "Already noted what static data member was instantiated from");
  InstantiatedFromStaticDataMember[Inst]
    = new (*this) MemberSpecializationInfo(Tmpl, TSK, PointOfInstantiation);
}

FunctionDecl *ASTContext::getClassScopeSpecializationPattern(
                                                     const FunctionDecl *FD){
  assert(FD && "Specialization is 0");
  llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos
    = ClassScopeSpecializationPattern.find(FD);
  if (Pos == ClassScopeSpecializationPattern.end())
    return 0;

  return Pos->second;
}

void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD,
                                                    FunctionDecl *Pattern) {
  assert(FD && "Specialization is 0");
  assert(Pattern && "Class scope specialization pattern is 0");
  ClassScopeSpecializationPattern[FD] = Pattern;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(UsingDecl *UUD) {
  llvm::DenseMap<UsingDecl *, NamedDecl *>::const_iterator Pos
    = InstantiatedFromUsingDecl.find(UUD);
  if (Pos == InstantiatedFromUsingDecl.end())
    return 0;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingDecl(UsingDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
    = InstantiatedFromUsingShadowDecl.find(Inst);
  if (Pos == InstantiatedFromUsingShadowDecl.end())
    return 0;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
    = InstantiatedFromUnnamedFieldDecl.find(Field);
  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
    return 0;

  return Pos->second;
}

void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
                                                     FieldDecl *Tmpl) {
  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
         "Already noted what unnamed field was instantiated from");

  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
}

bool ASTContext::ZeroBitfieldFollowsNonBitfield(const FieldDecl *FD,
                                                const FieldDecl *LastFD) const {
  return (FD->isBitField() && LastFD && !LastFD->isBitField() &&
          FD->getBitWidthValue(*this) == 0);
}

bool ASTContext::ZeroBitfieldFollowsBitfield(const FieldDecl *FD,
                                             const FieldDecl *LastFD) const {
  return (FD->isBitField() && LastFD && LastFD->isBitField() &&
          FD->getBitWidthValue(*this) == 0 &&
          LastFD->getBitWidthValue(*this) != 0);
}

bool ASTContext::BitfieldFollowsBitfield(const FieldDecl *FD,
                                         const FieldDecl *LastFD) const {
  return (FD->isBitField() && LastFD && LastFD->isBitField() &&
          FD->getBitWidthValue(*this) &&
          LastFD->getBitWidthValue(*this));
}

bool ASTContext::NonBitfieldFollowsBitfield(const FieldDecl *FD,
                                            const FieldDecl *LastFD) const {
  return (!FD->isBitField() && LastFD && LastFD->isBitField() &&
          LastFD->getBitWidthValue(*this));
}

bool ASTContext::BitfieldFollowsNonBitfield(const FieldDecl *FD,
                                            const FieldDecl *LastFD) const {
  return (FD->isBitField() && LastFD && !LastFD->isBitField() &&
          FD->getBitWidthValue(*this));
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
    = OverriddenMethods.find(Method);
  if (Pos == OverriddenMethods.end())
    return 0;

  return Pos->second.begin();
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
    = OverriddenMethods.find(Method);
  if (Pos == OverriddenMethods.end())
    return 0;

  return Pos->second.end();
}

unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
    = OverriddenMethods.find(Method);
  if (Pos == OverriddenMethods.end())
    return 0;

  return Pos->second.size();
}

void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  OverriddenMethods[Method].push_back(Overridden);
}
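
// A minimal usage sketch (not part of this file; handleOverride is a
// hypothetical callback) showing how the overridden-method iterators above
// are typically consumed:
//
//   for (ASTContext::overridden_cxx_method_iterator
//          I = Ctx.overridden_methods_begin(MD),
//          E = Ctx.overridden_methods_end(MD);
//        I != E; ++I)
//     handleOverride(*I);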

//===----------------------------------------------------------------------===//
//                         Type Sizing and Analysis
//===----------------------------------------------------------------------===//

/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
/// scalar floating point type.
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  const BuiltinType *BT = T->getAs<BuiltinType>();
  assert(BT && "Not a floating point type!");
  switch (BT->getKind()) {
  default: llvm_unreachable("Not a floating point type!");
  case BuiltinType::Half:       return Target->getHalfFormat();
  case BuiltinType::Float:      return Target->getFloatFormat();
  case BuiltinType::Double:     return Target->getDoubleFormat();
  case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
  }
}

/// getDeclAlign - Return a conservative estimate of the alignment of the
/// specified decl. Note that bitfields do not have a valid alignment, so
/// this method will assert on them.
/// If @p RefAsPointee, references are treated like their underlying type
/// (for alignof), else they're treated like pointers (for CodeGen).
CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) const {
  unsigned Align = Target->getCharWidth();

  bool UseAlignAttrOnly = false;
  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
    Align = AlignFromAttr;

    // __attribute__((aligned)) can increase or decrease alignment
    // *except* on a struct or struct member, where it only increases
    // alignment unless 'packed' is also specified.
    //
    // It is an error for alignas to decrease alignment, so we can
    // ignore that possibility; Sema should diagnose it.
    if (isa<FieldDecl>(D)) {
      UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
    } else {
      UseAlignAttrOnly = true;
    }
  }
  else if (isa<FieldDecl>(D))
    UseAlignAttrOnly =
      D->hasAttr<PackedAttr>() ||
      cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();

  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing

  } else if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const ReferenceType* RT = T->getAs<ReferenceType>()) {
      if (RefAsPointee)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    if (!T->isIncompleteType() && !T->isFunctionType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      unsigned MinWidth = Target->getLargeArrayMinWidth();
      const ArrayType *arrayType;
      if (MinWidth && (arrayType = getAsArrayType(T))) {
        if (isa<VariableArrayType>(arrayType))
          Align = std::max(Align, Target->getLargeArrayAlign());
        else if (isa<ConstantArrayType>(arrayType) &&
                 MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
          Align = std::max(Align, Target->getLargeArrayAlign());

        // Walk through any array types while we're at it.
        T = getBaseElementType(arrayType);
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
    }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const FieldDecl *field = dyn_cast<FieldDecl>(VD)) {
      // So calculate the alignment of the field.
      const ASTRecordLayout &layout = getASTRecordLayout(field->getParent());

      // Start with the record's overall alignment.
      unsigned fieldAlign = toBits(layout.getAlignment());

      // Use the GCD of that and the offset within the record.
      uint64_t offset = layout.getFieldOffset(field->getFieldIndex());
      if (offset > 0) {
        // Alignment is always a power of 2, so the GCD will be a power of 2,
        // which means we get to do this crazy thing instead of Euclid's.
        uint64_t lowBitOfOffset = offset & (~offset + 1);
        if (lowBitOfOffset < fieldAlign)
          fieldAlign = static_cast<unsigned>(lowBitOfOffset);
      }

      Align = std::min(Align, fieldAlign);
    }
  }

  return toCharUnitsFromBits(Align);
}

std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoInChars(const Type *T) const {
  std::pair<uint64_t, unsigned> Info = getTypeInfo(T);
  return std::make_pair(toCharUnitsFromBits(Info.first),
                        toCharUnitsFromBits(Info.second));
}

std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoInChars(QualType T) const {
  return getTypeInfoInChars(T.getTypePtr());
}

/// getTypeSize - Return the size of the specified type, in bits. This method
/// does not work on incomplete types.
///
/// FIXME: Pointers into different addr spaces could have different sizes and
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
std::pair<uint64_t, unsigned>
ASTContext::getTypeInfo(const Type *T) const {
  uint64_t Width = 0;
  unsigned Align = 8;
  switch (T->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("Should not see dependent types");
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // GCC extension: alignof(function) = 32 bits
    Width = 0;
    Align = 32;
    break;

  case Type::IncompleteArray:
  case Type::VariableArray:
    Width = 0;
    Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
    break;

  case Type::ConstantArray: {
    const ConstantArrayType *CAT = cast<ConstantArrayType>(T);

    std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(CAT->getElementType());
    Width = EltInfo.first*CAT->getSize().getZExtValue();
    Align = EltInfo.second;
    Width = llvm::RoundUpToAlignment(Width, Align);
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType *VT = cast<VectorType>(T);
    std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(VT->getElementType());
    Width = EltInfo.first*VT->getNumElements();
    Align = Width;
    // If the alignment is not a power of 2, round up to the next power of 2.
    // This happens for non-power-of-2 length vectors.
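    // (Align & (Align - 1)) is zero exactly when Align is a power of two,
    // so a nonzero result means we have to round up.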
    if (Align & (Align-1)) {
      Align = llvm::NextPowerOf2(Align);
      Width = llvm::RoundUpToAlignment(Width, Align);
    }
    break;
  }

  case Type::Builtin:
    switch (cast<BuiltinType>(T)->getKind()) {
    default: llvm_unreachable("Unknown builtin type!");
    case BuiltinType::Void:
      // GCC extension: alignof(void) = 8 bits.
      Width = 0;
      Align = 8;
      break;

    case BuiltinType::Bool:
      Width = Target->getBoolWidth();
      Align = Target->getBoolAlign();
      break;
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
      Width = Target->getCharWidth();
      Align = Target->getCharAlign();
      break;
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
      Width = Target->getWCharWidth();
      Align = Target->getWCharAlign();
      break;
    case BuiltinType::Char16:
      Width = Target->getChar16Width();
      Align = Target->getChar16Align();
      break;
    case BuiltinType::Char32:
      Width = Target->getChar32Width();
      Align = Target->getChar32Align();
      break;
    case BuiltinType::UShort:
    case BuiltinType::Short:
      Width = Target->getShortWidth();
      Align = Target->getShortAlign();
      break;
    case BuiltinType::UInt:
    case BuiltinType::Int:
      Width = Target->getIntWidth();
      Align = Target->getIntAlign();
      break;
    case BuiltinType::ULong:
    case BuiltinType::Long:
      Width = Target->getLongWidth();
      Align = Target->getLongAlign();
      break;
    case BuiltinType::ULongLong:
    case BuiltinType::LongLong:
      Width = Target->getLongLongWidth();
      Align = Target->getLongLongAlign();
      break;
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      Width = 128;
      Align = 128; // int128_t is 128-bit aligned on all targets.
      break;
    case BuiltinType::Half:
      Width = Target->getHalfWidth();
      Align = Target->getHalfAlign();
      break;
    case BuiltinType::Float:
      Width = Target->getFloatWidth();
      Align = Target->getFloatAlign();
      break;
    case BuiltinType::Double:
      Width = Target->getDoubleWidth();
      Align = Target->getDoubleAlign();
      break;
    case BuiltinType::LongDouble:
      Width = Target->getLongDoubleWidth();
      Align = Target->getLongDoubleAlign();
      break;
    case BuiltinType::NullPtr:
      Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
      Align = Target->getPointerAlign(0); //   == sizeof(void*)
      break;
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      Width = Target->getPointerWidth(0);
      Align = Target->getPointerAlign(0);
      break;
    }
    break;
  case Type::ObjCObjectPointer:
    Width = Target->getPointerWidth(0);
    Align = Target->getPointerAlign(0);
    break;
  case Type::BlockPointer: {
    unsigned AS = getTargetAddressSpace(
        cast<BlockPointerType>(T)->getPointeeType());
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    // alignof and sizeof should never enter this code path here, so we go
    // the pointer route.
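    // Lay references out exactly like pointers into the pointee's address
    // space.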
    unsigned AS = getTargetAddressSpace(
        cast<ReferenceType>(T)->getPointeeType());
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  }
  case Type::Pointer: {
    unsigned AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  }
  case Type::MemberPointer: {
    const MemberPointerType *MPT = cast<MemberPointerType>(T);
    std::pair<uint64_t, unsigned> PtrDiffInfo =
      getTypeInfo(getPointerDiffType());
    Width = PtrDiffInfo.first * ABI->getMemberPointerSize(MPT);
    Align = PtrDiffInfo.second;
    break;
  }
  case Type::Complex: {
    // Complex types have the same alignment as their elements, but twice the
    // size.
    std::pair<uint64_t, unsigned> EltInfo =
      getTypeInfo(cast<ComplexType>(T)->getElementType());
    Width = EltInfo.first*2;
    Align = EltInfo.second;
    break;
  }
  case Type::ObjCObject:
    return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
  case Type::ObjCInterface: {
    const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    Width = toBits(Layout.getSize());
    Align = toBits(Layout.getAlignment());
    break;
  }
  case Type::Record:
  case Type::Enum: {
    const TagType *TT = cast<TagType>(T);

    if (TT->getDecl()->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }

    if (const EnumType *ET = dyn_cast<EnumType>(TT))
      return getTypeInfo(ET->getDecl()->getIntegerType());

    const RecordType *RT = cast<RecordType>(TT);
    const ASTRecordLayout &Layout = getASTRecordLayout(RT->getDecl());
    Width = toBits(Layout.getSize());
    Align = toBits(Layout.getAlignment());
    break;
  }

  case Type::SubstTemplateTypeParm:
    return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
                       getReplacementType().getTypePtr());

  case Type::Auto: {
    const AutoType *A = cast<AutoType>(T);
    assert(A->isDeduced() && "Cannot request the size of a dependent type");
    return getTypeInfo(A->getDeducedType().getTypePtr());
  }

  case Type::Paren:
    return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());

  case Type::Typedef: {
    const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
    std::pair<uint64_t, unsigned> Info
      = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
    // If the typedef has an aligned attribute on it, it overrides any computed
    // alignment we have. This violates the GCC documentation (which says that
    // attribute(aligned) can only round up) but matches its implementation.
    if (unsigned AttrAlign = Typedef->getMaxAlignment())
      Align = AttrAlign;
    else
      Align = Info.second;
    Width = Info.first;
    break;
  }

  case Type::TypeOfExpr:
    return getTypeInfo(cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType()
                         .getTypePtr());

  case Type::TypeOf:
    return getTypeInfo(cast<TypeOfType>(T)->getUnderlyingType().getTypePtr());

  case Type::Decltype:
    return getTypeInfo(cast<DecltypeType>(T)->getUnderlyingExpr()->getType()
                        .getTypePtr());

  case Type::UnaryTransform:
    return getTypeInfo(cast<UnaryTransformType>(T)->getUnderlyingType());

  case Type::Elaborated:
    return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());

  case Type::Attributed:
    return getTypeInfo(
                  cast<AttributedType>(T)->getEquivalentType().getTypePtr());

  case Type::TemplateSpecialization: {
    assert(getCanonicalType(T) != T &&
           "Cannot request the size of a dependent type");
    const TemplateSpecializationType *TST = cast<TemplateSpecializationType>(T);
    // A type alias template specialization may refer to a typedef with the
    // aligned attribute on it.
    if (TST->isTypeAlias())
      return getTypeInfo(TST->getAliasedType().getTypePtr());
    else
      return getTypeInfo(getCanonicalType(T));
  }

  case Type::Atomic: {
    std::pair<uint64_t, unsigned> Info
      = getTypeInfo(cast<AtomicType>(T)->getValueType());
    Width = Info.first;
    Align = Info.second;
    if (Width != 0 && Width <= Target->getMaxAtomicPromoteWidth() &&
        llvm::isPowerOf2_64(Width)) {
      // We can potentially perform lock-free atomic operations for this
      // type; promote the alignment appropriately.
      // FIXME: We could potentially promote the width here as well...
      // is that worthwhile? (Non-struct atomic types generally have
      // power-of-two size anyway, but structs might not. Requires a bit
      // of implementation work to make sure we zero out the extra bits.)
      Align = static_cast<unsigned>(Width);
    }
  }

  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return std::make_pair(Width, Align);
}

/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}

/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}

/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeSize(T));
}
CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeSize(T));
}

/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}
CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}

/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance to overalign
/// a data type.
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  unsigned ABIAlign = getTypeAlign(T);

  // Double and long long should be naturally aligned if possible.
  if (const ComplexType* CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
      T->isSpecificBuiltinType(BuiltinType::LongLong))
    return std::max(ABIAlign, (unsigned)getTypeSize(T));

  return ABIAlign;
}

/// DeepCollectObjCIvars -
/// This routine first collects all declared, but not synthesized, ivars in
/// super class and then collects all ivars, including those synthesized for
/// current class. This routine is used for implementation of current class
/// when all ivars, declared and synthesized, are known.
///
void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
                                      bool leafClass,
                            SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
    DeepCollectObjCIvars(SuperClass, false, Ivars);
  if (!leafClass) {
    for (ObjCInterfaceDecl::ivar_iterator I = OI->ivar_begin(),
         E = OI->ivar_end(); I != E; ++I)
      Ivars.push_back(*I);
  } else {
    ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
    for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
         Iv = Iv->getNextIvar())
      Ivars.push_back(Iv);
  }
}

/// CollectInheritedProtocols - Collect all protocols in current class and
/// those inherited by it.
void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
                          llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
  if (const ObjCInterfaceDecl *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
    // We can use protocol_iterator here instead of
    // all_referenced_protocol_iterator since we are walking all categories.
    for (ObjCInterfaceDecl::all_protocol_iterator
           P = OI->all_referenced_protocol_begin(),
           PE = OI->all_referenced_protocol_end(); P != PE; ++P) {
      ObjCProtocolDecl *Proto = (*P);
      Protocols.insert(Proto);
      for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
           PE = Proto->protocol_end(); P != PE; ++P) {
        Protocols.insert(*P);
        CollectInheritedProtocols(*P, Protocols);
      }
    }

    // Categories of this Interface.
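    // Walk the linked list of categories attached to this interface; each
    // category may reference additional protocols of its own.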
    for (const ObjCCategoryDecl *CDeclChain = OI->getCategoryList();
         CDeclChain; CDeclChain = CDeclChain->getNextClassCategory())
      CollectInheritedProtocols(CDeclChain, Protocols);
    if (ObjCInterfaceDecl *SD = OI->getSuperClass())
      while (SD) {
        CollectInheritedProtocols(SD, Protocols);
        SD = SD->getSuperClass();
      }
  } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
    for (ObjCCategoryDecl::protocol_iterator P = OC->protocol_begin(),
         PE = OC->protocol_end(); P != PE; ++P) {
      ObjCProtocolDecl *Proto = (*P);
      Protocols.insert(Proto);
      for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
           PE = Proto->protocol_end(); P != PE; ++P)
        CollectInheritedProtocols(*P, Protocols);
    }
  } else if (const ObjCProtocolDecl *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
    for (ObjCProtocolDecl::protocol_iterator P = OP->protocol_begin(),
         PE = OP->protocol_end(); P != PE; ++P) {
      ObjCProtocolDecl *Proto = (*P);
      Protocols.insert(Proto);
      for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
           PE = Proto->protocol_end(); P != PE; ++P)
        CollectInheritedProtocols(*P, Protocols);
    }
  }
}

unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
  unsigned count = 0;
  // Count ivars declared in class extension.
  for (const ObjCCategoryDecl *CDecl = OI->getFirstClassExtension(); CDecl;
       CDecl = CDecl->getNextClassExtension())
    count += CDecl->ivar_size();

  // Count ivars defined in this class's implementation. This
  // includes synthesized ivars.
  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
    count += ImplDecl->ivar_size();

  return count;
}

/// \brief Get the implementation of ObjCInterfaceDecl, or NULL if none exists.
ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
    I = ObjCImpls.find(D);
  if (I != ObjCImpls.end())
    return cast<ObjCImplementationDecl>(I->second);
  return 0;
}
/// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
    I = ObjCImpls.find(D);
  if (I != ObjCImpls.end())
    return cast<ObjCCategoryImplDecl>(I->second);
  return 0;
}

/// \brief Set the implementation of ObjCInterfaceDecl.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
                                       ObjCImplementationDecl *ImplD) {
  assert(IFaceD && ImplD && "Passed null params");
  ObjCImpls[IFaceD] = ImplD;
}
/// \brief Set the implementation of ObjCCategoryDecl.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
                                       ObjCCategoryImplDecl *ImplD) {
  assert(CatD && ImplD && "Passed null params");
  ObjCImpls[CatD] = ImplD;
}

/// \brief Get the copy initialization expression of VarDecl, or NULL if
/// none exists.
Expr *ASTContext::getBlockVarCopyInits(const VarDecl*VD) {
  assert(VD && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "getBlockVarCopyInits - not __block var");
  llvm::DenseMap<const VarDecl*, Expr*>::iterator
    I = BlockVarCopyInits.find(VD);
  return (I != BlockVarCopyInits.end()) ? cast<Expr>(I->second) : 0;
}

/// \brief Set the copy initialization expression of a block var decl.
void ASTContext::setBlockVarCopyInits(VarDecl*VD, Expr* Init) {
  assert(VD && Init && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "setBlockVarCopyInits - not __block var");
  BlockVarCopyInits[VD] = Init;
}

/// \brief Allocate an uninitialized TypeSourceInfo.
///
/// The caller should initialize the memory held by TypeSourceInfo using
/// the TypeLoc wrappers.
///
/// \param T the type that will be the basis for type source info. This type
/// should refer to how the declarator was written in source code, not to
/// what type semantic analysis resolved the declarator to.
TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
                                                 unsigned DataSize) const {
  if (!DataSize)
    DataSize = TypeLoc::getFullDataSizeForType(T);
  else
    assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
           "incorrect data size provided to CreateTypeSourceInfo!");

  TypeSourceInfo *TInfo =
    (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
  new (TInfo) TypeSourceInfo(T);
  return TInfo;
}

TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
                                                     SourceLocation L) const {
  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
  return DI;
}

const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  return getObjCLayout(D, 0);
}

const ASTRecordLayout &
ASTContext::getASTObjCImplementationLayout(
                                        const ObjCImplementationDecl *D) const {
  return getObjCLayout(D->getClassInterface(), D);
}

//===----------------------------------------------------------------------===//
//                   Type creation/memoization methods
//===----------------------------------------------------------------------===//

QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, baseType, quals);
  void *insertPos = 0;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.second.addConsistentQualifiers(quals);
    canon = getExtQualType(canonSplit.first, canonSplit.second);

    // Re-find the insert position.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
  }

  ExtQuals *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(eq, insertPos);
  return QualType(eq, fastQuals);
}

QualType
ASTContext::getAddrSpaceQualType(QualType T, unsigned AddressSpace) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getAddressSpace() == AddressSpace)
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
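  // Strip the local qualifiers off T so the new address space can be folded
  // into the same qualifier set below.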
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an address space specified, it cannot get
  // another one.
  assert(!Quals.hasAddressSpace() &&
         "Type cannot be in multiple addr spaces!");
  Quals.addAddressSpace(AddressSpace);

  return getExtQualType(TypeNode, Quals);
}

QualType ASTContext::getObjCGCQualType(QualType T,
                                       Qualifiers::GC GCAttr) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getObjCGCAttr() == GCAttr)
    return T;

  if (const PointerType *ptr = T->getAs<PointerType>()) {
    QualType Pointee = ptr->getPointeeType();
    if (Pointee->isAnyPointerType()) {
      QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
      return getPointerType(ResultType);
    }
  }

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an ObjCGC specified, it cannot get
  // another one.
  assert(!Quals.hasObjCGCAttr() &&
         "Type cannot have multiple ObjCGCs!");
  Quals.addObjCGCAttr(GCAttr);

  return getExtQualType(TypeNode, Quals);
}

const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
                                                   FunctionType::ExtInfo Info) {
  if (T->getExtInfo() == Info)
    return T;

  QualType Result;
  if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
    Result = getFunctionNoProtoType(FNPT->getResultType(), Info);
  } else {
    const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    EPI.ExtInfo = Info;
    Result = getFunctionType(FPT->getResultType(), FPT->arg_type_begin(),
                             FPT->getNumArgs(), EPI);
  }

  return cast<FunctionType>(Result.getTypePtr());
}

/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType ASTContext::getComplexType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, T);

  void *InsertPos = 0;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }
  ComplexType *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
  Types.push_back(New);
  ComplexTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
QualType ASTContext::getPointerType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  PointerType::Profile(ID, T);

  void *InsertPos = 0;
  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }
  PointerType *New = new (*this, TypeAlignment) PointerType(T, Canonical);
  Types.push_back(New);
  PointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, T);

  void *InsertPos = 0;
  if (BlockPointerType *PT =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    BlockPointerType *NewIP =
      BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }
  BlockPointerType *New
    = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
  Types.push_back(New);
  BlockPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert(getCanonicalType(T) != OverloadTy &&
         "Unresolved overloaded function type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, SpelledAsLValue);

  void *InsertPos = 0;
  if (LValueReferenceType *RT =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const ReferenceType *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about.
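    // (Building the canonical reference above may have added nodes to the
    // folding set, invalidating the previously computed InsertPos.)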
    LValueReferenceType *NewIP =
      LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }

  LValueReferenceType *New
    = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
                                                     SpelledAsLValue);
  Types.push_back(New);
  LValueReferenceTypes.InsertNode(New, InsertPos);

  return QualType(New, 0);
}

/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, false);

  void *InsertPos = 0;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const ReferenceType *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about.
    RValueReferenceType *NewIP =
      RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }

  RValueReferenceType *New
    = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
  Types.push_back(New);
  RValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type, in the specified class.
QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, T, Cls);

  void *InsertPos = 0;
  if (MemberPointerType *PT =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
    Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls));

    // Get the new insert position for the node we care about.
    MemberPointerType *NewIP =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }
  MemberPointerType *New
    = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
  Types.push_back(New);
  MemberPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
1618 QualType ASTContext::getConstantArrayType(QualType EltTy, 1619 const llvm::APInt &ArySizeIn, 1620 ArrayType::ArraySizeModifier ASM, 1621 unsigned IndexTypeQuals) const { 1622 assert((EltTy->isDependentType() || 1623 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 1624 "Constant array of VLAs is illegal!"); 1625 1626 // Convert the array size into a canonical width matching the pointer size for 1627 // the target. 1628 llvm::APInt ArySize(ArySizeIn); 1629 ArySize = 1630 ArySize.zextOrTrunc(Target->getPointerWidth(getTargetAddressSpace(EltTy))); 1631 1632 llvm::FoldingSetNodeID ID; 1633 ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals); 1634 1635 void *InsertPos = 0; 1636 if (ConstantArrayType *ATP = 1637 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 1638 return QualType(ATP, 0); 1639 1640 // If the element type isn't canonical or has qualifiers, this won't 1641 // be a canonical type either, so fill in the canonical type field. 1642 QualType Canon; 1643 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 1644 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 1645 Canon = getConstantArrayType(QualType(canonSplit.first, 0), ArySize, 1646 ASM, IndexTypeQuals); 1647 Canon = getQualifiedType(Canon, canonSplit.second); 1648 1649 // Get the new insert position for the node we care about. 1650 ConstantArrayType *NewIP = 1651 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 1652 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 1653 } 1654 1655 ConstantArrayType *New = new(*this,TypeAlignment) 1656 ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals); 1657 ConstantArrayTypes.InsertNode(New, InsertPos); 1658 Types.push_back(New); 1659 return QualType(New, 0); 1660 } 1661 1662 /// getVariableArrayDecayedType - Turns the given type, which may be 1663 /// variably-modified, into the corresponding type with all the known 1664 /// sizes replaced with [*]. 1665 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 1666 // Vastly most common case. 1667 if (!type->isVariablyModifiedType()) return type; 1668 1669 QualType result; 1670 1671 SplitQualType split = type.getSplitDesugaredType(); 1672 const Type *ty = split.first; 1673 switch (ty->getTypeClass()) { 1674 #define TYPE(Class, Base) 1675 #define ABSTRACT_TYPE(Class, Base) 1676 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 1677 #include "clang/AST/TypeNodes.def" 1678 llvm_unreachable("didn't desugar past all non-canonical types?"); 1679 1680 // These types should never be variably-modified. 1681 case Type::Builtin: 1682 case Type::Complex: 1683 case Type::Vector: 1684 case Type::ExtVector: 1685 case Type::DependentSizedExtVector: 1686 case Type::ObjCObject: 1687 case Type::ObjCInterface: 1688 case Type::ObjCObjectPointer: 1689 case Type::Record: 1690 case Type::Enum: 1691 case Type::UnresolvedUsing: 1692 case Type::TypeOfExpr: 1693 case Type::TypeOf: 1694 case Type::Decltype: 1695 case Type::UnaryTransform: 1696 case Type::DependentName: 1697 case Type::InjectedClassName: 1698 case Type::TemplateSpecialization: 1699 case Type::DependentTemplateSpecialization: 1700 case Type::TemplateTypeParm: 1701 case Type::SubstTemplateTypeParmPack: 1702 case Type::Auto: 1703 case Type::PackExpansion: 1704 llvm_unreachable("type should never be variably-modified"); 1705 1706 // These types can be variably-modified but should never need to 1707 // further decay. 
1708 case Type::FunctionNoProto: 1709 case Type::FunctionProto: 1710 case Type::BlockPointer: 1711 case Type::MemberPointer: 1712 return type; 1713 1714 // These types can be variably-modified. All these modifications 1715 // preserve structure except as noted by comments. 1716 // TODO: if we ever care about optimizing VLAs, there are no-op 1717 // optimizations available here. 1718 case Type::Pointer: 1719 result = getPointerType(getVariableArrayDecayedType( 1720 cast<PointerType>(ty)->getPointeeType())); 1721 break; 1722 1723 case Type::LValueReference: { 1724 const LValueReferenceType *lv = cast<LValueReferenceType>(ty); 1725 result = getLValueReferenceType( 1726 getVariableArrayDecayedType(lv->getPointeeType()), 1727 lv->isSpelledAsLValue()); 1728 break; 1729 } 1730 1731 case Type::RValueReference: { 1732 const RValueReferenceType *lv = cast<RValueReferenceType>(ty); 1733 result = getRValueReferenceType( 1734 getVariableArrayDecayedType(lv->getPointeeType())); 1735 break; 1736 } 1737 1738 case Type::Atomic: { 1739 const AtomicType *at = cast<AtomicType>(ty); 1740 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 1741 break; 1742 } 1743 1744 case Type::ConstantArray: { 1745 const ConstantArrayType *cat = cast<ConstantArrayType>(ty); 1746 result = getConstantArrayType( 1747 getVariableArrayDecayedType(cat->getElementType()), 1748 cat->getSize(), 1749 cat->getSizeModifier(), 1750 cat->getIndexTypeCVRQualifiers()); 1751 break; 1752 } 1753 1754 case Type::DependentSizedArray: { 1755 const DependentSizedArrayType *dat = cast<DependentSizedArrayType>(ty); 1756 result = getDependentSizedArrayType( 1757 getVariableArrayDecayedType(dat->getElementType()), 1758 dat->getSizeExpr(), 1759 dat->getSizeModifier(), 1760 dat->getIndexTypeCVRQualifiers(), 1761 dat->getBracketsRange()); 1762 break; 1763 } 1764 1765 // Turn incomplete types into [*] types. 1766 case Type::IncompleteArray: { 1767 const IncompleteArrayType *iat = cast<IncompleteArrayType>(ty); 1768 result = getVariableArrayType( 1769 getVariableArrayDecayedType(iat->getElementType()), 1770 /*size*/ 0, 1771 ArrayType::Normal, 1772 iat->getIndexTypeCVRQualifiers(), 1773 SourceRange()); 1774 break; 1775 } 1776 1777 // Turn VLA types into [*] types. 1778 case Type::VariableArray: { 1779 const VariableArrayType *vat = cast<VariableArrayType>(ty); 1780 result = getVariableArrayType( 1781 getVariableArrayDecayedType(vat->getElementType()), 1782 /*size*/ 0, 1783 ArrayType::Star, 1784 vat->getIndexTypeCVRQualifiers(), 1785 vat->getBracketsRange()); 1786 break; 1787 } 1788 } 1789 1790 // Apply the top-level qualifiers from the original. 1791 return getQualifiedType(result, split.second); 1792 } 1793 1794 /// getVariableArrayType - Returns a non-unique reference to the type for a 1795 /// variable array of the specified element type. 1796 QualType ASTContext::getVariableArrayType(QualType EltTy, 1797 Expr *NumElts, 1798 ArrayType::ArraySizeModifier ASM, 1799 unsigned IndexTypeQuals, 1800 SourceRange Brackets) const { 1801 // Since we don't unique expressions, it isn't possible to unique VLA's 1802 // that have an expression provided for their size. 1803 QualType Canon; 1804 1805 // Be sure to pull qualifiers off the element type. 
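  // Note: the split below separates the canonical element type from its
  // local qualifiers; the canonical array type is built from the unqualified
  // element and the stripped qualifiers are then re-applied on top with
  // getQualifiedType, so qualified and unqualified spellings of the element
  // share one canonical structure.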
1806 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 1807 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 1808 Canon = getVariableArrayType(QualType(canonSplit.first, 0), NumElts, ASM, 1809 IndexTypeQuals, Brackets); 1810 Canon = getQualifiedType(Canon, canonSplit.second); 1811 } 1812 1813 VariableArrayType *New = new(*this, TypeAlignment) 1814 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 1815 1816 VariableArrayTypes.push_back(New); 1817 Types.push_back(New); 1818 return QualType(New, 0); 1819 } 1820 1821 /// getDependentSizedArrayType - Returns a non-unique reference to 1822 /// the type for a dependently-sized array of the specified element 1823 /// type. 1824 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 1825 Expr *numElements, 1826 ArrayType::ArraySizeModifier ASM, 1827 unsigned elementTypeQuals, 1828 SourceRange brackets) const { 1829 assert((!numElements || numElements->isTypeDependent() || 1830 numElements->isValueDependent()) && 1831 "Size must be type- or value-dependent!"); 1832 1833 // Dependently-sized array types that do not have a specified number 1834 // of elements will have their sizes deduced from a dependent 1835 // initializer. We do no canonicalization here at all, which is okay 1836 // because they can't be used in most locations. 1837 if (!numElements) { 1838 DependentSizedArrayType *newType 1839 = new (*this, TypeAlignment) 1840 DependentSizedArrayType(*this, elementType, QualType(), 1841 numElements, ASM, elementTypeQuals, 1842 brackets); 1843 Types.push_back(newType); 1844 return QualType(newType, 0); 1845 } 1846 1847 // Otherwise, we actually build a new type every time, but we 1848 // also build a canonical type. 1849 1850 SplitQualType canonElementType = getCanonicalType(elementType).split(); 1851 1852 void *insertPos = 0; 1853 llvm::FoldingSetNodeID ID; 1854 DependentSizedArrayType::Profile(ID, *this, 1855 QualType(canonElementType.first, 0), 1856 ASM, elementTypeQuals, numElements); 1857 1858 // Look for an existing type with these properties. 1859 DependentSizedArrayType *canonTy = 1860 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 1861 1862 // If we don't have one, build one. 1863 if (!canonTy) { 1864 canonTy = new (*this, TypeAlignment) 1865 DependentSizedArrayType(*this, QualType(canonElementType.first, 0), 1866 QualType(), numElements, ASM, elementTypeQuals, 1867 brackets); 1868 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 1869 Types.push_back(canonTy); 1870 } 1871 1872 // Apply qualifiers from the element type to the array. 1873 QualType canon = getQualifiedType(QualType(canonTy,0), 1874 canonElementType.second); 1875 1876 // If we didn't need extra canonicalization for the element type, 1877 // then just use that as our result. 1878 if (QualType(canonElementType.first, 0) == elementType) 1879 return canon; 1880 1881 // Otherwise, we need to build a type which follows the spelling 1882 // of the element type. 
1883 DependentSizedArrayType *sugaredType 1884 = new (*this, TypeAlignment) 1885 DependentSizedArrayType(*this, elementType, canon, numElements, 1886 ASM, elementTypeQuals, brackets); 1887 Types.push_back(sugaredType); 1888 return QualType(sugaredType, 0); 1889 } 1890 1891 QualType ASTContext::getIncompleteArrayType(QualType elementType, 1892 ArrayType::ArraySizeModifier ASM, 1893 unsigned elementTypeQuals) const { 1894 llvm::FoldingSetNodeID ID; 1895 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); 1896 1897 void *insertPos = 0; 1898 if (IncompleteArrayType *iat = 1899 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) 1900 return QualType(iat, 0); 1901 1902 // If the element type isn't canonical, this won't be a canonical type 1903 // either, so fill in the canonical type field. We also have to pull 1904 // qualifiers off the element type. 1905 QualType canon; 1906 1907 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { 1908 SplitQualType canonSplit = getCanonicalType(elementType).split(); 1909 canon = getIncompleteArrayType(QualType(canonSplit.first, 0), 1910 ASM, elementTypeQuals); 1911 canon = getQualifiedType(canon, canonSplit.second); 1912 1913 // Get the new insert position for the node we care about. 1914 IncompleteArrayType *existing = 1915 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 1916 assert(!existing && "Shouldn't be in the map!"); (void) existing; 1917 } 1918 1919 IncompleteArrayType *newType = new (*this, TypeAlignment) 1920 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 1921 1922 IncompleteArrayTypes.InsertNode(newType, insertPos); 1923 Types.push_back(newType); 1924 return QualType(newType, 0); 1925 } 1926 1927 /// getVectorType - Return the unique reference to a vector type of 1928 /// the specified element type and size. VectorType must be a built-in type. 1929 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 1930 VectorType::VectorKind VecKind) const { 1931 assert(vecType->isBuiltinType()); 1932 1933 // Check if we've already instantiated a vector of this type. 1934 llvm::FoldingSetNodeID ID; 1935 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 1936 1937 void *InsertPos = 0; 1938 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 1939 return QualType(VTP, 0); 1940 1941 // If the element type isn't canonical, this won't be a canonical type either, 1942 // so fill in the canonical type field. 1943 QualType Canonical; 1944 if (!vecType.isCanonical()) { 1945 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 1946 1947 // Get the new insert position for the node we care about. 1948 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 1949 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 1950 } 1951 VectorType *New = new (*this, TypeAlignment) 1952 VectorType(vecType, NumElts, Canonical, VecKind); 1953 VectorTypes.InsertNode(New, InsertPos); 1954 Types.push_back(New); 1955 return QualType(New, 0); 1956 } 1957 1958 /// getExtVectorType - Return the unique reference to an extended vector type of 1959 /// the specified element type and size. VectorType must be a built-in type. 1960 QualType 1961 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const { 1962 assert(vecType->isBuiltinType() || vecType->isDependentType()); 1963 1964 // Check if we've already instantiated a vector of this type. 
1965 llvm::FoldingSetNodeID ID; 1966 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 1967 VectorType::GenericVector); 1968 void *InsertPos = 0; 1969 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 1970 return QualType(VTP, 0); 1971 1972 // If the element type isn't canonical, this won't be a canonical type either, 1973 // so fill in the canonical type field. 1974 QualType Canonical; 1975 if (!vecType.isCanonical()) { 1976 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 1977 1978 // Get the new insert position for the node we care about. 1979 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 1980 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 1981 } 1982 ExtVectorType *New = new (*this, TypeAlignment) 1983 ExtVectorType(vecType, NumElts, Canonical); 1984 VectorTypes.InsertNode(New, InsertPos); 1985 Types.push_back(New); 1986 return QualType(New, 0); 1987 } 1988 1989 QualType 1990 ASTContext::getDependentSizedExtVectorType(QualType vecType, 1991 Expr *SizeExpr, 1992 SourceLocation AttrLoc) const { 1993 llvm::FoldingSetNodeID ID; 1994 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 1995 SizeExpr); 1996 1997 void *InsertPos = 0; 1998 DependentSizedExtVectorType *Canon 1999 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 2000 DependentSizedExtVectorType *New; 2001 if (Canon) { 2002 // We already have a canonical version of this array type; use it as 2003 // the canonical type for a newly-built type. 2004 New = new (*this, TypeAlignment) 2005 DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0), 2006 SizeExpr, AttrLoc); 2007 } else { 2008 QualType CanonVecTy = getCanonicalType(vecType); 2009 if (CanonVecTy == vecType) { 2010 New = new (*this, TypeAlignment) 2011 DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr, 2012 AttrLoc); 2013 2014 DependentSizedExtVectorType *CanonCheck 2015 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 2016 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 2017 (void)CanonCheck; 2018 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 2019 } else { 2020 QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 2021 SourceLocation()); 2022 New = new (*this, TypeAlignment) 2023 DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc); 2024 } 2025 } 2026 2027 Types.push_back(New); 2028 return QualType(New, 0); 2029 } 2030 2031 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'. 2032 /// 2033 QualType 2034 ASTContext::getFunctionNoProtoType(QualType ResultTy, 2035 const FunctionType::ExtInfo &Info) const { 2036 const CallingConv DefaultCC = Info.getCC(); 2037 const CallingConv CallConv = (LangOpts.MRTD && DefaultCC == CC_Default) ? 2038 CC_X86StdCall : DefaultCC; 2039 // Unique functions, to guarantee there is only one function of a particular 2040 // structure. 2041 llvm::FoldingSetNodeID ID; 2042 FunctionNoProtoType::Profile(ID, ResultTy, Info); 2043 2044 void *InsertPos = 0; 2045 if (FunctionNoProtoType *FT = 2046 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 2047 return QualType(FT, 0); 2048 2049 QualType Canonical; 2050 if (!ResultTy.isCanonical() || 2051 getCanonicalCallConv(CallConv) != CallConv) { 2052 Canonical = 2053 getFunctionNoProtoType(getCanonicalType(ResultTy), 2054 Info.withCallingConv(getCanonicalCallConv(CallConv))); 2055 2056 // Get the new insert position for the node we care about. 
2057 FunctionNoProtoType *NewIP = 2058 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 2059 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2060 } 2061 2062 FunctionProtoType::ExtInfo newInfo = Info.withCallingConv(CallConv); 2063 FunctionNoProtoType *New = new (*this, TypeAlignment) 2064 FunctionNoProtoType(ResultTy, Canonical, newInfo); 2065 Types.push_back(New); 2066 FunctionNoProtoTypes.InsertNode(New, InsertPos); 2067 return QualType(New, 0); 2068 } 2069 2070 /// getFunctionType - Return a normal function type with a typed argument 2071 /// list. isVariadic indicates whether the argument list includes '...'. 2072 QualType 2073 ASTContext::getFunctionType(QualType ResultTy, 2074 const QualType *ArgArray, unsigned NumArgs, 2075 const FunctionProtoType::ExtProtoInfo &EPI) const { 2076 // Unique functions, to guarantee there is only one function of a particular 2077 // structure. 2078 llvm::FoldingSetNodeID ID; 2079 FunctionProtoType::Profile(ID, ResultTy, ArgArray, NumArgs, EPI, *this); 2080 2081 void *InsertPos = 0; 2082 if (FunctionProtoType *FTP = 2083 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 2084 return QualType(FTP, 0); 2085 2086 // Determine whether the type being created is already canonical or not. 2087 bool isCanonical= EPI.ExceptionSpecType == EST_None && ResultTy.isCanonical(); 2088 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 2089 if (!ArgArray[i].isCanonicalAsParam()) 2090 isCanonical = false; 2091 2092 const CallingConv DefaultCC = EPI.ExtInfo.getCC(); 2093 const CallingConv CallConv = (LangOpts.MRTD && DefaultCC == CC_Default) ? 2094 CC_X86StdCall : DefaultCC; 2095 2096 // If this type isn't canonical, get the canonical version of it. 2097 // The exception spec is not part of the canonical type. 2098 QualType Canonical; 2099 if (!isCanonical || getCanonicalCallConv(CallConv) != CallConv) { 2100 SmallVector<QualType, 16> CanonicalArgs; 2101 CanonicalArgs.reserve(NumArgs); 2102 for (unsigned i = 0; i != NumArgs; ++i) 2103 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 2104 2105 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 2106 CanonicalEPI.ExceptionSpecType = EST_None; 2107 CanonicalEPI.NumExceptions = 0; 2108 CanonicalEPI.ExtInfo 2109 = CanonicalEPI.ExtInfo.withCallingConv(getCanonicalCallConv(CallConv)); 2110 2111 Canonical = getFunctionType(getCanonicalType(ResultTy), 2112 CanonicalArgs.data(), NumArgs, 2113 CanonicalEPI); 2114 2115 // Get the new insert position for the node we care about. 2116 FunctionProtoType *NewIP = 2117 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 2118 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2119 } 2120 2121 // FunctionProtoType objects are allocated with extra bytes after 2122 // them for three variable size arrays at the end: 2123 // - parameter types 2124 // - exception types 2125 // - consumed-arguments flags 2126 // Instead of the exception types, there could be a noexcept 2127 // expression. 
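  // Worked example (illustrative): for a prototype with two parameters, a
  // dynamic exception specification listing one type, and no consumed-argument
  // information, the computation below yields
  //   sizeof(FunctionProtoType) + 2 * sizeof(QualType)   // parameter types
  //                             + 1 * sizeof(QualType)   // exception types
  // A computed-noexcept specification would add sizeof(Expr*) instead, and
  // consumed-argument information would add NumArgs * sizeof(bool).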
2128 size_t Size = sizeof(FunctionProtoType) + 2129 NumArgs * sizeof(QualType); 2130 if (EPI.ExceptionSpecType == EST_Dynamic) 2131 Size += EPI.NumExceptions * sizeof(QualType); 2132 else if (EPI.ExceptionSpecType == EST_ComputedNoexcept) { 2133 Size += sizeof(Expr*); 2134 } 2135 if (EPI.ConsumedArguments) 2136 Size += NumArgs * sizeof(bool); 2137 2138 FunctionProtoType *FTP = (FunctionProtoType*) Allocate(Size, TypeAlignment); 2139 FunctionProtoType::ExtProtoInfo newEPI = EPI; 2140 newEPI.ExtInfo = EPI.ExtInfo.withCallingConv(CallConv); 2141 new (FTP) FunctionProtoType(ResultTy, ArgArray, NumArgs, Canonical, newEPI); 2142 Types.push_back(FTP); 2143 FunctionProtoTypes.InsertNode(FTP, InsertPos); 2144 return QualType(FTP, 0); 2145 } 2146 2147 #ifndef NDEBUG 2148 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 2149 if (!isa<CXXRecordDecl>(D)) return false; 2150 const CXXRecordDecl *RD = cast<CXXRecordDecl>(D); 2151 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 2152 return true; 2153 if (RD->getDescribedClassTemplate() && 2154 !isa<ClassTemplateSpecializationDecl>(RD)) 2155 return true; 2156 return false; 2157 } 2158 #endif 2159 2160 /// getInjectedClassNameType - Return the unique reference to the 2161 /// injected class name type for the specified templated declaration. 2162 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 2163 QualType TST) const { 2164 assert(NeedsInjectedClassNameType(Decl)); 2165 if (Decl->TypeForDecl) { 2166 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 2167 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDeclaration()) { 2168 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 2169 Decl->TypeForDecl = PrevDecl->TypeForDecl; 2170 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 2171 } else { 2172 Type *newType = 2173 new (*this, TypeAlignment) InjectedClassNameType(Decl, TST); 2174 Decl->TypeForDecl = newType; 2175 Types.push_back(newType); 2176 } 2177 return QualType(Decl->TypeForDecl, 0); 2178 } 2179 2180 /// getTypeDeclType - Return the unique reference to the type for the 2181 /// specified type declaration. 2182 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 2183 assert(Decl && "Passed null for Decl param"); 2184 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 2185 2186 if (const TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 2187 return getTypedefType(Typedef); 2188 2189 assert(!isa<TemplateTypeParmDecl>(Decl) && 2190 "Template type parameter types are always available."); 2191 2192 if (const RecordDecl *Record = dyn_cast<RecordDecl>(Decl)) { 2193 assert(!Record->getPreviousDeclaration() && 2194 "struct/union has previous declaration"); 2195 assert(!NeedsInjectedClassNameType(Record)); 2196 return getRecordType(Record); 2197 } else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) { 2198 assert(!Enum->getPreviousDeclaration() && 2199 "enum has previous declaration"); 2200 return getEnumType(Enum); 2201 } else if (const UnresolvedUsingTypenameDecl *Using = 2202 dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 2203 Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using); 2204 Decl->TypeForDecl = newType; 2205 Types.push_back(newType); 2206 } else 2207 llvm_unreachable("TypeDecl without a type?"); 2208 2209 return QualType(Decl->TypeForDecl, 0); 2210 } 2211 2212 /// getTypedefType - Return the unique reference to the type for the 2213 /// specified typedef name decl. 
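/// The returned node is cached on the declaration, so repeated calls are
/// cheap. Illustrative sketch ("Ctx" is any ASTContext reference and "TD" an
/// already-available TypedefNameDecl*):
/// \code
///   QualType T1 = Ctx.getTypedefType(TD, QualType());
///   QualType T2 = Ctx.getTypedefType(TD, QualType());
///   assert(T1 == T2 && "typedef types are cached on the declaration");
/// \endcode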
QualType
ASTContext::getTypedefType(const TypedefNameDecl *Decl,
                           QualType Canonical) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (Canonical.isNull())
    Canonical = getCanonicalType(Decl->getUnderlyingType());
  TypedefType *newType = new(*this, TypeAlignment)
    TypedefType(Type::Typedef, Decl, Canonical);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (const RecordDecl *PrevDecl = Decl->getPreviousDeclaration())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  RecordType *newType = new (*this, TypeAlignment) RecordType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (const EnumDecl *PrevDecl = Decl->getPreviousDeclaration())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  EnumType *newType = new (*this, TypeAlignment) EnumType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getAttributedType(AttributedType::Kind attrKind,
                                       QualType modifiedType,
                                       QualType equivalentType) {
  llvm::FoldingSetNodeID id;
  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);

  void *insertPos = 0;
  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
  if (type) return QualType(type, 0);

  QualType canon = getCanonicalType(equivalentType);
  type = new (*this, TypeAlignment)
           AttributedType(canon, attrKind, modifiedType, equivalentType);

  Types.push_back(type);
  AttributedTypes.InsertNode(type, insertPos);

  return QualType(type, 0);
}

/// \brief Retrieve a substitution-result type.
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
                                         QualType Replacement) const {
  assert(Replacement.isCanonical()
         && "replacement types must always be canonical");

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
  void *InsertPos = 0;
  SubstTemplateTypeParmType *SubstParm
    = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    SubstParm = new (*this, TypeAlignment)
      SubstTemplateTypeParmType(Parm, Replacement);
    Types.push_back(SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}

/// \brief Retrieve a substitution-result type for a template type parameter
/// pack that has been substituted with the given argument pack.
QualType ASTContext::getSubstTemplateTypeParmPackType(
                                          const TemplateTypeParmType *Parm,
                                              const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  for (TemplateArgument::pack_iterator P = ArgPack.pack_begin(),
                                    PEnd = ArgPack.pack_end();
       P != PEnd; ++P) {
    assert(P->getKind() == TemplateArgument::Type && "Pack contains a non-type");
    assert(P->getAsType().isCanonical() && "Pack contains non-canonical type");
  }
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
  void *InsertPos = 0;
  if (SubstTemplateTypeParmPackType *SubstParm
        = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  QualType Canon;
  if (!Parm->isCanonicalUnqualified()) {
    Canon = getCanonicalType(QualType(Parm, 0));
    Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
                                             ArgPack);
    SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  SubstTemplateTypeParmPackType *SubstParm
    = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
                                                               ArgPack);
  Types.push_back(SubstParm);
  // Insert into the pack-specific folding set so later lookups find it.
  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}

/// \brief Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
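/// Illustrative sketch ("Ctx" is any ASTContext reference; passing a null
/// TTPDecl yields the canonical, unnamed parameter type directly):
/// \code
///   QualType P = Ctx.getTemplateTypeParmType(/*Depth=*/0, /*Index=*/0,
///                                            /*ParameterPack=*/false,
///                                            /*TTPDecl=*/0);
///   assert(P.isCanonical());
/// \endcode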
2337 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index, 2338 bool ParameterPack, 2339 TemplateTypeParmDecl *TTPDecl) const { 2340 llvm::FoldingSetNodeID ID; 2341 TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl); 2342 void *InsertPos = 0; 2343 TemplateTypeParmType *TypeParm 2344 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 2345 2346 if (TypeParm) 2347 return QualType(TypeParm, 0); 2348 2349 if (TTPDecl) { 2350 QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack); 2351 TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon); 2352 2353 TemplateTypeParmType *TypeCheck 2354 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 2355 assert(!TypeCheck && "Template type parameter canonical type broken"); 2356 (void)TypeCheck; 2357 } else 2358 TypeParm = new (*this, TypeAlignment) 2359 TemplateTypeParmType(Depth, Index, ParameterPack); 2360 2361 Types.push_back(TypeParm); 2362 TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos); 2363 2364 return QualType(TypeParm, 0); 2365 } 2366 2367 TypeSourceInfo * 2368 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name, 2369 SourceLocation NameLoc, 2370 const TemplateArgumentListInfo &Args, 2371 QualType Underlying) const { 2372 assert(!Name.getAsDependentTemplateName() && 2373 "No dependent template names here!"); 2374 QualType TST = getTemplateSpecializationType(Name, Args, Underlying); 2375 2376 TypeSourceInfo *DI = CreateTypeSourceInfo(TST); 2377 TemplateSpecializationTypeLoc TL 2378 = cast<TemplateSpecializationTypeLoc>(DI->getTypeLoc()); 2379 TL.setTemplateNameLoc(NameLoc); 2380 TL.setLAngleLoc(Args.getLAngleLoc()); 2381 TL.setRAngleLoc(Args.getRAngleLoc()); 2382 for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) 2383 TL.setArgLocInfo(i, Args[i].getLocInfo()); 2384 return DI; 2385 } 2386 2387 QualType 2388 ASTContext::getTemplateSpecializationType(TemplateName Template, 2389 const TemplateArgumentListInfo &Args, 2390 QualType Underlying) const { 2391 assert(!Template.getAsDependentTemplateName() && 2392 "No dependent template names here!"); 2393 2394 unsigned NumArgs = Args.size(); 2395 2396 SmallVector<TemplateArgument, 4> ArgVec; 2397 ArgVec.reserve(NumArgs); 2398 for (unsigned i = 0; i != NumArgs; ++i) 2399 ArgVec.push_back(Args[i].getArgument()); 2400 2401 return getTemplateSpecializationType(Template, ArgVec.data(), NumArgs, 2402 Underlying); 2403 } 2404 2405 QualType 2406 ASTContext::getTemplateSpecializationType(TemplateName Template, 2407 const TemplateArgument *Args, 2408 unsigned NumArgs, 2409 QualType Underlying) const { 2410 assert(!Template.getAsDependentTemplateName() && 2411 "No dependent template names here!"); 2412 // Look through qualified template names. 
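  // Note: a qualified template name such as 'std::vector' and the unqualified
  // 'vector' resolve to the same underlying TemplateDecl here, so both
  // spellings end up producing equivalent specialization types.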
2413 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 2414 Template = TemplateName(QTN->getTemplateDecl()); 2415 2416 bool isTypeAlias = 2417 Template.getAsTemplateDecl() && 2418 isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl()); 2419 2420 QualType CanonType; 2421 if (!Underlying.isNull()) 2422 CanonType = getCanonicalType(Underlying); 2423 else { 2424 assert(!isTypeAlias && 2425 "Underlying type for template alias must be computed by caller"); 2426 CanonType = getCanonicalTemplateSpecializationType(Template, Args, 2427 NumArgs); 2428 } 2429 2430 // Allocate the (non-canonical) template specialization type, but don't 2431 // try to unique it: these types typically have location information that 2432 // we don't unique and don't want to lose. 2433 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 2434 sizeof(TemplateArgument) * NumArgs + 2435 (isTypeAlias ? sizeof(QualType) : 0), 2436 TypeAlignment); 2437 TemplateSpecializationType *Spec 2438 = new (Mem) TemplateSpecializationType(Template, 2439 Args, NumArgs, 2440 CanonType, 2441 isTypeAlias ? Underlying : QualType()); 2442 2443 Types.push_back(Spec); 2444 return QualType(Spec, 0); 2445 } 2446 2447 QualType 2448 ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template, 2449 const TemplateArgument *Args, 2450 unsigned NumArgs) const { 2451 assert(!Template.getAsDependentTemplateName() && 2452 "No dependent template names here!"); 2453 assert((!Template.getAsTemplateDecl() || 2454 !isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl())) && 2455 "Underlying type for template alias must be computed by caller"); 2456 2457 // Look through qualified template names. 2458 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 2459 Template = TemplateName(QTN->getTemplateDecl()); 2460 2461 // Build the canonical template specialization type. 2462 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 2463 SmallVector<TemplateArgument, 4> CanonArgs; 2464 CanonArgs.reserve(NumArgs); 2465 for (unsigned I = 0; I != NumArgs; ++I) 2466 CanonArgs.push_back(getCanonicalTemplateArgument(Args[I])); 2467 2468 // Determine whether this canonical template specialization type already 2469 // exists. 2470 llvm::FoldingSetNodeID ID; 2471 TemplateSpecializationType::Profile(ID, CanonTemplate, 2472 CanonArgs.data(), NumArgs, *this); 2473 2474 void *InsertPos = 0; 2475 TemplateSpecializationType *Spec 2476 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 2477 2478 if (!Spec) { 2479 // Allocate a new canonical template specialization type. 
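    // Note: the canonical node stores only the canonicalized template name
    // and arguments and carries no aliased type; the sugared, as-written node
    // built by getTemplateSpecializationType points at this one as its
    // canonical type.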
2480 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 2481 sizeof(TemplateArgument) * NumArgs), 2482 TypeAlignment); 2483 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 2484 CanonArgs.data(), NumArgs, 2485 QualType(), QualType()); 2486 Types.push_back(Spec); 2487 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 2488 } 2489 2490 assert(Spec->isDependentType() && 2491 "Non-dependent template-id type must have a canonical type"); 2492 return QualType(Spec, 0); 2493 } 2494 2495 QualType 2496 ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 2497 NestedNameSpecifier *NNS, 2498 QualType NamedType) const { 2499 llvm::FoldingSetNodeID ID; 2500 ElaboratedType::Profile(ID, Keyword, NNS, NamedType); 2501 2502 void *InsertPos = 0; 2503 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 2504 if (T) 2505 return QualType(T, 0); 2506 2507 QualType Canon = NamedType; 2508 if (!Canon.isCanonical()) { 2509 Canon = getCanonicalType(NamedType); 2510 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 2511 assert(!CheckT && "Elaborated canonical type broken"); 2512 (void)CheckT; 2513 } 2514 2515 T = new (*this) ElaboratedType(Keyword, NNS, NamedType, Canon); 2516 Types.push_back(T); 2517 ElaboratedTypes.InsertNode(T, InsertPos); 2518 return QualType(T, 0); 2519 } 2520 2521 QualType 2522 ASTContext::getParenType(QualType InnerType) const { 2523 llvm::FoldingSetNodeID ID; 2524 ParenType::Profile(ID, InnerType); 2525 2526 void *InsertPos = 0; 2527 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 2528 if (T) 2529 return QualType(T, 0); 2530 2531 QualType Canon = InnerType; 2532 if (!Canon.isCanonical()) { 2533 Canon = getCanonicalType(InnerType); 2534 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 2535 assert(!CheckT && "Paren canonical type broken"); 2536 (void)CheckT; 2537 } 2538 2539 T = new (*this) ParenType(InnerType, Canon); 2540 Types.push_back(T); 2541 ParenTypes.InsertNode(T, InsertPos); 2542 return QualType(T, 0); 2543 } 2544 2545 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 2546 NestedNameSpecifier *NNS, 2547 const IdentifierInfo *Name, 2548 QualType Canon) const { 2549 assert(NNS->isDependent() && "nested-name-specifier must be dependent"); 2550 2551 if (Canon.isNull()) { 2552 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 2553 ElaboratedTypeKeyword CanonKeyword = Keyword; 2554 if (Keyword == ETK_None) 2555 CanonKeyword = ETK_Typename; 2556 2557 if (CanonNNS != NNS || CanonKeyword != Keyword) 2558 Canon = getDependentNameType(CanonKeyword, CanonNNS, Name); 2559 } 2560 2561 llvm::FoldingSetNodeID ID; 2562 DependentNameType::Profile(ID, Keyword, NNS, Name); 2563 2564 void *InsertPos = 0; 2565 DependentNameType *T 2566 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 2567 if (T) 2568 return QualType(T, 0); 2569 2570 T = new (*this) DependentNameType(Keyword, NNS, Name, Canon); 2571 Types.push_back(T); 2572 DependentNameTypes.InsertNode(T, InsertPos); 2573 return QualType(T, 0); 2574 } 2575 2576 QualType 2577 ASTContext::getDependentTemplateSpecializationType( 2578 ElaboratedTypeKeyword Keyword, 2579 NestedNameSpecifier *NNS, 2580 const IdentifierInfo *Name, 2581 const TemplateArgumentListInfo &Args) const { 2582 // TODO: avoid this copy 2583 SmallVector<TemplateArgument, 16> ArgCopy; 2584 for (unsigned I = 0, E = Args.size(); I != E; ++I) 2585 ArgCopy.push_back(Args[I].getArgument()); 2586 return 
getDependentTemplateSpecializationType(Keyword, NNS, Name, 2587 ArgCopy.size(), 2588 ArgCopy.data()); 2589 } 2590 2591 QualType 2592 ASTContext::getDependentTemplateSpecializationType( 2593 ElaboratedTypeKeyword Keyword, 2594 NestedNameSpecifier *NNS, 2595 const IdentifierInfo *Name, 2596 unsigned NumArgs, 2597 const TemplateArgument *Args) const { 2598 assert((!NNS || NNS->isDependent()) && 2599 "nested-name-specifier must be dependent"); 2600 2601 llvm::FoldingSetNodeID ID; 2602 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 2603 Name, NumArgs, Args); 2604 2605 void *InsertPos = 0; 2606 DependentTemplateSpecializationType *T 2607 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 2608 if (T) 2609 return QualType(T, 0); 2610 2611 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 2612 2613 ElaboratedTypeKeyword CanonKeyword = Keyword; 2614 if (Keyword == ETK_None) CanonKeyword = ETK_Typename; 2615 2616 bool AnyNonCanonArgs = false; 2617 SmallVector<TemplateArgument, 16> CanonArgs(NumArgs); 2618 for (unsigned I = 0; I != NumArgs; ++I) { 2619 CanonArgs[I] = getCanonicalTemplateArgument(Args[I]); 2620 if (!CanonArgs[I].structurallyEquals(Args[I])) 2621 AnyNonCanonArgs = true; 2622 } 2623 2624 QualType Canon; 2625 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 2626 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 2627 Name, NumArgs, 2628 CanonArgs.data()); 2629 2630 // Find the insert position again. 2631 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 2632 } 2633 2634 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 2635 sizeof(TemplateArgument) * NumArgs), 2636 TypeAlignment); 2637 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 2638 Name, NumArgs, Args, Canon); 2639 Types.push_back(T); 2640 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 2641 return QualType(T, 0); 2642 } 2643 2644 QualType ASTContext::getPackExpansionType(QualType Pattern, 2645 llvm::Optional<unsigned> NumExpansions) { 2646 llvm::FoldingSetNodeID ID; 2647 PackExpansionType::Profile(ID, Pattern, NumExpansions); 2648 2649 assert(Pattern->containsUnexpandedParameterPack() && 2650 "Pack expansions must expand one or more parameter packs"); 2651 void *InsertPos = 0; 2652 PackExpansionType *T 2653 = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 2654 if (T) 2655 return QualType(T, 0); 2656 2657 QualType Canon; 2658 if (!Pattern.isCanonical()) { 2659 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions); 2660 2661 // Find the insert position again. 2662 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 2663 } 2664 2665 T = new (*this) PackExpansionType(Pattern, Canon, NumExpansions); 2666 Types.push_back(T); 2667 PackExpansionTypes.InsertNode(T, InsertPos); 2668 return QualType(T, 0); 2669 } 2670 2671 /// CmpProtocolNames - Comparison predicate for sorting protocols 2672 /// alphabetically. 
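/// Canonical ObjCObjectTypes carry a sorted-and-uniqued protocol list, so this
/// predicate (together with SortAndUniqueProtocols below) is what lets
/// 'id<A, B>' and 'id<B, A, A>' share a canonical type.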
2673 static bool CmpProtocolNames(const ObjCProtocolDecl *LHS, 2674 const ObjCProtocolDecl *RHS) { 2675 return LHS->getDeclName() < RHS->getDeclName(); 2676 } 2677 2678 static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols, 2679 unsigned NumProtocols) { 2680 if (NumProtocols == 0) return true; 2681 2682 for (unsigned i = 1; i != NumProtocols; ++i) 2683 if (!CmpProtocolNames(Protocols[i-1], Protocols[i])) 2684 return false; 2685 return true; 2686 } 2687 2688 static void SortAndUniqueProtocols(ObjCProtocolDecl **Protocols, 2689 unsigned &NumProtocols) { 2690 ObjCProtocolDecl **ProtocolsEnd = Protocols+NumProtocols; 2691 2692 // Sort protocols, keyed by name. 2693 std::sort(Protocols, Protocols+NumProtocols, CmpProtocolNames); 2694 2695 // Remove duplicates. 2696 ProtocolsEnd = std::unique(Protocols, ProtocolsEnd); 2697 NumProtocols = ProtocolsEnd-Protocols; 2698 } 2699 2700 QualType ASTContext::getObjCObjectType(QualType BaseType, 2701 ObjCProtocolDecl * const *Protocols, 2702 unsigned NumProtocols) const { 2703 // If the base type is an interface and there aren't any protocols 2704 // to add, then the interface type will do just fine. 2705 if (!NumProtocols && isa<ObjCInterfaceType>(BaseType)) 2706 return BaseType; 2707 2708 // Look in the folding set for an existing type. 2709 llvm::FoldingSetNodeID ID; 2710 ObjCObjectTypeImpl::Profile(ID, BaseType, Protocols, NumProtocols); 2711 void *InsertPos = 0; 2712 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 2713 return QualType(QT, 0); 2714 2715 // Build the canonical type, which has the canonical base type and 2716 // a sorted-and-uniqued list of protocols. 2717 QualType Canonical; 2718 bool ProtocolsSorted = areSortedAndUniqued(Protocols, NumProtocols); 2719 if (!ProtocolsSorted || !BaseType.isCanonical()) { 2720 if (!ProtocolsSorted) { 2721 SmallVector<ObjCProtocolDecl*, 8> Sorted(Protocols, 2722 Protocols + NumProtocols); 2723 unsigned UniqueCount = NumProtocols; 2724 2725 SortAndUniqueProtocols(&Sorted[0], UniqueCount); 2726 Canonical = getObjCObjectType(getCanonicalType(BaseType), 2727 &Sorted[0], UniqueCount); 2728 } else { 2729 Canonical = getObjCObjectType(getCanonicalType(BaseType), 2730 Protocols, NumProtocols); 2731 } 2732 2733 // Regenerate InsertPos. 2734 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 2735 } 2736 2737 unsigned Size = sizeof(ObjCObjectTypeImpl); 2738 Size += NumProtocols * sizeof(ObjCProtocolDecl *); 2739 void *Mem = Allocate(Size, TypeAlignment); 2740 ObjCObjectTypeImpl *T = 2741 new (Mem) ObjCObjectTypeImpl(Canonical, BaseType, Protocols, NumProtocols); 2742 2743 Types.push_back(T); 2744 ObjCObjectTypes.InsertNode(T, InsertPos); 2745 return QualType(T, 0); 2746 } 2747 2748 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 2749 /// the given object type. 2750 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 2751 llvm::FoldingSetNodeID ID; 2752 ObjCObjectPointerType::Profile(ID, ObjectT); 2753 2754 void *InsertPos = 0; 2755 if (ObjCObjectPointerType *QT = 2756 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 2757 return QualType(QT, 0); 2758 2759 // Find the canonical object type. 2760 QualType Canonical; 2761 if (!ObjectT.isCanonical()) { 2762 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 2763 2764 // Regenerate InsertPos. 2765 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 2766 } 2767 2768 // No match. 
  void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
  ObjCObjectPointerType *QType =
    new (Mem) ObjCObjectPointerType(Canonical, ObjectT);

  Types.push_back(QType);
  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
  return QualType(QType, 0);
}

/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl) const {
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  // FIXME: redeclarations?
  void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
  ObjCInterfaceType *T = new (Mem) ObjCInterfaceType(Decl);
  Decl->TypeForDecl = T;
  Types.push_back(T);
  return QualType(T, 0);
}

/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType ASTs (since expressions are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExprs. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
  TypeOfExprType *toe;
  if (tofExpr->isTypeDependent()) {
    llvm::FoldingSetNodeID ID;
    DependentTypeOfExprType::Profile(ID, *this, tofExpr);

    void *InsertPos = 0;
    DependentTypeOfExprType *Canon
      = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (Canon) {
      // We already have a "canonical" version of an identical, dependent
      // typeof(expr) type. Use that as our canonical type.
      toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
                                          QualType((TypeOfExprType*)Canon, 0));
    } else {
      // Build a new, canonical typeof(expr) type.
      Canon
        = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
      DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
      toe = Canon;
    }
  } else {
    QualType Canonical = getCanonicalType(tofExpr->getType());
    toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
  }
  Types.push_back(toe);
  return QualType(toe, 0);
}

/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
/// TypeOfType ASTs. The only motivation to unique these nodes would be
/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
/// an issue. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
QualType ASTContext::getTypeOfType(QualType tofType) const {
  QualType Canonical = getCanonicalType(tofType);
  TypeOfType *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
  Types.push_back(tot);
  return QualType(tot, 0);
}

/// getDecltypeForExpr - Given an expr, will return the decltype for that
/// expression, according to the rules in C++0x [dcl.type.simple]p4.
static QualType getDecltypeForExpr(const Expr *e, const ASTContext &Context) {
  if (e->isTypeDependent())
    return Context.DependentTy;

  // If e is an id expression or a class member access, decltype(e) is defined
  // as the type of the entity named by e.
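  // For example: given 'int x;', decltype(x) is 'int', not 'int&', even
  // though the DeclRefExpr naming x is an lvalue; the lvalue adjustment
  // further below applies only when none of these special cases match.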
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(e)) {
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl()))
      return VD->getType();
  }
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(e)) {
    if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
      return FD->getType();
  }
  // If e is a function call or an invocation of an overloaded operator
  // (parentheses around e are ignored), decltype(e) is defined as the
  // return type of that function.
  if (const CallExpr *CE = dyn_cast<CallExpr>(e->IgnoreParens()))
    return CE->getCallReturnType();

  QualType T = e->getType();

  // Otherwise, where T is the type of e, if e is an lvalue, decltype(e) is
  // defined as T&; otherwise decltype(e) is defined as T.
  if (e->isLValue())
    T = Context.getLValueReferenceType(T);

  return T;
}

/// getDecltypeType - Unlike many "get<Type>" functions, we don't unique
/// DecltypeType ASTs. The only motivation to unique these nodes would be
/// memory savings. Since decltype(t) is fairly uncommon, space shouldn't be
/// an issue. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
QualType ASTContext::getDecltypeType(Expr *e) const {
  DecltypeType *dt;

  // C++0x [temp.type]p2:
  //   If an expression e involves a template parameter, decltype(e) denotes a
  //   unique dependent type. Two such decltype-specifiers refer to the same
  //   type only if their expressions are equivalent (14.5.6.1).
  if (e->isInstantiationDependent()) {
    llvm::FoldingSetNodeID ID;
    DependentDecltypeType::Profile(ID, *this, e);

    void *InsertPos = 0;
    DependentDecltypeType *Canon
      = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (Canon) {
      // We already have a "canonical" version of an equivalent, dependent
      // decltype type. Use that as our canonical type.
      dt = new (*this, TypeAlignment) DecltypeType(e, DependentTy,
                                       QualType((DecltypeType*)Canon, 0));
    } else {
      // Build a new, canonical decltype(e) type.
      Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
      DependentDecltypeTypes.InsertNode(Canon, InsertPos);
      dt = Canon;
    }
  } else {
    QualType T = getDecltypeForExpr(e, *this);
    dt = new (*this, TypeAlignment) DecltypeType(e, T, getCanonicalType(T));
  }
  Types.push_back(dt);
  return QualType(dt, 0);
}

/// getUnaryTransformType - We don't unique these, since the memory
/// savings are minimal and these are rare.
QualType ASTContext::getUnaryTransformType(QualType BaseType,
                                           QualType UnderlyingType,
                                           UnaryTransformType::UTTKind Kind)
    const {
  UnaryTransformType *Ty =
    new (*this, TypeAlignment) UnaryTransformType(BaseType, UnderlyingType,
                                                  Kind,
                                                  UnderlyingType->isDependentType()
                                                    ? QualType() : UnderlyingType);
  Types.push_back(Ty);
  return QualType(Ty, 0);
}

/// getAutoType - We only unique auto types after they've been deduced.
QualType ASTContext::getAutoType(QualType DeducedType) const {
  void *InsertPos = 0;
  if (!DeducedType.isNull()) {
    // Look in the folding set for an existing type.
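    // Note: only deduced 'auto' types reach this lookup. For the undeduced
    // pattern (a null DeducedType), InsertPos stays null and the node built
    // below is never added to AutoTypes.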
2928 llvm::FoldingSetNodeID ID; 2929 AutoType::Profile(ID, DeducedType); 2930 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 2931 return QualType(AT, 0); 2932 } 2933 2934 AutoType *AT = new (*this, TypeAlignment) AutoType(DeducedType); 2935 Types.push_back(AT); 2936 if (InsertPos) 2937 AutoTypes.InsertNode(AT, InsertPos); 2938 return QualType(AT, 0); 2939 } 2940 2941 /// getAtomicType - Return the uniqued reference to the atomic type for 2942 /// the given value type. 2943 QualType ASTContext::getAtomicType(QualType T) const { 2944 // Unique pointers, to guarantee there is only one pointer of a particular 2945 // structure. 2946 llvm::FoldingSetNodeID ID; 2947 AtomicType::Profile(ID, T); 2948 2949 void *InsertPos = 0; 2950 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 2951 return QualType(AT, 0); 2952 2953 // If the atomic value type isn't canonical, this won't be a canonical type 2954 // either, so fill in the canonical type field. 2955 QualType Canonical; 2956 if (!T.isCanonical()) { 2957 Canonical = getAtomicType(getCanonicalType(T)); 2958 2959 // Get the new insert position for the node we care about. 2960 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 2961 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2962 } 2963 AtomicType *New = new (*this, TypeAlignment) AtomicType(T, Canonical); 2964 Types.push_back(New); 2965 AtomicTypes.InsertNode(New, InsertPos); 2966 return QualType(New, 0); 2967 } 2968 2969 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 2970 QualType ASTContext::getAutoDeductType() const { 2971 if (AutoDeductTy.isNull()) 2972 AutoDeductTy = getAutoType(QualType()); 2973 assert(!AutoDeductTy.isNull() && "can't build 'auto' pattern"); 2974 return AutoDeductTy; 2975 } 2976 2977 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 2978 QualType ASTContext::getAutoRRefDeductType() const { 2979 if (AutoRRefDeductTy.isNull()) 2980 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 2981 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 2982 return AutoRRefDeductTy; 2983 } 2984 2985 /// getTagDeclType - Return the unique reference to the type for the 2986 /// specified TagDecl (struct/union/class/enum) decl. 2987 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 2988 assert (Decl); 2989 // FIXME: What is the design on getTagDeclType when it requires casting 2990 // away const? mutable? 2991 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 2992 } 2993 2994 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 2995 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 2996 /// needs to agree with the definition in <stddef.h>. 2997 CanQualType ASTContext::getSizeType() const { 2998 return getFromTargetType(Target->getSizeType()); 2999 } 3000 3001 /// getSignedWCharType - Return the type of "signed wchar_t". 3002 /// Used when in C++, as a GCC extension. 3003 QualType ASTContext::getSignedWCharType() const { 3004 // FIXME: derive from "Target" ? 3005 return WCharTy; 3006 } 3007 3008 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 3009 /// Used when in C++, as a GCC extension. 3010 QualType ASTContext::getUnsignedWCharType() const { 3011 // FIXME: derive from "Target" ? 3012 return UnsignedIntTy; 3013 } 3014 3015 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (ref?) 3016 /// defined in <stddef.h>. 
Pointer - pointer requires this (C99 6.5.6p9). 3017 QualType ASTContext::getPointerDiffType() const { 3018 return getFromTargetType(Target->getPtrDiffType(0)); 3019 } 3020 3021 //===----------------------------------------------------------------------===// 3022 // Type Operators 3023 //===----------------------------------------------------------------------===// 3024 3025 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 3026 // Push qualifiers into arrays, and then discard any remaining 3027 // qualifiers. 3028 T = getCanonicalType(T); 3029 T = getVariableArrayDecayedType(T); 3030 const Type *Ty = T.getTypePtr(); 3031 QualType Result; 3032 if (isa<ArrayType>(Ty)) { 3033 Result = getArrayDecayedType(QualType(Ty,0)); 3034 } else if (isa<FunctionType>(Ty)) { 3035 Result = getPointerType(QualType(Ty, 0)); 3036 } else { 3037 Result = QualType(Ty, 0); 3038 } 3039 3040 return CanQualType::CreateUnsafe(Result); 3041 } 3042 3043 QualType ASTContext::getUnqualifiedArrayType(QualType type, 3044 Qualifiers &quals) { 3045 SplitQualType splitType = type.getSplitUnqualifiedType(); 3046 3047 // FIXME: getSplitUnqualifiedType() actually walks all the way to 3048 // the unqualified desugared type and then drops it on the floor. 3049 // We then have to strip that sugar back off with 3050 // getUnqualifiedDesugaredType(), which is silly. 3051 const ArrayType *AT = 3052 dyn_cast<ArrayType>(splitType.first->getUnqualifiedDesugaredType()); 3053 3054 // If we don't have an array, just use the results in splitType. 3055 if (!AT) { 3056 quals = splitType.second; 3057 return QualType(splitType.first, 0); 3058 } 3059 3060 // Otherwise, recurse on the array's element type. 3061 QualType elementType = AT->getElementType(); 3062 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 3063 3064 // If that didn't change the element type, AT has no qualifiers, so we 3065 // can just use the results in splitType. 3066 if (elementType == unqualElementType) { 3067 assert(quals.empty()); // from the recursive call 3068 quals = splitType.second; 3069 return QualType(splitType.first, 0); 3070 } 3071 3072 // Otherwise, add in the qualifiers from the outermost type, then 3073 // build the type back up. 3074 quals.addConsistentQualifiers(splitType.second); 3075 3076 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) { 3077 return getConstantArrayType(unqualElementType, CAT->getSize(), 3078 CAT->getSizeModifier(), 0); 3079 } 3080 3081 if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) { 3082 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 3083 } 3084 3085 if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT)) { 3086 return getVariableArrayType(unqualElementType, 3087 VAT->getSizeExpr(), 3088 VAT->getSizeModifier(), 3089 VAT->getIndexTypeCVRQualifiers(), 3090 VAT->getBracketsRange()); 3091 } 3092 3093 const DependentSizedArrayType *DSAT = cast<DependentSizedArrayType>(AT); 3094 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 3095 DSAT->getSizeModifier(), 0, 3096 SourceRange()); 3097 } 3098 3099 /// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that 3100 /// may be similar (C++ 4.4), replaces T1 and T2 with the type that 3101 /// they point to and return true. If T1 and T2 aren't pointer types 3102 /// or pointer-to-member types, or if they are not similar at this 3103 /// level, returns false and leaves T1 and T2 unchanged. Top-level 3104 /// qualifiers on T1 and T2 are ignored. 
This function will typically 3105 /// be called in a loop that successively "unwraps" pointer and 3106 /// pointer-to-member types to compare them at each level. 3107 bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) { 3108 const PointerType *T1PtrType = T1->getAs<PointerType>(), 3109 *T2PtrType = T2->getAs<PointerType>(); 3110 if (T1PtrType && T2PtrType) { 3111 T1 = T1PtrType->getPointeeType(); 3112 T2 = T2PtrType->getPointeeType(); 3113 return true; 3114 } 3115 3116 const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(), 3117 *T2MPType = T2->getAs<MemberPointerType>(); 3118 if (T1MPType && T2MPType && 3119 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 3120 QualType(T2MPType->getClass(), 0))) { 3121 T1 = T1MPType->getPointeeType(); 3122 T2 = T2MPType->getPointeeType(); 3123 return true; 3124 } 3125 3126 if (getLangOptions().ObjC1) { 3127 const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(), 3128 *T2OPType = T2->getAs<ObjCObjectPointerType>(); 3129 if (T1OPType && T2OPType) { 3130 T1 = T1OPType->getPointeeType(); 3131 T2 = T2OPType->getPointeeType(); 3132 return true; 3133 } 3134 } 3135 3136 // FIXME: Block pointers, too? 3137 3138 return false; 3139 } 3140 3141 DeclarationNameInfo 3142 ASTContext::getNameForTemplate(TemplateName Name, 3143 SourceLocation NameLoc) const { 3144 switch (Name.getKind()) { 3145 case TemplateName::QualifiedTemplate: 3146 case TemplateName::Template: 3147 // DNInfo work in progress: CHECKME: what about DNLoc? 3148 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 3149 NameLoc); 3150 3151 case TemplateName::OverloadedTemplate: { 3152 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 3153 // DNInfo work in progress: CHECKME: what about DNLoc? 3154 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 3155 } 3156 3157 case TemplateName::DependentTemplate: { 3158 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 3159 DeclarationName DName; 3160 if (DTN->isIdentifier()) { 3161 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 3162 return DeclarationNameInfo(DName, NameLoc); 3163 } else { 3164 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 3165 // DNInfo work in progress: FIXME: source locations? 
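// For a dependent operator name such as 'T::operator+' we have no usable
// source range for the operator itself yet, so build a DeclarationNameLoc
// holding empty (invalid) locations.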
3166 DeclarationNameLoc DNLoc; 3167 DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding(); 3168 DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding(); 3169 return DeclarationNameInfo(DName, NameLoc, DNLoc); 3170 } 3171 } 3172 3173 case TemplateName::SubstTemplateTemplateParm: { 3174 SubstTemplateTemplateParmStorage *subst 3175 = Name.getAsSubstTemplateTemplateParm(); 3176 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 3177 NameLoc); 3178 } 3179 3180 case TemplateName::SubstTemplateTemplateParmPack: { 3181 SubstTemplateTemplateParmPackStorage *subst 3182 = Name.getAsSubstTemplateTemplateParmPack(); 3183 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 3184 NameLoc); 3185 } 3186 } 3187 3188 llvm_unreachable("bad template name kind!"); 3189 } 3190 3191 TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const { 3192 switch (Name.getKind()) { 3193 case TemplateName::QualifiedTemplate: 3194 case TemplateName::Template: { 3195 TemplateDecl *Template = Name.getAsTemplateDecl(); 3196 if (TemplateTemplateParmDecl *TTP 3197 = dyn_cast<TemplateTemplateParmDecl>(Template)) 3198 Template = getCanonicalTemplateTemplateParmDecl(TTP); 3199 3200 // The canonical template name is the canonical template declaration. 3201 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 3202 } 3203 3204 case TemplateName::OverloadedTemplate: 3205 llvm_unreachable("cannot canonicalize overloaded template"); 3206 3207 case TemplateName::DependentTemplate: { 3208 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 3209 assert(DTN && "Non-dependent template names must refer to template decls."); 3210 return DTN->CanonicalTemplateName; 3211 } 3212 3213 case TemplateName::SubstTemplateTemplateParm: { 3214 SubstTemplateTemplateParmStorage *subst 3215 = Name.getAsSubstTemplateTemplateParm(); 3216 return getCanonicalTemplateName(subst->getReplacement()); 3217 } 3218 3219 case TemplateName::SubstTemplateTemplateParmPack: { 3220 SubstTemplateTemplateParmPackStorage *subst 3221 = Name.getAsSubstTemplateTemplateParmPack(); 3222 TemplateTemplateParmDecl *canonParameter 3223 = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack()); 3224 TemplateArgument canonArgPack 3225 = getCanonicalTemplateArgument(subst->getArgumentPack()); 3226 return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack); 3227 } 3228 } 3229 3230 llvm_unreachable("bad template name!"); 3231 } 3232 3233 bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) { 3234 X = getCanonicalTemplateName(X); 3235 Y = getCanonicalTemplateName(Y); 3236 return X.getAsVoidPointer() == Y.getAsVoidPointer(); 3237 } 3238 3239 TemplateArgument 3240 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { 3241 switch (Arg.getKind()) { 3242 case TemplateArgument::Null: 3243 return Arg; 3244 3245 case TemplateArgument::Expression: 3246 return Arg; 3247 3248 case TemplateArgument::Declaration: 3249 return TemplateArgument(Arg.getAsDecl()->getCanonicalDecl()); 3250 3251 case TemplateArgument::Template: 3252 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate())); 3253 3254 case TemplateArgument::TemplateExpansion: 3255 return TemplateArgument(getCanonicalTemplateName( 3256 Arg.getAsTemplateOrTemplatePattern()), 3257 Arg.getNumTemplateExpansions()); 3258 3259 case TemplateArgument::Integral: 3260 return TemplateArgument(*Arg.getAsIntegral(), 3261 getCanonicalType(Arg.getIntegralType())); 3262 3263 case 
TemplateArgument::Type: 3264 return TemplateArgument(getCanonicalType(Arg.getAsType())); 3265 3266 case TemplateArgument::Pack: { 3267 if (Arg.pack_size() == 0) 3268 return Arg; 3269 3270 TemplateArgument *CanonArgs 3271 = new (*this) TemplateArgument[Arg.pack_size()]; 3272 unsigned Idx = 0; 3273 for (TemplateArgument::pack_iterator A = Arg.pack_begin(), 3274 AEnd = Arg.pack_end(); 3275 A != AEnd; (void)++A, ++Idx) 3276 CanonArgs[Idx] = getCanonicalTemplateArgument(*A); 3277 3278 return TemplateArgument(CanonArgs, Arg.pack_size()); 3279 } 3280 } 3281 3282 // Silence GCC warning 3283 llvm_unreachable("Unhandled template argument kind"); 3284 } 3285 3286 NestedNameSpecifier * 3287 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { 3288 if (!NNS) 3289 return 0; 3290 3291 switch (NNS->getKind()) { 3292 case NestedNameSpecifier::Identifier: 3293 // Canonicalize the prefix but keep the identifier the same. 3294 return NestedNameSpecifier::Create(*this, 3295 getCanonicalNestedNameSpecifier(NNS->getPrefix()), 3296 NNS->getAsIdentifier()); 3297 3298 case NestedNameSpecifier::Namespace: 3299 // A namespace is canonical; build a nested-name-specifier with 3300 // this namespace and no prefix. 3301 return NestedNameSpecifier::Create(*this, 0, 3302 NNS->getAsNamespace()->getOriginalNamespace()); 3303 3304 case NestedNameSpecifier::NamespaceAlias: 3305 // A namespace is canonical; build a nested-name-specifier with 3306 // this namespace and no prefix. 3307 return NestedNameSpecifier::Create(*this, 0, 3308 NNS->getAsNamespaceAlias()->getNamespace() 3309 ->getOriginalNamespace()); 3310 3311 case NestedNameSpecifier::TypeSpec: 3312 case NestedNameSpecifier::TypeSpecWithTemplate: { 3313 QualType T = getCanonicalType(QualType(NNS->getAsType(), 0)); 3314 3315 // If we have some kind of dependent-named type (e.g., "typename T::type"), 3316 // break it apart into its prefix and identifier, then reconsititute those 3317 // as the canonical nested-name-specifier. This is required to canonicalize 3318 // a dependent nested-name-specifier involving typedefs of dependent-name 3319 // types, e.g., 3320 // typedef typename T::type T1; 3321 // typedef typename T1::type T2; 3322 if (const DependentNameType *DNT = T->getAs<DependentNameType>()) { 3323 NestedNameSpecifier *Prefix 3324 = getCanonicalNestedNameSpecifier(DNT->getQualifier()); 3325 return NestedNameSpecifier::Create(*this, Prefix, 3326 const_cast<IdentifierInfo *>(DNT->getIdentifier())); 3327 } 3328 3329 // Do the same thing as above, but with dependent-named specializations. 3330 if (const DependentTemplateSpecializationType *DTST 3331 = T->getAs<DependentTemplateSpecializationType>()) { 3332 NestedNameSpecifier *Prefix 3333 = getCanonicalNestedNameSpecifier(DTST->getQualifier()); 3334 3335 T = getDependentTemplateSpecializationType(DTST->getKeyword(), 3336 Prefix, DTST->getIdentifier(), 3337 DTST->getNumArgs(), 3338 DTST->getArgs()); 3339 T = getCanonicalType(T); 3340 } 3341 3342 return NestedNameSpecifier::Create(*this, 0, false, 3343 const_cast<Type*>(T.getTypePtr())); 3344 } 3345 3346 case NestedNameSpecifier::Global: 3347 // The global specifier is canonical and unique. 3348 return NNS; 3349 } 3350 3351 // Required to silence a GCC warning 3352 return 0; 3353 } 3354 3355 3356 const ArrayType *ASTContext::getAsArrayType(QualType T) const { 3357 // Handle the non-qualified case efficiently. 3358 if (!T.hasLocalQualifiers()) { 3359 // Handle the common positive case fast. 
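// (e.g. an unqualified, unsugared 'int[10]' is itself an ArrayType node and
// is returned directly.)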
3360 if (const ArrayType *AT = dyn_cast<ArrayType>(T)) 3361 return AT; 3362 } 3363 3364 // Handle the common negative case fast. 3365 if (!isa<ArrayType>(T.getCanonicalType())) 3366 return 0; 3367 3368 // Apply any qualifiers from the array type to the element type. This 3369 // implements C99 6.7.3p8: "If the specification of an array type includes 3370 // any type qualifiers, the element type is so qualified, not the array type." 3371 3372 // If we get here, we either have type qualifiers on the type, or we have 3373 // sugar such as a typedef in the way. If we have type qualifiers on the type 3374 // we must propagate them down into the element type. 3375 3376 SplitQualType split = T.getSplitDesugaredType(); 3377 Qualifiers qs = split.second; 3378 3379 // If we have a simple case, just return now. 3380 const ArrayType *ATy = dyn_cast<ArrayType>(split.first); 3381 if (ATy == 0 || qs.empty()) 3382 return ATy; 3383 3384 // Otherwise, we have an array and we have qualifiers on it. Push the 3385 // qualifiers into the array element type and return a new array type. 3386 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 3387 3388 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(ATy)) 3389 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 3390 CAT->getSizeModifier(), 3391 CAT->getIndexTypeCVRQualifiers())); 3392 if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(ATy)) 3393 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 3394 IAT->getSizeModifier(), 3395 IAT->getIndexTypeCVRQualifiers())); 3396 3397 if (const DependentSizedArrayType *DSAT 3398 = dyn_cast<DependentSizedArrayType>(ATy)) 3399 return cast<ArrayType>( 3400 getDependentSizedArrayType(NewEltTy, 3401 DSAT->getSizeExpr(), 3402 DSAT->getSizeModifier(), 3403 DSAT->getIndexTypeCVRQualifiers(), 3404 DSAT->getBracketsRange())); 3405 3406 const VariableArrayType *VAT = cast<VariableArrayType>(ATy); 3407 return cast<ArrayType>(getVariableArrayType(NewEltTy, 3408 VAT->getSizeExpr(), 3409 VAT->getSizeModifier(), 3410 VAT->getIndexTypeCVRQualifiers(), 3411 VAT->getBracketsRange())); 3412 } 3413 3414 QualType ASTContext::getAdjustedParameterType(QualType T) { 3415 // C99 6.7.5.3p7: 3416 // A declaration of a parameter as "array of type" shall be 3417 // adjusted to "qualified pointer to type", where the type 3418 // qualifiers (if any) are those specified within the [ and ] of 3419 // the array type derivation. 3420 if (T->isArrayType()) 3421 return getArrayDecayedType(T); 3422 3423 // C99 6.7.5.3p8: 3424 // A declaration of a parameter as "function returning type" 3425 // shall be adjusted to "pointer to function returning type", as 3426 // in 6.3.2.1. 3427 if (T->isFunctionType()) 3428 return getPointerType(T); 3429 3430 return T; 3431 } 3432 3433 QualType ASTContext::getSignatureParameterType(QualType T) { 3434 T = getVariableArrayDecayedType(T); 3435 T = getAdjustedParameterType(T); 3436 return T.getUnqualifiedType(); 3437 } 3438 3439 /// getArrayDecayedType - Return the properly qualified result of decaying the 3440 /// specified array type to a pointer. This operation is non-trivial when 3441 /// handling typedefs etc. The canonical type of "T" must be an array type, 3442 /// this returns a pointer to a properly qualified element of the array. 3443 /// 3444 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 
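/// For example (illustrative): a parameter of type 'const char [12]' decays
/// to 'const char *', and 'int [restrict 4]' decays to 'int *restrict'.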
3445 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 3446 // Get the element type with 'getAsArrayType' so that we don't lose any 3447 // typedefs in the element type of the array. This also handles propagation 3448 // of type qualifiers from the array type into the element type if present 3449 // (C99 6.7.3p8). 3450 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 3451 assert(PrettyArrayType && "Not an array type!"); 3452 3453 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 3454 3455 // int x[restrict 4] -> int *restrict 3456 return getQualifiedType(PtrTy, PrettyArrayType->getIndexTypeQualifiers()); 3457 } 3458 3459 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 3460 return getBaseElementType(array->getElementType()); 3461 } 3462 3463 QualType ASTContext::getBaseElementType(QualType type) const { 3464 Qualifiers qs; 3465 while (true) { 3466 SplitQualType split = type.getSplitDesugaredType(); 3467 const ArrayType *array = split.first->getAsArrayTypeUnsafe(); 3468 if (!array) break; 3469 3470 type = array->getElementType(); 3471 qs.addConsistentQualifiers(split.second); 3472 } 3473 3474 return getQualifiedType(type, qs); 3475 } 3476 3477 /// getConstantArrayElementCount - Returns number of constant array elements. 3478 uint64_t 3479 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 3480 uint64_t ElementCount = 1; 3481 do { 3482 ElementCount *= CA->getSize().getZExtValue(); 3483 CA = dyn_cast<ConstantArrayType>(CA->getElementType()); 3484 } while (CA); 3485 return ElementCount; 3486 } 3487 3488 /// getFloatingRank - Return a relative rank for floating point types. 3489 /// This routine will assert if passed a built-in type that isn't a float. 3490 static FloatingRank getFloatingRank(QualType T) { 3491 if (const ComplexType *CT = T->getAs<ComplexType>()) 3492 return getFloatingRank(CT->getElementType()); 3493 3494 assert(T->getAs<BuiltinType>() && "getFloatingRank(): not a floating type"); 3495 switch (T->getAs<BuiltinType>()->getKind()) { 3496 default: llvm_unreachable("getFloatingRank(): not a floating type"); 3497 case BuiltinType::Half: return HalfRank; 3498 case BuiltinType::Float: return FloatRank; 3499 case BuiltinType::Double: return DoubleRank; 3500 case BuiltinType::LongDouble: return LongDoubleRank; 3501 } 3502 } 3503 3504 /// getFloatingTypeOfSizeWithinDomain - Returns a real floating 3505 /// point or a complex type (based on typeDomain/typeSize). 3506 /// 'typeDomain' is a real floating point or complex type. 3507 /// 'typeSize' is a real floating point or complex type. 3508 QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size, 3509 QualType Domain) const { 3510 FloatingRank EltRank = getFloatingRank(Size); 3511 if (Domain->isComplexType()) { 3512 switch (EltRank) { 3513 default: llvm_unreachable("getFloatingRank(): illegal value for rank"); 3514 case FloatRank: return FloatComplexTy; 3515 case DoubleRank: return DoubleComplexTy; 3516 case LongDoubleRank: return LongDoubleComplexTy; 3517 } 3518 } 3519 3520 assert(Domain->isRealFloatingType() && "Unknown domain!"); 3521 switch (EltRank) { 3522 default: llvm_unreachable("getFloatingRank(): illegal value for rank"); 3523 case FloatRank: return FloatTy; 3524 case DoubleRank: return DoubleTy; 3525 case LongDoubleRank: return LongDoubleTy; 3526 } 3527 } 3528 3529 /// getFloatingTypeOrder - Compare the rank of the two specified floating 3530 /// point types, ignoring the domain of the type (i.e. 
'double' == 3531 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 3532 /// LHS < RHS, return -1. 3533 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 3534 FloatingRank LHSR = getFloatingRank(LHS); 3535 FloatingRank RHSR = getFloatingRank(RHS); 3536 3537 if (LHSR == RHSR) 3538 return 0; 3539 if (LHSR > RHSR) 3540 return 1; 3541 return -1; 3542 } 3543 3544 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 3545 /// routine will assert if passed a built-in type that isn't an integer or enum, 3546 /// or if it is not canonicalized. 3547 unsigned ASTContext::getIntegerRank(const Type *T) const { 3548 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 3549 if (const EnumType* ET = dyn_cast<EnumType>(T)) 3550 T = ET->getDecl()->getPromotionType().getTypePtr(); 3551 3552 if (T->isSpecificBuiltinType(BuiltinType::WChar_S) || 3553 T->isSpecificBuiltinType(BuiltinType::WChar_U)) 3554 T = getFromTargetType(Target->getWCharType()).getTypePtr(); 3555 3556 if (T->isSpecificBuiltinType(BuiltinType::Char16)) 3557 T = getFromTargetType(Target->getChar16Type()).getTypePtr(); 3558 3559 if (T->isSpecificBuiltinType(BuiltinType::Char32)) 3560 T = getFromTargetType(Target->getChar32Type()).getTypePtr(); 3561 3562 switch (cast<BuiltinType>(T)->getKind()) { 3563 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 3564 case BuiltinType::Bool: 3565 return 1 + (getIntWidth(BoolTy) << 3); 3566 case BuiltinType::Char_S: 3567 case BuiltinType::Char_U: 3568 case BuiltinType::SChar: 3569 case BuiltinType::UChar: 3570 return 2 + (getIntWidth(CharTy) << 3); 3571 case BuiltinType::Short: 3572 case BuiltinType::UShort: 3573 return 3 + (getIntWidth(ShortTy) << 3); 3574 case BuiltinType::Int: 3575 case BuiltinType::UInt: 3576 return 4 + (getIntWidth(IntTy) << 3); 3577 case BuiltinType::Long: 3578 case BuiltinType::ULong: 3579 return 5 + (getIntWidth(LongTy) << 3); 3580 case BuiltinType::LongLong: 3581 case BuiltinType::ULongLong: 3582 return 6 + (getIntWidth(LongLongTy) << 3); 3583 case BuiltinType::Int128: 3584 case BuiltinType::UInt128: 3585 return 7 + (getIntWidth(Int128Ty) << 3); 3586 } 3587 } 3588 3589 /// \brief Whether this is a promotable bitfield reference according 3590 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 3591 /// 3592 /// \returns the type this bit-field will promote to, or NULL if no 3593 /// promotion occurs. 3594 QualType ASTContext::isPromotableBitField(Expr *E) const { 3595 if (E->isTypeDependent() || E->isValueDependent()) 3596 return QualType(); 3597 3598 FieldDecl *Field = E->getBitField(); 3599 if (!Field) 3600 return QualType(); 3601 3602 QualType FT = Field->getType(); 3603 3604 uint64_t BitWidth = Field->getBitWidthValue(*this); 3605 uint64_t IntSize = getTypeSize(IntTy); 3606 // GCC extension compatibility: if the bit-field size is less than or equal 3607 // to the size of int, it gets promoted no matter what its type is. 3608 // For instance, unsigned long bf : 4 gets promoted to signed int. 3609 if (BitWidth < IntSize) 3610 return IntTy; 3611 3612 if (BitWidth == IntSize) 3613 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 3614 3615 // Types bigger than int are not subject to promotions, and therefore act 3616 // like the base type. 3617 // FIXME: This doesn't quite match what gcc does, but what gcc does here 3618 // is ridiculous. 
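// For example, on a target with 32-bit int, a bit-field declared as
// 'long long bf : 48' is wider than int and therefore does not promote.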
3619 return QualType(); 3620 } 3621 3622 /// getPromotedIntegerType - Returns the type that Promotable will 3623 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 3624 /// integer type. 3625 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 3626 assert(!Promotable.isNull()); 3627 assert(Promotable->isPromotableIntegerType()); 3628 if (const EnumType *ET = Promotable->getAs<EnumType>()) 3629 return ET->getDecl()->getPromotionType(); 3630 if (Promotable->isSignedIntegerType()) 3631 return IntTy; 3632 uint64_t PromotableSize = getTypeSize(Promotable); 3633 uint64_t IntSize = getTypeSize(IntTy); 3634 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 3635 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 3636 } 3637 3638 /// \brief Recurses in pointer/array types until it finds an objc retainable 3639 /// type and returns its ownership. 3640 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 3641 while (!T.isNull()) { 3642 if (T.getObjCLifetime() != Qualifiers::OCL_None) 3643 return T.getObjCLifetime(); 3644 if (T->isArrayType()) 3645 T = getBaseElementType(T); 3646 else if (const PointerType *PT = T->getAs<PointerType>()) 3647 T = PT->getPointeeType(); 3648 else if (const ReferenceType *RT = T->getAs<ReferenceType>()) 3649 T = RT->getPointeeType(); 3650 else 3651 break; 3652 } 3653 3654 return Qualifiers::OCL_None; 3655 } 3656 3657 /// getIntegerTypeOrder - Returns the highest ranked integer type: 3658 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If 3659 /// LHS < RHS, return -1. 3660 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const { 3661 const Type *LHSC = getCanonicalType(LHS).getTypePtr(); 3662 const Type *RHSC = getCanonicalType(RHS).getTypePtr(); 3663 if (LHSC == RHSC) return 0; 3664 3665 bool LHSUnsigned = LHSC->isUnsignedIntegerType(); 3666 bool RHSUnsigned = RHSC->isUnsignedIntegerType(); 3667 3668 unsigned LHSRank = getIntegerRank(LHSC); 3669 unsigned RHSRank = getIntegerRank(RHSC); 3670 3671 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned. 3672 if (LHSRank == RHSRank) return 0; 3673 return LHSRank > RHSRank ? 1 : -1; 3674 } 3675 3676 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa. 3677 if (LHSUnsigned) { 3678 // If the unsigned [LHS] type is larger, return it. 3679 if (LHSRank >= RHSRank) 3680 return 1; 3681 3682 // If the signed type can represent all values of the unsigned type, it 3683 // wins. Because we are dealing with 2's complement and types that are 3684 // powers of two larger than each other, this is always safe. 3685 return -1; 3686 } 3687 3688 // If the unsigned [RHS] type is larger, return it. 3689 if (RHSRank >= LHSRank) 3690 return -1; 3691 3692 // If the signed type can represent all values of the unsigned type, it 3693 // wins. Because we are dealing with 2's complement and types that are 3694 // powers of two larger than each other, this is always safe. 3695 return 1; 3696 } 3697 3698 static RecordDecl * 3699 CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK, 3700 DeclContext *DC, IdentifierInfo *Id) { 3701 SourceLocation Loc; 3702 if (Ctx.getLangOptions().CPlusPlus) 3703 return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id); 3704 else 3705 return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id); 3706 } 3707 3708 // getCFConstantStringType - Return the type used for constant CFStrings. 
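// A rough sketch of the record built lazily below (C syntax, for illustration
// only; see the FieldTypes array):
//
//   struct NSConstantString {
//     const int *isa;
//     int flags;
//     const char *str;
//     long length;
//   };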
3709 QualType ASTContext::getCFConstantStringType() const { 3710 if (!CFConstantStringTypeDecl) { 3711 CFConstantStringTypeDecl = 3712 CreateRecordDecl(*this, TTK_Struct, TUDecl, 3713 &Idents.get("NSConstantString")); 3714 CFConstantStringTypeDecl->startDefinition(); 3715 3716 QualType FieldTypes[4]; 3717 3718 // const int *isa; 3719 FieldTypes[0] = getPointerType(IntTy.withConst()); 3720 // int flags; 3721 FieldTypes[1] = IntTy; 3722 // const char *str; 3723 FieldTypes[2] = getPointerType(CharTy.withConst()); 3724 // long length; 3725 FieldTypes[3] = LongTy; 3726 3727 // Create fields 3728 for (unsigned i = 0; i < 4; ++i) { 3729 FieldDecl *Field = FieldDecl::Create(*this, CFConstantStringTypeDecl, 3730 SourceLocation(), 3731 SourceLocation(), 0, 3732 FieldTypes[i], /*TInfo=*/0, 3733 /*BitWidth=*/0, 3734 /*Mutable=*/false, 3735 /*HasInit=*/false); 3736 Field->setAccess(AS_public); 3737 CFConstantStringTypeDecl->addDecl(Field); 3738 } 3739 3740 CFConstantStringTypeDecl->completeDefinition(); 3741 } 3742 3743 return getTagDeclType(CFConstantStringTypeDecl); 3744 } 3745 3746 void ASTContext::setCFConstantStringType(QualType T) { 3747 const RecordType *Rec = T->getAs<RecordType>(); 3748 assert(Rec && "Invalid CFConstantStringType"); 3749 CFConstantStringTypeDecl = Rec->getDecl(); 3750 } 3751 3752 QualType ASTContext::getBlockDescriptorType() const { 3753 if (BlockDescriptorType) 3754 return getTagDeclType(BlockDescriptorType); 3755 3756 RecordDecl *T; 3757 // FIXME: Needs the FlagAppleBlock bit. 3758 T = CreateRecordDecl(*this, TTK_Struct, TUDecl, 3759 &Idents.get("__block_descriptor")); 3760 T->startDefinition(); 3761 3762 QualType FieldTypes[] = { 3763 UnsignedLongTy, 3764 UnsignedLongTy, 3765 }; 3766 3767 const char *FieldNames[] = { 3768 "reserved", 3769 "Size" 3770 }; 3771 3772 for (size_t i = 0; i < 2; ++i) { 3773 FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(), 3774 SourceLocation(), 3775 &Idents.get(FieldNames[i]), 3776 FieldTypes[i], /*TInfo=*/0, 3777 /*BitWidth=*/0, 3778 /*Mutable=*/false, 3779 /*HasInit=*/false); 3780 Field->setAccess(AS_public); 3781 T->addDecl(Field); 3782 } 3783 3784 T->completeDefinition(); 3785 3786 BlockDescriptorType = T; 3787 3788 return getTagDeclType(BlockDescriptorType); 3789 } 3790 3791 QualType ASTContext::getBlockDescriptorExtendedType() const { 3792 if (BlockDescriptorExtendedType) 3793 return getTagDeclType(BlockDescriptorExtendedType); 3794 3795 RecordDecl *T; 3796 // FIXME: Needs the FlagAppleBlock bit. 
3797 T = CreateRecordDecl(*this, TTK_Struct, TUDecl, 3798 &Idents.get("__block_descriptor_withcopydispose")); 3799 T->startDefinition(); 3800 3801 QualType FieldTypes[] = { 3802 UnsignedLongTy, 3803 UnsignedLongTy, 3804 getPointerType(VoidPtrTy), 3805 getPointerType(VoidPtrTy) 3806 }; 3807 3808 const char *FieldNames[] = { 3809 "reserved", 3810 "Size", 3811 "CopyFuncPtr", 3812 "DestroyFuncPtr" 3813 }; 3814 3815 for (size_t i = 0; i < 4; ++i) { 3816 FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(), 3817 SourceLocation(), 3818 &Idents.get(FieldNames[i]), 3819 FieldTypes[i], /*TInfo=*/0, 3820 /*BitWidth=*/0, 3821 /*Mutable=*/false, 3822 /*HasInit=*/false); 3823 Field->setAccess(AS_public); 3824 T->addDecl(Field); 3825 } 3826 3827 T->completeDefinition(); 3828 3829 BlockDescriptorExtendedType = T; 3830 3831 return getTagDeclType(BlockDescriptorExtendedType); 3832 } 3833 3834 bool ASTContext::BlockRequiresCopying(QualType Ty) const { 3835 if (Ty->isObjCRetainableType()) 3836 return true; 3837 if (getLangOptions().CPlusPlus) { 3838 if (const RecordType *RT = Ty->getAs<RecordType>()) { 3839 CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); 3840 return RD->hasConstCopyConstructor(); 3841 3842 } 3843 } 3844 return false; 3845 } 3846 3847 QualType 3848 ASTContext::BuildByRefType(StringRef DeclName, QualType Ty) const { 3849 // type = struct __Block_byref_1_X { 3850 // void *__isa; 3851 // struct __Block_byref_1_X *__forwarding; 3852 // unsigned int __flags; 3853 // unsigned int __size; 3854 // void *__copy_helper; // as needed 3855 // void *__destroy_help // as needed 3856 // int X; 3857 // } * 3858 3859 bool HasCopyAndDispose = BlockRequiresCopying(Ty); 3860 3861 // FIXME: Move up 3862 llvm::SmallString<36> Name; 3863 llvm::raw_svector_ostream(Name) << "__Block_byref_" << 3864 ++UniqueBlockByRefTypeID << '_' << DeclName; 3865 RecordDecl *T; 3866 T = CreateRecordDecl(*this, TTK_Struct, TUDecl, &Idents.get(Name.str())); 3867 T->startDefinition(); 3868 QualType Int32Ty = IntTy; 3869 assert(getIntWidth(IntTy) == 32 && "non-32bit int not supported"); 3870 QualType FieldTypes[] = { 3871 getPointerType(VoidPtrTy), 3872 getPointerType(getTagDeclType(T)), 3873 Int32Ty, 3874 Int32Ty, 3875 getPointerType(VoidPtrTy), 3876 getPointerType(VoidPtrTy), 3877 Ty 3878 }; 3879 3880 StringRef FieldNames[] = { 3881 "__isa", 3882 "__forwarding", 3883 "__flags", 3884 "__size", 3885 "__copy_helper", 3886 "__destroy_helper", 3887 DeclName, 3888 }; 3889 3890 for (size_t i = 0; i < 7; ++i) { 3891 if (!HasCopyAndDispose && i >=4 && i <= 5) 3892 continue; 3893 FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(), 3894 SourceLocation(), 3895 &Idents.get(FieldNames[i]), 3896 FieldTypes[i], /*TInfo=*/0, 3897 /*BitWidth=*/0, /*Mutable=*/false, 3898 /*HasInit=*/false); 3899 Field->setAccess(AS_public); 3900 T->addDecl(Field); 3901 } 3902 3903 T->completeDefinition(); 3904 3905 return getPointerType(getTagDeclType(T)); 3906 } 3907 3908 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 3909 if (!ObjCInstanceTypeDecl) 3910 ObjCInstanceTypeDecl = TypedefDecl::Create(*this, 3911 getTranslationUnitDecl(), 3912 SourceLocation(), 3913 SourceLocation(), 3914 &Idents.get("instancetype"), 3915 getTrivialTypeSourceInfo(getObjCIdType())); 3916 return ObjCInstanceTypeDecl; 3917 } 3918 3919 // This returns true if a type has been typedefed to BOOL: 3920 // typedef <type> BOOL; 3921 static bool isTypeTypedefedAsBOOL(QualType T) { 3922 if (const TypedefType *TT = dyn_cast<TypedefType>(T)) 3923 if (IdentifierInfo 
*II = TT->getDecl()->getIdentifier()) 3924 return II->isStr("BOOL"); 3925 3926 return false; 3927 } 3928 3929 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 3930 /// purpose. 3931 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 3932 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 3933 return CharUnits::Zero(); 3934 3935 CharUnits sz = getTypeSizeInChars(type); 3936 3937 // Make all integer and enum types at least as large as an int 3938 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 3939 sz = std::max(sz, getTypeSizeInChars(IntTy)); 3940 // Treat arrays as pointers, since that's how they're passed in. 3941 else if (type->isArrayType()) 3942 sz = getTypeSizeInChars(VoidPtrTy); 3943 return sz; 3944 } 3945 3946 static inline 3947 std::string charUnitsToString(const CharUnits &CU) { 3948 return llvm::itostr(CU.getQuantity()); 3949 } 3950 3951 /// getObjCEncodingForBlock - Return the encoded type for this block 3952 /// declaration. 3953 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 3954 std::string S; 3955 3956 const BlockDecl *Decl = Expr->getBlockDecl(); 3957 QualType BlockTy = 3958 Expr->getType()->getAs<BlockPointerType>()->getPointeeType(); 3959 // Encode result type. 3960 getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getResultType(), S); 3961 // Compute size of all parameters. 3962 // Start with computing size of a pointer in number of bytes. 3963 // FIXME: There might(should) be a better way of doing this computation! 3964 SourceLocation Loc; 3965 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 3966 CharUnits ParmOffset = PtrSize; 3967 for (BlockDecl::param_const_iterator PI = Decl->param_begin(), 3968 E = Decl->param_end(); PI != E; ++PI) { 3969 QualType PType = (*PI)->getType(); 3970 CharUnits sz = getObjCEncodingTypeSize(PType); 3971 assert (sz.isPositive() && "BlockExpr - Incomplete param type"); 3972 ParmOffset += sz; 3973 } 3974 // Size of the argument frame 3975 S += charUnitsToString(ParmOffset); 3976 // Block pointer and offset. 3977 S += "@?0"; 3978 3979 // Argument types. 3980 ParmOffset = PtrSize; 3981 for (BlockDecl::param_const_iterator PI = Decl->param_begin(), E = 3982 Decl->param_end(); PI != E; ++PI) { 3983 ParmVarDecl *PVDecl = *PI; 3984 QualType PType = PVDecl->getOriginalType(); 3985 if (const ArrayType *AT = 3986 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 3987 // Use array's original type only if it has known number of 3988 // elements. 3989 if (!isa<ConstantArrayType>(AT)) 3990 PType = PVDecl->getType(); 3991 } else if (PType->isFunctionType()) 3992 PType = PVDecl->getType(); 3993 getObjCEncodingForType(PType, S); 3994 S += charUnitsToString(ParmOffset); 3995 ParmOffset += getObjCEncodingTypeSize(PType); 3996 } 3997 3998 return S; 3999 } 4000 4001 bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl, 4002 std::string& S) { 4003 // Encode result type. 4004 getObjCEncodingForType(Decl->getResultType(), S); 4005 CharUnits ParmOffset; 4006 // Compute size of all parameters. 
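// For illustration (assuming 4-byte 'int' and 8-byte pointers), a function
// 'int f(int, char *)' ends up encoded as "i12i0*4": the result type, the
// total parameter size in bytes, then each parameter's type followed by its
// byte offset.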
4007 for (FunctionDecl::param_const_iterator PI = Decl->param_begin(), 4008 E = Decl->param_end(); PI != E; ++PI) { 4009 QualType PType = (*PI)->getType(); 4010 CharUnits sz = getObjCEncodingTypeSize(PType); 4011 if (sz.isZero()) 4012 return true; 4013 4014 assert (sz.isPositive() && 4015 "getObjCEncodingForFunctionDecl - Incomplete param type"); 4016 ParmOffset += sz; 4017 } 4018 S += charUnitsToString(ParmOffset); 4019 ParmOffset = CharUnits::Zero(); 4020 4021 // Argument types. 4022 for (FunctionDecl::param_const_iterator PI = Decl->param_begin(), 4023 E = Decl->param_end(); PI != E; ++PI) { 4024 ParmVarDecl *PVDecl = *PI; 4025 QualType PType = PVDecl->getOriginalType(); 4026 if (const ArrayType *AT = 4027 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 4028 // Use array's original type only if it has known number of 4029 // elements. 4030 if (!isa<ConstantArrayType>(AT)) 4031 PType = PVDecl->getType(); 4032 } else if (PType->isFunctionType()) 4033 PType = PVDecl->getType(); 4034 getObjCEncodingForType(PType, S); 4035 S += charUnitsToString(ParmOffset); 4036 ParmOffset += getObjCEncodingTypeSize(PType); 4037 } 4038 4039 return false; 4040 } 4041 4042 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 4043 /// declaration. 4044 bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 4045 std::string& S) const { 4046 // FIXME: This is not very efficient. 4047 // Encode type qualifer, 'in', 'inout', etc. for the return type. 4048 getObjCEncodingForTypeQualifier(Decl->getObjCDeclQualifier(), S); 4049 // Encode result type. 4050 getObjCEncodingForType(Decl->getResultType(), S); 4051 // Compute size of all parameters. 4052 // Start with computing size of a pointer in number of bytes. 4053 // FIXME: There might(should) be a better way of doing this computation! 4054 SourceLocation Loc; 4055 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 4056 // The first two arguments (self and _cmd) are pointers; account for 4057 // their size. 4058 CharUnits ParmOffset = 2 * PtrSize; 4059 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 4060 E = Decl->sel_param_end(); PI != E; ++PI) { 4061 QualType PType = (*PI)->getType(); 4062 CharUnits sz = getObjCEncodingTypeSize(PType); 4063 if (sz.isZero()) 4064 return true; 4065 4066 assert (sz.isPositive() && 4067 "getObjCEncodingForMethodDecl - Incomplete param type"); 4068 ParmOffset += sz; 4069 } 4070 S += charUnitsToString(ParmOffset); 4071 S += "@0:"; 4072 S += charUnitsToString(PtrSize); 4073 4074 // Argument types. 4075 ParmOffset = 2 * PtrSize; 4076 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 4077 E = Decl->sel_param_end(); PI != E; ++PI) { 4078 const ParmVarDecl *PVDecl = *PI; 4079 QualType PType = PVDecl->getOriginalType(); 4080 if (const ArrayType *AT = 4081 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 4082 // Use array's original type only if it has known number of 4083 // elements. 4084 if (!isa<ConstantArrayType>(AT)) 4085 PType = PVDecl->getType(); 4086 } else if (PType->isFunctionType()) 4087 PType = PVDecl->getType(); 4088 // Process argument qualifiers for user supplied arguments; such as, 4089 // 'in', 'inout', etc. 
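// (For example, an 'inout' argument is prefixed with 'N' and an 'out'
// argument with 'o'; see getObjCEncodingForTypeQualifier below.)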
4090 getObjCEncodingForTypeQualifier(PVDecl->getObjCDeclQualifier(), S); 4091 getObjCEncodingForType(PType, S); 4092 S += charUnitsToString(ParmOffset); 4093 ParmOffset += getObjCEncodingTypeSize(PType); 4094 } 4095 4096 return false; 4097 } 4098 4099 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 4100 /// property declaration. If non-NULL, Container must be either an 4101 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 4102 /// NULL when getting encodings for protocol properties. 4103 /// Property attributes are stored as a comma-delimited C string. The simple 4104 /// attributes readonly and bycopy are encoded as single characters. The 4105 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 4106 /// encoded as single characters, followed by an identifier. Property types 4107 /// are also encoded as a parametrized attribute. The characters used to encode 4108 /// these attributes are defined by the following enumeration: 4109 /// @code 4110 /// enum PropertyAttributes { 4111 /// kPropertyReadOnly = 'R', // property is read-only. 4112 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 4113 /// kPropertyByref = '&', // property is a reference to the value last assigned 4114 /// kPropertyDynamic = 'D', // property is dynamic 4115 /// kPropertyGetter = 'G', // followed by getter selector name 4116 /// kPropertySetter = 'S', // followed by setter selector name 4117 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 4118 /// kPropertyType = 't' // followed by old-style type encoding. 4119 /// kPropertyWeak = 'W' // 'weak' property 4120 /// kPropertyStrong = 'P' // property GC'able 4121 /// kPropertyNonAtomic = 'N' // property non-atomic 4122 /// }; 4123 /// @endcode 4124 void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 4125 const Decl *Container, 4126 std::string& S) const { 4127 // Collect information from the property implementation decl(s). 4128 bool Dynamic = false; 4129 ObjCPropertyImplDecl *SynthesizePID = 0; 4130 4131 // FIXME: Duplicated code due to poor abstraction. 4132 if (Container) { 4133 if (const ObjCCategoryImplDecl *CID = 4134 dyn_cast<ObjCCategoryImplDecl>(Container)) { 4135 for (ObjCCategoryImplDecl::propimpl_iterator 4136 i = CID->propimpl_begin(), e = CID->propimpl_end(); 4137 i != e; ++i) { 4138 ObjCPropertyImplDecl *PID = *i; 4139 if (PID->getPropertyDecl() == PD) { 4140 if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) { 4141 Dynamic = true; 4142 } else { 4143 SynthesizePID = PID; 4144 } 4145 } 4146 } 4147 } else { 4148 const ObjCImplementationDecl *OID=cast<ObjCImplementationDecl>(Container); 4149 for (ObjCCategoryImplDecl::propimpl_iterator 4150 i = OID->propimpl_begin(), e = OID->propimpl_end(); 4151 i != e; ++i) { 4152 ObjCPropertyImplDecl *PID = *i; 4153 if (PID->getPropertyDecl() == PD) { 4154 if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) { 4155 Dynamic = true; 4156 } else { 4157 SynthesizePID = PID; 4158 } 4159 } 4160 } 4161 } 4162 } 4163 4164 // FIXME: This is not very efficient. 4165 S = "T"; 4166 4167 // Encode result type. 4168 // GCC has some special rules regarding encoding of properties which 4169 // closely resembles encoding of ivars. 
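// For illustration (names are made up): a synthesized 'nonatomic, copy'
// property 'NSString *name' backed by the ivar '_name' typically ends up
// encoded as T@"NSString",C,N,V_name.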
4170 getObjCEncodingForTypeImpl(PD->getType(), S, true, true, 0, 4171 true /* outermost type */, 4172 true /* encoding for property */); 4173 4174 if (PD->isReadOnly()) { 4175 S += ",R"; 4176 } else { 4177 switch (PD->getSetterKind()) { 4178 case ObjCPropertyDecl::Assign: break; 4179 case ObjCPropertyDecl::Copy: S += ",C"; break; 4180 case ObjCPropertyDecl::Retain: S += ",&"; break; 4181 case ObjCPropertyDecl::Weak: S += ",W"; break; 4182 } 4183 } 4184 4185 // It really isn't clear at all what this means, since properties 4186 // are "dynamic by default". 4187 if (Dynamic) 4188 S += ",D"; 4189 4190 if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic) 4191 S += ",N"; 4192 4193 if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) { 4194 S += ",G"; 4195 S += PD->getGetterName().getAsString(); 4196 } 4197 4198 if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) { 4199 S += ",S"; 4200 S += PD->getSetterName().getAsString(); 4201 } 4202 4203 if (SynthesizePID) { 4204 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 4205 S += ",V"; 4206 S += OID->getNameAsString(); 4207 } 4208 4209 // FIXME: OBJCGC: weak & strong 4210 } 4211 4212 /// getLegacyIntegralTypeEncoding - 4213 /// Another legacy compatibility encoding: 32-bit longs are encoded as 4214 /// 'l' or 'L' , but not always. For typedefs, we need to use 4215 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 4216 /// 4217 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 4218 if (isa<TypedefType>(PointeeTy.getTypePtr())) { 4219 if (const BuiltinType *BT = PointeeTy->getAs<BuiltinType>()) { 4220 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 4221 PointeeTy = UnsignedIntTy; 4222 else 4223 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 4224 PointeeTy = IntTy; 4225 } 4226 } 4227 } 4228 4229 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 4230 const FieldDecl *Field) const { 4231 // We follow the behavior of gcc, expanding structures which are 4232 // directly pointed to, and expanding embedded structures. Note that 4233 // these rules are sufficient to prevent recursive encoding of the 4234 // same type. 4235 getObjCEncodingForTypeImpl(T, S, true, true, Field, 4236 true /* outermost type */); 4237 } 4238 4239 static char ObjCEncodingForPrimitiveKind(const ASTContext *C, QualType T) { 4240 switch (T->getAs<BuiltinType>()->getKind()) { 4241 default: llvm_unreachable("Unhandled builtin type kind"); 4242 case BuiltinType::Void: return 'v'; 4243 case BuiltinType::Bool: return 'B'; 4244 case BuiltinType::Char_U: 4245 case BuiltinType::UChar: return 'C'; 4246 case BuiltinType::UShort: return 'S'; 4247 case BuiltinType::UInt: return 'I'; 4248 case BuiltinType::ULong: 4249 return C->getIntWidth(T) == 32 ? 'L' : 'Q'; 4250 case BuiltinType::UInt128: return 'T'; 4251 case BuiltinType::ULongLong: return 'Q'; 4252 case BuiltinType::Char_S: 4253 case BuiltinType::SChar: return 'c'; 4254 case BuiltinType::Short: return 's'; 4255 case BuiltinType::WChar_S: 4256 case BuiltinType::WChar_U: 4257 case BuiltinType::Int: return 'i'; 4258 case BuiltinType::Long: 4259 return C->getIntWidth(T) == 32 ? 
'l' : 'q';
4260 case BuiltinType::LongLong: return 'q';
4261 case BuiltinType::Int128: return 't';
4262 case BuiltinType::Float: return 'f';
4263 case BuiltinType::Double: return 'd';
4264 case BuiltinType::LongDouble: return 'D';
4265 }
4266 }
4267
4268 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
4269 EnumDecl *Enum = ET->getDecl();
4270
4271 // The encoding of a non-fixed enum type is always 'i', regardless of size.
4272 if (!Enum->isFixed())
4273 return 'i';
4274
4275 // The encoding of a fixed enum type matches its fixed underlying type.
4276 return ObjCEncodingForPrimitiveKind(C, Enum->getIntegerType());
4277 }
4278
4279 static void EncodeBitField(const ASTContext *Ctx, std::string& S,
4280 QualType T, const FieldDecl *FD) {
4281 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
4282 S += 'b';
4283 // The NeXT runtime encodes bit fields as b followed by the number of bits.
4284 // The GNU runtime requires more information; bitfields are encoded as b,
4285 // then the offset (in bits) of the first element, then the type of the
4286 // bitfield, then the size in bits. For example, in this structure:
4287 //
4288 // struct
4289 // {
4290 // int integer;
4291 // int flags:2;
4292 // };
4293 // On a 32-bit system, the encoding for flags would be b2 for the NeXT
4294 // runtime, but b32i2 for the GNU runtime. The reason for this extra
4295 // information is not especially sensible, but we're stuck with it for
4296 // compatibility with GCC, although providing it breaks anything that
4297 // actually uses runtime introspection and wants to work on both runtimes...
4298 if (!Ctx->getLangOptions().NeXTRuntime) {
4299 const RecordDecl *RD = FD->getParent();
4300 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
4301 S += llvm::utostr(RL.getFieldOffset(FD->getFieldIndex()));
4302 if (const EnumType *ET = T->getAs<EnumType>())
4303 S += ObjCEncodingForEnumType(Ctx, ET);
4304 else
4305 S += ObjCEncodingForPrimitiveKind(Ctx, T);
4306 }
4307 S += llvm::utostr(FD->getBitWidthValue(*Ctx));
4308 }
4309
4310 // FIXME: Use SmallString for accumulating string.
4311 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
4312 bool ExpandPointedToStructures,
4313 bool ExpandStructures,
4314 const FieldDecl *FD,
4315 bool OutermostType,
4316 bool EncodingProperty,
4317 bool StructField) const {
4318 if (T->getAs<BuiltinType>()) {
4319 if (FD && FD->isBitField())
4320 return EncodeBitField(this, S, T, FD);
4321 S += ObjCEncodingForPrimitiveKind(this, T);
4322 return;
4323 }
4324
4325 if (const ComplexType *CT = T->getAs<ComplexType>()) {
4326 S += 'j';
4327 getObjCEncodingForTypeImpl(CT->getElementType(), S, false, false, 0, false,
4328 false);
4329 return;
4330 }
4331
4332 // encoding for pointer or reference types.
4333 QualType PointeeTy;
4334 if (const PointerType *PT = T->getAs<PointerType>()) {
4335 if (PT->isObjCSelType()) {
4336 S += ':';
4337 return;
4338 }
4339 PointeeTy = PT->getPointeeType();
4340 }
4341 else if (const ReferenceType *RT = T->getAs<ReferenceType>())
4342 PointeeTy = RT->getPointeeType();
4343 if (!PointeeTy.isNull()) {
4344 bool isReadOnly = false;
4345 // For historical/compatibility reasons, the read-only qualifier of the
4346 // pointee gets emitted _before_ the '^'. The read-only qualifier of
4347 // the pointer itself gets ignored, _unless_ we are looking at a typedef!
4348 // Also, do not emit the 'r' for anything but the outermost type!
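// For example: @encode(const char *) yields "r*", whereas the pointer-level
// const in 'char * const' is ignored and the encoding is just "*".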
4349 if (isa<TypedefType>(T.getTypePtr())) { 4350 if (OutermostType && T.isConstQualified()) { 4351 isReadOnly = true; 4352 S += 'r'; 4353 } 4354 } else if (OutermostType) { 4355 QualType P = PointeeTy; 4356 while (P->getAs<PointerType>()) 4357 P = P->getAs<PointerType>()->getPointeeType(); 4358 if (P.isConstQualified()) { 4359 isReadOnly = true; 4360 S += 'r'; 4361 } 4362 } 4363 if (isReadOnly) { 4364 // Another legacy compatibility encoding. Some ObjC qualifier and type 4365 // combinations need to be rearranged. 4366 // Rewrite "in const" from "nr" to "rn" 4367 if (StringRef(S).endswith("nr")) 4368 S.replace(S.end()-2, S.end(), "rn"); 4369 } 4370 4371 if (PointeeTy->isCharType()) { 4372 // char pointer types should be encoded as '*' unless it is a 4373 // type that has been typedef'd to 'BOOL'. 4374 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 4375 S += '*'; 4376 return; 4377 } 4378 } else if (const RecordType *RTy = PointeeTy->getAs<RecordType>()) { 4379 // GCC binary compat: Need to convert "struct objc_class *" to "#". 4380 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 4381 S += '#'; 4382 return; 4383 } 4384 // GCC binary compat: Need to convert "struct objc_object *" to "@". 4385 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 4386 S += '@'; 4387 return; 4388 } 4389 // fall through... 4390 } 4391 S += '^'; 4392 getLegacyIntegralTypeEncoding(PointeeTy); 4393 4394 getObjCEncodingForTypeImpl(PointeeTy, S, false, ExpandPointedToStructures, 4395 NULL); 4396 return; 4397 } 4398 4399 if (const ArrayType *AT = 4400 // Ignore type qualifiers etc. 4401 dyn_cast<ArrayType>(T->getCanonicalTypeInternal())) { 4402 if (isa<IncompleteArrayType>(AT) && !StructField) { 4403 // Incomplete arrays are encoded as a pointer to the array element. 4404 S += '^'; 4405 4406 getObjCEncodingForTypeImpl(AT->getElementType(), S, 4407 false, ExpandStructures, FD); 4408 } else { 4409 S += '['; 4410 4411 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) { 4412 if (getTypeSize(CAT->getElementType()) == 0) 4413 S += '0'; 4414 else 4415 S += llvm::utostr(CAT->getSize().getZExtValue()); 4416 } else { 4417 //Variable length arrays are encoded as a regular array with 0 elements. 4418 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 4419 "Unknown array type!"); 4420 S += '0'; 4421 } 4422 4423 getObjCEncodingForTypeImpl(AT->getElementType(), S, 4424 false, ExpandStructures, FD); 4425 S += ']'; 4426 } 4427 return; 4428 } 4429 4430 if (T->getAs<FunctionType>()) { 4431 S += '?'; 4432 return; 4433 } 4434 4435 if (const RecordType *RTy = T->getAs<RecordType>()) { 4436 RecordDecl *RDecl = RTy->getDecl(); 4437 S += RDecl->isUnion() ? '(' : '{'; 4438 // Anonymous structures print as '?' 
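// (e.g. a struct 'Point' with two int fields encodes as "{Point=ii}", while
// an unnamed struct of the same shape encodes as "{?=ii}".)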
4439 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 4440 S += II->getName(); 4441 if (ClassTemplateSpecializationDecl *Spec 4442 = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 4443 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 4444 std::string TemplateArgsStr 4445 = TemplateSpecializationType::PrintTemplateArgumentList( 4446 TemplateArgs.data(), 4447 TemplateArgs.size(), 4448 (*this).getPrintingPolicy()); 4449 4450 S += TemplateArgsStr; 4451 } 4452 } else { 4453 S += '?'; 4454 } 4455 if (ExpandStructures) { 4456 S += '='; 4457 if (!RDecl->isUnion()) { 4458 getObjCEncodingForStructureImpl(RDecl, S, FD); 4459 } else { 4460 for (RecordDecl::field_iterator Field = RDecl->field_begin(), 4461 FieldEnd = RDecl->field_end(); 4462 Field != FieldEnd; ++Field) { 4463 if (FD) { 4464 S += '"'; 4465 S += Field->getNameAsString(); 4466 S += '"'; 4467 } 4468 4469 // Special case bit-fields. 4470 if (Field->isBitField()) { 4471 getObjCEncodingForTypeImpl(Field->getType(), S, false, true, 4472 (*Field)); 4473 } else { 4474 QualType qt = Field->getType(); 4475 getLegacyIntegralTypeEncoding(qt); 4476 getObjCEncodingForTypeImpl(qt, S, false, true, 4477 FD, /*OutermostType*/false, 4478 /*EncodingProperty*/false, 4479 /*StructField*/true); 4480 } 4481 } 4482 } 4483 } 4484 S += RDecl->isUnion() ? ')' : '}'; 4485 return; 4486 } 4487 4488 if (const EnumType *ET = T->getAs<EnumType>()) { 4489 if (FD && FD->isBitField()) 4490 EncodeBitField(this, S, T, FD); 4491 else 4492 S += ObjCEncodingForEnumType(this, ET); 4493 return; 4494 } 4495 4496 if (T->isBlockPointerType()) { 4497 S += "@?"; // Unlike a pointer-to-function, which is "^?". 4498 return; 4499 } 4500 4501 // Ignore protocol qualifiers when mangling at this level. 4502 if (const ObjCObjectType *OT = T->getAs<ObjCObjectType>()) 4503 T = OT->getBaseType(); 4504 4505 if (const ObjCInterfaceType *OIT = T->getAs<ObjCInterfaceType>()) { 4506 // @encode(class_name) 4507 ObjCInterfaceDecl *OI = OIT->getDecl(); 4508 S += '{'; 4509 const IdentifierInfo *II = OI->getIdentifier(); 4510 S += II->getName(); 4511 S += '='; 4512 SmallVector<const ObjCIvarDecl*, 32> Ivars; 4513 DeepCollectObjCIvars(OI, true, Ivars); 4514 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 4515 const FieldDecl *Field = cast<FieldDecl>(Ivars[i]); 4516 if (Field->isBitField()) 4517 getObjCEncodingForTypeImpl(Field->getType(), S, false, true, Field); 4518 else 4519 getObjCEncodingForTypeImpl(Field->getType(), S, false, true, FD); 4520 } 4521 S += '}'; 4522 return; 4523 } 4524 4525 if (const ObjCObjectPointerType *OPT = T->getAs<ObjCObjectPointerType>()) { 4526 if (OPT->isObjCIdType()) { 4527 S += '@'; 4528 return; 4529 } 4530 4531 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 4532 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 4533 // Since this is a binary compatibility issue, need to consult with runtime 4534 // folks. Fortunately, this is a *very* obsure construct. 4535 S += '#'; 4536 return; 4537 } 4538 4539 if (OPT->isObjCQualifiedIdType()) { 4540 getObjCEncodingForTypeImpl(getObjCIdType(), S, 4541 ExpandPointedToStructures, 4542 ExpandStructures, FD); 4543 if (FD || EncodingProperty) { 4544 // Note that we do extended encoding of protocol qualifer list 4545 // Only when doing ivar or property encoding. 
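// (e.g. an ivar of type 'id<NSCopying>' is encoded as @"<NSCopying>" here.)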
4546 S += '"'; 4547 for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(), 4548 E = OPT->qual_end(); I != E; ++I) { 4549 S += '<'; 4550 S += (*I)->getNameAsString(); 4551 S += '>'; 4552 } 4553 S += '"'; 4554 } 4555 return; 4556 } 4557 4558 QualType PointeeTy = OPT->getPointeeType(); 4559 if (!EncodingProperty && 4560 isa<TypedefType>(PointeeTy.getTypePtr())) { 4561 // Another historical/compatibility reason. 4562 // We encode the underlying type which comes out as 4563 // {...}; 4564 S += '^'; 4565 getObjCEncodingForTypeImpl(PointeeTy, S, 4566 false, ExpandPointedToStructures, 4567 NULL); 4568 return; 4569 } 4570 4571 S += '@'; 4572 if (OPT->getInterfaceDecl() && (FD || EncodingProperty)) { 4573 S += '"'; 4574 S += OPT->getInterfaceDecl()->getIdentifier()->getName(); 4575 for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(), 4576 E = OPT->qual_end(); I != E; ++I) { 4577 S += '<'; 4578 S += (*I)->getNameAsString(); 4579 S += '>'; 4580 } 4581 S += '"'; 4582 } 4583 return; 4584 } 4585 4586 // gcc just blithely ignores member pointers. 4587 // TODO: maybe there should be a mangling for these 4588 if (T->getAs<MemberPointerType>()) 4589 return; 4590 4591 if (T->isVectorType()) { 4592 // This matches gcc's encoding, even though technically it is 4593 // insufficient. 4594 // FIXME. We should do a better job than gcc. 4595 return; 4596 } 4597 4598 llvm_unreachable("@encode for type not implemented!"); 4599 } 4600 4601 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 4602 std::string &S, 4603 const FieldDecl *FD, 4604 bool includeVBases) const { 4605 assert(RDecl && "Expected non-null RecordDecl"); 4606 assert(!RDecl->isUnion() && "Should not be called for unions"); 4607 if (!RDecl->getDefinition()) 4608 return; 4609 4610 CXXRecordDecl *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 4611 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 4612 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 4613 4614 if (CXXRec) { 4615 for (CXXRecordDecl::base_class_iterator 4616 BI = CXXRec->bases_begin(), 4617 BE = CXXRec->bases_end(); BI != BE; ++BI) { 4618 if (!BI->isVirtual()) { 4619 CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl(); 4620 if (base->isEmpty()) 4621 continue; 4622 uint64_t offs = layout.getBaseClassOffsetInBits(base); 4623 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 4624 std::make_pair(offs, base)); 4625 } 4626 } 4627 } 4628 4629 unsigned i = 0; 4630 for (RecordDecl::field_iterator Field = RDecl->field_begin(), 4631 FieldEnd = RDecl->field_end(); 4632 Field != FieldEnd; ++Field, ++i) { 4633 uint64_t offs = layout.getFieldOffset(i); 4634 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 4635 std::make_pair(offs, *Field)); 4636 } 4637 4638 if (CXXRec && includeVBases) { 4639 for (CXXRecordDecl::base_class_iterator 4640 BI = CXXRec->vbases_begin(), 4641 BE = CXXRec->vbases_end(); BI != BE; ++BI) { 4642 CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl(); 4643 if (base->isEmpty()) 4644 continue; 4645 uint64_t offs = layout.getVBaseClassOffsetInBits(base); 4646 if (FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 4647 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 4648 std::make_pair(offs, base)); 4649 } 4650 } 4651 4652 CharUnits size; 4653 if (CXXRec) { 4654 size = includeVBases ? 
layout.getSize() : layout.getNonVirtualSize();
4655 } else {
4656 size = layout.getSize();
4657 }
4658
4659 uint64_t CurOffs = 0;
4660 std::multimap<uint64_t, NamedDecl *>::iterator
4661 CurLayObj = FieldOrBaseOffsets.begin();
4662
4663 if ((CurLayObj != FieldOrBaseOffsets.end() && CurLayObj->first != 0) ||
4664 (CurLayObj == FieldOrBaseOffsets.end() &&
4665 CXXRec && CXXRec->isDynamicClass())) {
4666 assert(CXXRec && CXXRec->isDynamicClass() &&
4667 "Offset 0 was empty but no VTable ?");
4668 if (FD) {
4669 S += "\"_vptr$";
4670 std::string recname = CXXRec->getNameAsString();
4671 if (recname.empty()) recname = "?";
4672 S += recname;
4673 S += '"';
4674 }
4675 S += "^^?";
4676 CurOffs += getTypeSize(VoidPtrTy);
4677 }
4678
4679 if (!RDecl->hasFlexibleArrayMember()) {
4680 // Mark the end of the structure.
4681 uint64_t offs = toBits(size);
4682 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
4683 std::make_pair(offs, (NamedDecl*)0));
4684 }
4685
4686 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
4687 assert(CurOffs <= CurLayObj->first);
4688
4689 if (CurOffs < CurLayObj->first) {
4690 uint64_t padding = CurLayObj->first - CurOffs;
4691 // FIXME: There doesn't seem to be a way to indicate in the encoding that
4692 // packing/alignment of members is different than normal, in which case
4693 // the encoding will be out-of-sync with the real layout.
4694 // If the runtime switches to just consider the size of types without
4695 // taking into account alignment, we could make padding explicit in the
4696 // encoding (e.g. using arrays of chars). The encoding strings would be
4697 // longer then, though.
4698 CurOffs += padding;
4699 }
4700
4701 NamedDecl *dcl = CurLayObj->second;
4702 if (dcl == 0)
4703 break; // reached end of structure.
4704
4705 if (CXXRecordDecl *base = dyn_cast<CXXRecordDecl>(dcl)) {
4706 // We expand the bases without their virtual bases since those are going
4707 // in the initial structure. Note that this differs from gcc, which
4708 // expands virtual bases each time one is encountered in the hierarchy,
4709 // making the encoding type bigger than it really is.
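// (Each non-empty, non-virtual base therefore appears exactly once in the
// encoding, at its offset within this record.)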
4710 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false); 4711 assert(!base->isEmpty()); 4712 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 4713 } else { 4714 FieldDecl *field = cast<FieldDecl>(dcl); 4715 if (FD) { 4716 S += '"'; 4717 S += field->getNameAsString(); 4718 S += '"'; 4719 } 4720 4721 if (field->isBitField()) { 4722 EncodeBitField(this, S, field->getType(), field); 4723 CurOffs += field->getBitWidthValue(*this); 4724 } else { 4725 QualType qt = field->getType(); 4726 getLegacyIntegralTypeEncoding(qt); 4727 getObjCEncodingForTypeImpl(qt, S, false, true, FD, 4728 /*OutermostType*/false, 4729 /*EncodingProperty*/false, 4730 /*StructField*/true); 4731 CurOffs += getTypeSize(field->getType()); 4732 } 4733 } 4734 } 4735 } 4736 4737 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 4738 std::string& S) const { 4739 if (QT & Decl::OBJC_TQ_In) 4740 S += 'n'; 4741 if (QT & Decl::OBJC_TQ_Inout) 4742 S += 'N'; 4743 if (QT & Decl::OBJC_TQ_Out) 4744 S += 'o'; 4745 if (QT & Decl::OBJC_TQ_Bycopy) 4746 S += 'O'; 4747 if (QT & Decl::OBJC_TQ_Byref) 4748 S += 'R'; 4749 if (QT & Decl::OBJC_TQ_Oneway) 4750 S += 'V'; 4751 } 4752 4753 void ASTContext::setBuiltinVaListType(QualType T) { 4754 assert(BuiltinVaListType.isNull() && "__builtin_va_list type already set!"); 4755 4756 BuiltinVaListType = T; 4757 } 4758 4759 TypedefDecl *ASTContext::getObjCIdDecl() const { 4760 if (!ObjCIdDecl) { 4761 QualType T = getObjCObjectType(ObjCBuiltinIdTy, 0, 0); 4762 T = getObjCObjectPointerType(T); 4763 TypeSourceInfo *IdInfo = getTrivialTypeSourceInfo(T); 4764 ObjCIdDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this), 4765 getTranslationUnitDecl(), 4766 SourceLocation(), SourceLocation(), 4767 &Idents.get("id"), IdInfo); 4768 } 4769 4770 return ObjCIdDecl; 4771 } 4772 4773 TypedefDecl *ASTContext::getObjCSelDecl() const { 4774 if (!ObjCSelDecl) { 4775 QualType SelT = getPointerType(ObjCBuiltinSelTy); 4776 TypeSourceInfo *SelInfo = getTrivialTypeSourceInfo(SelT); 4777 ObjCSelDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this), 4778 getTranslationUnitDecl(), 4779 SourceLocation(), SourceLocation(), 4780 &Idents.get("SEL"), SelInfo); 4781 } 4782 return ObjCSelDecl; 4783 } 4784 4785 void ASTContext::setObjCProtoType(QualType QT) { 4786 ObjCProtoType = QT; 4787 } 4788 4789 TypedefDecl *ASTContext::getObjCClassDecl() const { 4790 if (!ObjCClassDecl) { 4791 QualType T = getObjCObjectType(ObjCBuiltinClassTy, 0, 0); 4792 T = getObjCObjectPointerType(T); 4793 TypeSourceInfo *ClassInfo = getTrivialTypeSourceInfo(T); 4794 ObjCClassDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this), 4795 getTranslationUnitDecl(), 4796 SourceLocation(), SourceLocation(), 4797 &Idents.get("Class"), ClassInfo); 4798 } 4799 4800 return ObjCClassDecl; 4801 } 4802 4803 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 4804 assert(ObjCConstantStringType.isNull() && 4805 "'NSConstantString' type already set!"); 4806 4807 ObjCConstantStringType = getObjCInterfaceType(Decl); 4808 } 4809 4810 /// \brief Retrieve the template name that corresponds to a non-empty 4811 /// lookup. 
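///
/// A minimal usage sketch (hypothetical caller; the UnresolvedSet contents
/// and the names FnTmpl1/FnTmpl2 are assumptions, not taken from this file):
/// \code
///   UnresolvedSet<4> Fns;
///   Fns.addDecl(FnTmpl1);   // a FunctionTemplateDecl found by name lookup
///   Fns.addDecl(FnTmpl2);   // another overload of the same name
///   TemplateName Overloaded =
///       Context.getOverloadedTemplateName(Fns.begin(), Fns.end());
///   // 'Overloaded' now denotes the whole overload set as a single
///   // TemplateName, to be resolved later (e.g. during deduction).
/// \endcode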
4812 TemplateName 4813 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 4814 UnresolvedSetIterator End) const { 4815 unsigned size = End - Begin; 4816 assert(size > 1 && "set is not overloaded!"); 4817 4818 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 4819 size * sizeof(FunctionTemplateDecl*)); 4820 OverloadedTemplateStorage *OT = new(memory) OverloadedTemplateStorage(size); 4821 4822 NamedDecl **Storage = OT->getStorage(); 4823 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 4824 NamedDecl *D = *I; 4825 assert(isa<FunctionTemplateDecl>(D) || 4826 (isa<UsingShadowDecl>(D) && 4827 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 4828 *Storage++ = D; 4829 } 4830 4831 return TemplateName(OT); 4832 } 4833 4834 /// \brief Retrieve the template name that represents a qualified 4835 /// template name such as \c std::vector. 4836 TemplateName 4837 ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 4838 bool TemplateKeyword, 4839 TemplateDecl *Template) const { 4840 assert(NNS && "Missing nested-name-specifier in qualified template name"); 4841 4842 // FIXME: Canonicalization? 4843 llvm::FoldingSetNodeID ID; 4844 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 4845 4846 void *InsertPos = 0; 4847 QualifiedTemplateName *QTN = 4848 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 4849 if (!QTN) { 4850 QTN = new (*this,4) QualifiedTemplateName(NNS, TemplateKeyword, Template); 4851 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 4852 } 4853 4854 return TemplateName(QTN); 4855 } 4856 4857 /// \brief Retrieve the template name that represents a dependent 4858 /// template name such as \c MetaFun::template apply. 4859 TemplateName 4860 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 4861 const IdentifierInfo *Name) const { 4862 assert((!NNS || NNS->isDependent()) && 4863 "Nested name specifier must be dependent"); 4864 4865 llvm::FoldingSetNodeID ID; 4866 DependentTemplateName::Profile(ID, NNS, Name); 4867 4868 void *InsertPos = 0; 4869 DependentTemplateName *QTN = 4870 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 4871 4872 if (QTN) 4873 return TemplateName(QTN); 4874 4875 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 4876 if (CanonNNS == NNS) { 4877 QTN = new (*this,4) DependentTemplateName(NNS, Name); 4878 } else { 4879 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 4880 QTN = new (*this,4) DependentTemplateName(NNS, Name, Canon); 4881 DependentTemplateName *CheckQTN = 4882 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 4883 assert(!CheckQTN && "Dependent type name canonicalization broken"); 4884 (void)CheckQTN; 4885 } 4886 4887 DependentTemplateNames.InsertNode(QTN, InsertPos); 4888 return TemplateName(QTN); 4889 } 4890 4891 /// \brief Retrieve the template name that represents a dependent 4892 /// template name such as \c MetaFun::template operator+. 
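///
/// A minimal sketch of a call site (assumed, not from this file; 'Context'
/// and 'DependentTy' are hypothetical):
/// \code
///   // Build the name 'T::template operator+' for some dependent type T.
///   NestedNameSpecifier *NNS =
///       NestedNameSpecifier::Create(Context, /*Prefix=*/0,
///                                   /*Template=*/false,
///                                   DependentTy.getTypePtr());
///   TemplateName Name = Context.getDependentTemplateName(NNS, OO_Plus);
/// \endcode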
4893 TemplateName 4894 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 4895 OverloadedOperatorKind Operator) const { 4896 assert((!NNS || NNS->isDependent()) && 4897 "Nested name specifier must be dependent"); 4898 4899 llvm::FoldingSetNodeID ID; 4900 DependentTemplateName::Profile(ID, NNS, Operator); 4901 4902 void *InsertPos = 0; 4903 DependentTemplateName *QTN 4904 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 4905 4906 if (QTN) 4907 return TemplateName(QTN); 4908 4909 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 4910 if (CanonNNS == NNS) { 4911 QTN = new (*this,4) DependentTemplateName(NNS, Operator); 4912 } else { 4913 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 4914 QTN = new (*this,4) DependentTemplateName(NNS, Operator, Canon); 4915 4916 DependentTemplateName *CheckQTN 4917 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 4918 assert(!CheckQTN && "Dependent template name canonicalization broken"); 4919 (void)CheckQTN; 4920 } 4921 4922 DependentTemplateNames.InsertNode(QTN, InsertPos); 4923 return TemplateName(QTN); 4924 } 4925 4926 TemplateName 4927 ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, 4928 TemplateName replacement) const { 4929 llvm::FoldingSetNodeID ID; 4930 SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); 4931 4932 void *insertPos = 0; 4933 SubstTemplateTemplateParmStorage *subst 4934 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 4935 4936 if (!subst) { 4937 subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); 4938 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 4939 } 4940 4941 return TemplateName(subst); 4942 } 4943 4944 TemplateName 4945 ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, 4946 const TemplateArgument &ArgPack) const { 4947 ASTContext &Self = const_cast<ASTContext &>(*this); 4948 llvm::FoldingSetNodeID ID; 4949 SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); 4950 4951 void *InsertPos = 0; 4952 SubstTemplateTemplateParmPackStorage *Subst 4953 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); 4954 4955 if (!Subst) { 4956 Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, 4957 ArgPack.pack_size(), 4958 ArgPack.pack_begin()); 4959 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); 4960 } 4961 4962 return TemplateName(Subst); 4963 } 4964 4965 /// getFromTargetType - Given one of the integer types provided by 4966 /// TargetInfo, produce the corresponding type. The unsigned @p Type 4967 /// is actually a value of type @c TargetInfo::IntType. 4968 CanQualType ASTContext::getFromTargetType(unsigned Type) const { 4969 switch (Type) { 4970 case TargetInfo::NoInt: return CanQualType(); 4971 case TargetInfo::SignedShort: return ShortTy; 4972 case TargetInfo::UnsignedShort: return UnsignedShortTy; 4973 case TargetInfo::SignedInt: return IntTy; 4974 case TargetInfo::UnsignedInt: return UnsignedIntTy; 4975 case TargetInfo::SignedLong: return LongTy; 4976 case TargetInfo::UnsignedLong: return UnsignedLongTy; 4977 case TargetInfo::SignedLongLong: return LongLongTy; 4978 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; 4979 } 4980 4981 llvm_unreachable("Unhandled TargetInfo::IntType value"); 4982 } 4983 4984 //===----------------------------------------------------------------------===// 4985 // Type Predicates. 
//===----------------------------------------------------------------------===//

/// getObjCGCAttrKind - Returns one of GCNone, Weak or Strong, i.e. the
/// Objective-C garbage collection attribute for the given type.
///
Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
  if (getLangOptions().getGC() == LangOptions::NonGC)
    return Qualifiers::GCNone;

  assert(getLangOptions().ObjC1);
  Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();

  // The default behaviour under Objective-C's GC is for ObjC pointers
  // (or pointers to them) to be treated as though they were declared
  // as __strong.
  if (GCAttrs == Qualifiers::GCNone) {
    if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
      return Qualifiers::Strong;
    else if (Ty->isPointerType())
      return getObjCGCAttrKind(Ty->getAs<PointerType>()->getPointeeType());
  } else {
    // It's not valid to set GC attributes on anything that isn't a
    // pointer.
#ifndef NDEBUG
    QualType CT = Ty->getCanonicalTypeInternal();
    while (const ArrayType *AT = dyn_cast<ArrayType>(CT))
      CT = AT->getElementType();
    assert(CT->isAnyPointerType() || CT->isBlockPointerType());
#endif
  }
  return GCAttrs;
}

//===----------------------------------------------------------------------===//
// Type Compatibility Testing
//===----------------------------------------------------------------------===//

/// areCompatVectorTypes - Return true if the two specified vector types are
/// compatible.
static bool areCompatVectorTypes(const VectorType *LHS,
                                 const VectorType *RHS) {
  assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
  return LHS->getElementType() == RHS->getElementType() &&
         LHS->getNumElements() == RHS->getNumElements();
}

bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
                                          QualType SecondVec) {
  assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
  assert(SecondVec->isVectorType() && "SecondVec should be a vector type");

  if (hasSameUnqualifiedType(FirstVec, SecondVec))
    return true;

  // Treat Neon vector types and most AltiVec vector types as if they are the
  // equivalent GCC vector types.
  const VectorType *First = FirstVec->getAs<VectorType>();
  const VectorType *Second = SecondVec->getAs<VectorType>();
  if (First->getNumElements() == Second->getNumElements() &&
      hasSameType(First->getElementType(), Second->getElementType()) &&
      First->getVectorKind() != VectorType::AltiVecPixel &&
      First->getVectorKind() != VectorType::AltiVecBool &&
      Second->getVectorKind() != VectorType::AltiVecPixel &&
      Second->getVectorKind() != VectorType::AltiVecBool)
    return true;

  return false;
}

//===----------------------------------------------------------------------===//
// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
//===----------------------------------------------------------------------===//

/// ProtocolCompatibleWithProtocol - Return 'true' if 'lProto' is in the
/// inheritance hierarchy of 'rProto'.
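///
/// For example, assuming the (hypothetical) declarations
/// \code
///   @protocol Base @end
///   @protocol Derived <Base> @end
/// \endcode
/// ProtocolCompatibleWithProtocol(Base, Derived) is true, since 'Base' is
/// found in the inheritance hierarchy of 'Derived', whereas
/// ProtocolCompatibleWithProtocol(Derived, Base) is false.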
5061 bool 5062 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 5063 ObjCProtocolDecl *rProto) const { 5064 if (lProto == rProto) 5065 return true; 5066 for (ObjCProtocolDecl::protocol_iterator PI = rProto->protocol_begin(), 5067 E = rProto->protocol_end(); PI != E; ++PI) 5068 if (ProtocolCompatibleWithProtocol(lProto, *PI)) 5069 return true; 5070 return false; 5071 } 5072 5073 /// QualifiedIdConformsQualifiedId - compare id<p,...> with id<p1,...> 5074 /// return true if lhs's protocols conform to rhs's protocol; false 5075 /// otherwise. 5076 bool ASTContext::QualifiedIdConformsQualifiedId(QualType lhs, QualType rhs) { 5077 if (lhs->isObjCQualifiedIdType() && rhs->isObjCQualifiedIdType()) 5078 return ObjCQualifiedIdTypesAreCompatible(lhs, rhs, false); 5079 return false; 5080 } 5081 5082 /// ObjCQualifiedClassTypesAreCompatible - compare Class<p,...> and 5083 /// Class<p1, ...>. 5084 bool ASTContext::ObjCQualifiedClassTypesAreCompatible(QualType lhs, 5085 QualType rhs) { 5086 const ObjCObjectPointerType *lhsQID = lhs->getAs<ObjCObjectPointerType>(); 5087 const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 5088 assert ((lhsQID && rhsOPT) && "ObjCQualifiedClassTypesAreCompatible"); 5089 5090 for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(), 5091 E = lhsQID->qual_end(); I != E; ++I) { 5092 bool match = false; 5093 ObjCProtocolDecl *lhsProto = *I; 5094 for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(), 5095 E = rhsOPT->qual_end(); J != E; ++J) { 5096 ObjCProtocolDecl *rhsProto = *J; 5097 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 5098 match = true; 5099 break; 5100 } 5101 } 5102 if (!match) 5103 return false; 5104 } 5105 return true; 5106 } 5107 5108 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 5109 /// ObjCQualifiedIDType. 5110 bool ASTContext::ObjCQualifiedIdTypesAreCompatible(QualType lhs, QualType rhs, 5111 bool compare) { 5112 // Allow id<P..> and an 'id' or void* type in all cases. 5113 if (lhs->isVoidPointerType() || 5114 lhs->isObjCIdType() || lhs->isObjCClassType()) 5115 return true; 5116 else if (rhs->isVoidPointerType() || 5117 rhs->isObjCIdType() || rhs->isObjCClassType()) 5118 return true; 5119 5120 if (const ObjCObjectPointerType *lhsQID = lhs->getAsObjCQualifiedIdType()) { 5121 const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 5122 5123 if (!rhsOPT) return false; 5124 5125 if (rhsOPT->qual_empty()) { 5126 // If the RHS is a unqualified interface pointer "NSString*", 5127 // make sure we check the class hierarchy. 5128 if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) { 5129 for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(), 5130 E = lhsQID->qual_end(); I != E; ++I) { 5131 // when comparing an id<P> on lhs with a static type on rhs, 5132 // see if static class implements all of id's protocols, directly or 5133 // through its super class and categories. 5134 if (!rhsID->ClassImplementsProtocol(*I, true)) 5135 return false; 5136 } 5137 } 5138 // If there are no qualifiers and no interface, we have an 'id'. 5139 return true; 5140 } 5141 // Both the right and left sides have qualifiers. 
5142 for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(), 5143 E = lhsQID->qual_end(); I != E; ++I) { 5144 ObjCProtocolDecl *lhsProto = *I; 5145 bool match = false; 5146 5147 // when comparing an id<P> on lhs with a static type on rhs, 5148 // see if static class implements all of id's protocols, directly or 5149 // through its super class and categories. 5150 for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(), 5151 E = rhsOPT->qual_end(); J != E; ++J) { 5152 ObjCProtocolDecl *rhsProto = *J; 5153 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 5154 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 5155 match = true; 5156 break; 5157 } 5158 } 5159 // If the RHS is a qualified interface pointer "NSString<P>*", 5160 // make sure we check the class hierarchy. 5161 if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) { 5162 for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(), 5163 E = lhsQID->qual_end(); I != E; ++I) { 5164 // when comparing an id<P> on lhs with a static type on rhs, 5165 // see if static class implements all of id's protocols, directly or 5166 // through its super class and categories. 5167 if (rhsID->ClassImplementsProtocol(*I, true)) { 5168 match = true; 5169 break; 5170 } 5171 } 5172 } 5173 if (!match) 5174 return false; 5175 } 5176 5177 return true; 5178 } 5179 5180 const ObjCObjectPointerType *rhsQID = rhs->getAsObjCQualifiedIdType(); 5181 assert(rhsQID && "One of the LHS/RHS should be id<x>"); 5182 5183 if (const ObjCObjectPointerType *lhsOPT = 5184 lhs->getAsObjCInterfacePointerType()) { 5185 // If both the right and left sides have qualifiers. 5186 for (ObjCObjectPointerType::qual_iterator I = lhsOPT->qual_begin(), 5187 E = lhsOPT->qual_end(); I != E; ++I) { 5188 ObjCProtocolDecl *lhsProto = *I; 5189 bool match = false; 5190 5191 // when comparing an id<P> on rhs with a static type on lhs, 5192 // see if static class implements all of id's protocols, directly or 5193 // through its super class and categories. 5194 // First, lhs protocols in the qualifier list must be found, direct 5195 // or indirect in rhs's qualifier list or it is a mismatch. 5196 for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(), 5197 E = rhsQID->qual_end(); J != E; ++J) { 5198 ObjCProtocolDecl *rhsProto = *J; 5199 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 5200 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 5201 match = true; 5202 break; 5203 } 5204 } 5205 if (!match) 5206 return false; 5207 } 5208 5209 // Static class's protocols, or its super class or category protocols 5210 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 5211 if (ObjCInterfaceDecl *lhsID = lhsOPT->getInterfaceDecl()) { 5212 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 5213 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 5214 // This is rather dubious but matches gcc's behavior. If lhs has 5215 // no type qualifier and its class has no static protocol(s) 5216 // assume that it is mismatch. 
5217 if (LHSInheritedProtocols.empty() && lhsOPT->qual_empty()) 5218 return false; 5219 for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I = 5220 LHSInheritedProtocols.begin(), 5221 E = LHSInheritedProtocols.end(); I != E; ++I) { 5222 bool match = false; 5223 ObjCProtocolDecl *lhsProto = (*I); 5224 for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(), 5225 E = rhsQID->qual_end(); J != E; ++J) { 5226 ObjCProtocolDecl *rhsProto = *J; 5227 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 5228 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 5229 match = true; 5230 break; 5231 } 5232 } 5233 if (!match) 5234 return false; 5235 } 5236 } 5237 return true; 5238 } 5239 return false; 5240 } 5241 5242 /// canAssignObjCInterfaces - Return true if the two interface types are 5243 /// compatible for assignment from RHS to LHS. This handles validation of any 5244 /// protocol qualifiers on the LHS or RHS. 5245 /// 5246 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 5247 const ObjCObjectPointerType *RHSOPT) { 5248 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 5249 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 5250 5251 // If either type represents the built-in 'id' or 'Class' types, return true. 5252 if (LHS->isObjCUnqualifiedIdOrClass() || 5253 RHS->isObjCUnqualifiedIdOrClass()) 5254 return true; 5255 5256 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) 5257 return ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0), 5258 QualType(RHSOPT,0), 5259 false); 5260 5261 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) 5262 return ObjCQualifiedClassTypesAreCompatible(QualType(LHSOPT,0), 5263 QualType(RHSOPT,0)); 5264 5265 // If we have 2 user-defined types, fall into that path. 5266 if (LHS->getInterface() && RHS->getInterface()) 5267 return canAssignObjCInterfaces(LHS, RHS); 5268 5269 return false; 5270 } 5271 5272 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written 5273 /// for providing type-safety for objective-c pointers used to pass/return 5274 /// arguments in block literals. When passed as arguments, passing 'A*' where 5275 /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is 5276 /// not OK. For the return type, the opposite is not OK. 5277 bool ASTContext::canAssignObjCInterfacesInBlockPointer( 5278 const ObjCObjectPointerType *LHSOPT, 5279 const ObjCObjectPointerType *RHSOPT, 5280 bool BlockReturnType) { 5281 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) 5282 return true; 5283 5284 if (LHSOPT->isObjCBuiltinType()) { 5285 return RHSOPT->isObjCBuiltinType() || RHSOPT->isObjCQualifiedIdType(); 5286 } 5287 5288 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) 5289 return ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0), 5290 QualType(RHSOPT,0), 5291 false); 5292 5293 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); 5294 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); 5295 if (LHS && RHS) { // We have 2 user-defined types. 5296 if (LHS != RHS) { 5297 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) 5298 return BlockReturnType; 5299 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) 5300 return !BlockReturnType; 5301 } 5302 else 5303 return true; 5304 } 5305 return false; 5306 } 5307 5308 /// getIntersectionOfProtocols - This routine finds the intersection of set 5309 /// of protocols inherited from two distinct objective-c pointer objects. 
/// It is used to build the composite qualifier list of the composite type of
/// the conditional expression involving two Objective-C pointer objects.
static
void getIntersectionOfProtocols(ASTContext &Context,
                                const ObjCObjectPointerType *LHSOPT,
                                const ObjCObjectPointerType *RHSOPT,
      SmallVectorImpl<ObjCProtocolDecl *> &IntersectionOfProtocols) {

  const ObjCObjectType* LHS = LHSOPT->getObjectType();
  const ObjCObjectType* RHS = RHSOPT->getObjectType();
  assert(LHS->getInterface() && "LHS must have an interface base");
  assert(RHS->getInterface() && "RHS must have an interface base");

  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocolSet;
  unsigned LHSNumProtocols = LHS->getNumProtocols();
  if (LHSNumProtocols > 0)
    InheritedProtocolSet.insert(LHS->qual_begin(), LHS->qual_end());
  else {
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
    Context.CollectInheritedProtocols(LHS->getInterface(),
                                      LHSInheritedProtocols);
    InheritedProtocolSet.insert(LHSInheritedProtocols.begin(),
                                LHSInheritedProtocols.end());
  }

  unsigned RHSNumProtocols = RHS->getNumProtocols();
  if (RHSNumProtocols > 0) {
    ObjCProtocolDecl **RHSProtocols =
      const_cast<ObjCProtocolDecl **>(RHS->qual_begin());
    for (unsigned i = 0; i < RHSNumProtocols; ++i)
      if (InheritedProtocolSet.count(RHSProtocols[i]))
        IntersectionOfProtocols.push_back(RHSProtocols[i]);
  } else {
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSInheritedProtocols;
    Context.CollectInheritedProtocols(RHS->getInterface(),
                                      RHSInheritedProtocols);
    for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
         RHSInheritedProtocols.begin(),
         E = RHSInheritedProtocols.end(); I != E; ++I)
      if (InheritedProtocolSet.count((*I)))
        IntersectionOfProtocols.push_back((*I));
  }
}

/// areCommonBaseCompatible - Returns the common base class of the two classes,
/// if one is found. Note that this is an O(n^2) algorithm, but it is only
/// called as the last type comparison in a ?: expression of ObjC pointer
/// types before a warning is issued, so its invocation is extremely rare.
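///
/// A sketch of the intent (interface and protocol names are hypothetical):
/// \code
///   @protocol P @end
///   @interface Base <P> @end
///   @interface DerivedA : Base @end
///   @interface DerivedB : Base @end
/// \endcode
/// For 'cond ? (DerivedA *)a : (DerivedB *)b', the superclass chain of
/// DerivedA is walked until a class assignment-compatible with DerivedB is
/// found (here 'Base'), and the protocols shared by both sides are kept,
/// so the composite type would be 'Base<P> *'.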
5358 QualType ASTContext::areCommonBaseCompatible( 5359 const ObjCObjectPointerType *Lptr, 5360 const ObjCObjectPointerType *Rptr) { 5361 const ObjCObjectType *LHS = Lptr->getObjectType(); 5362 const ObjCObjectType *RHS = Rptr->getObjectType(); 5363 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 5364 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 5365 if (!LDecl || !RDecl || (LDecl == RDecl)) 5366 return QualType(); 5367 5368 do { 5369 LHS = cast<ObjCInterfaceType>(getObjCInterfaceType(LDecl)); 5370 if (canAssignObjCInterfaces(LHS, RHS)) { 5371 SmallVector<ObjCProtocolDecl *, 8> Protocols; 5372 getIntersectionOfProtocols(*this, Lptr, Rptr, Protocols); 5373 5374 QualType Result = QualType(LHS, 0); 5375 if (!Protocols.empty()) 5376 Result = getObjCObjectType(Result, Protocols.data(), Protocols.size()); 5377 Result = getObjCObjectPointerType(Result); 5378 return Result; 5379 } 5380 } while ((LDecl = LDecl->getSuperClass())); 5381 5382 return QualType(); 5383 } 5384 5385 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 5386 const ObjCObjectType *RHS) { 5387 assert(LHS->getInterface() && "LHS is not an interface type"); 5388 assert(RHS->getInterface() && "RHS is not an interface type"); 5389 5390 // Verify that the base decls are compatible: the RHS must be a subclass of 5391 // the LHS. 5392 if (!LHS->getInterface()->isSuperClassOf(RHS->getInterface())) 5393 return false; 5394 5395 // RHS must have a superset of the protocols in the LHS. If the LHS is not 5396 // protocol qualified at all, then we are good. 5397 if (LHS->getNumProtocols() == 0) 5398 return true; 5399 5400 // Okay, we know the LHS has protocol qualifiers. If the RHS doesn't, 5401 // more detailed analysis is required. 5402 if (RHS->getNumProtocols() == 0) { 5403 // OK, if LHS is a superclass of RHS *and* 5404 // this superclass is assignment compatible with LHS. 5405 // false otherwise. 5406 bool IsSuperClass = 5407 LHS->getInterface()->isSuperClassOf(RHS->getInterface()); 5408 if (IsSuperClass) { 5409 // OK if conversion of LHS to SuperClass results in narrowing of types 5410 // ; i.e., SuperClass may implement at least one of the protocols 5411 // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok. 5412 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>. 5413 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols; 5414 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols); 5415 // If super class has no protocols, it is not a match. 5416 if (SuperClassInheritedProtocols.empty()) 5417 return false; 5418 5419 for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(), 5420 LHSPE = LHS->qual_end(); 5421 LHSPI != LHSPE; LHSPI++) { 5422 bool SuperImplementsProtocol = false; 5423 ObjCProtocolDecl *LHSProto = (*LHSPI); 5424 5425 for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I = 5426 SuperClassInheritedProtocols.begin(), 5427 E = SuperClassInheritedProtocols.end(); I != E; ++I) { 5428 ObjCProtocolDecl *SuperClassProto = (*I); 5429 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) { 5430 SuperImplementsProtocol = true; 5431 break; 5432 } 5433 } 5434 if (!SuperImplementsProtocol) 5435 return false; 5436 } 5437 return true; 5438 } 5439 return false; 5440 } 5441 5442 for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(), 5443 LHSPE = LHS->qual_end(); 5444 LHSPI != LHSPE; LHSPI++) { 5445 bool RHSImplementsProtocol = false; 5446 5447 // If the RHS doesn't implement the protocol on the left, the types 5448 // are incompatible. 
5449 for (ObjCObjectType::qual_iterator RHSPI = RHS->qual_begin(), 5450 RHSPE = RHS->qual_end(); 5451 RHSPI != RHSPE; RHSPI++) { 5452 if ((*RHSPI)->lookupProtocolNamed((*LHSPI)->getIdentifier())) { 5453 RHSImplementsProtocol = true; 5454 break; 5455 } 5456 } 5457 // FIXME: For better diagnostics, consider passing back the protocol name. 5458 if (!RHSImplementsProtocol) 5459 return false; 5460 } 5461 // The RHS implements all protocols listed on the LHS. 5462 return true; 5463 } 5464 5465 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) { 5466 // get the "pointed to" types 5467 const ObjCObjectPointerType *LHSOPT = LHS->getAs<ObjCObjectPointerType>(); 5468 const ObjCObjectPointerType *RHSOPT = RHS->getAs<ObjCObjectPointerType>(); 5469 5470 if (!LHSOPT || !RHSOPT) 5471 return false; 5472 5473 return canAssignObjCInterfaces(LHSOPT, RHSOPT) || 5474 canAssignObjCInterfaces(RHSOPT, LHSOPT); 5475 } 5476 5477 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) { 5478 return canAssignObjCInterfaces( 5479 getObjCObjectPointerType(To)->getAs<ObjCObjectPointerType>(), 5480 getObjCObjectPointerType(From)->getAs<ObjCObjectPointerType>()); 5481 } 5482 5483 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible, 5484 /// both shall have the identically qualified version of a compatible type. 5485 /// C99 6.2.7p1: Two types have compatible types if their types are the 5486 /// same. See 6.7.[2,3,5] for additional rules. 5487 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 5488 bool CompareUnqualified) { 5489 if (getLangOptions().CPlusPlus) 5490 return hasSameType(LHS, RHS); 5491 5492 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 5493 } 5494 5495 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 5496 return typesAreCompatible(LHS, RHS); 5497 } 5498 5499 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 5500 return !mergeTypes(LHS, RHS, true).isNull(); 5501 } 5502 5503 /// mergeTransparentUnionType - if T is a transparent union type and a member 5504 /// of T is compatible with SubType, return the merged type, else return 5505 /// QualType() 5506 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 5507 bool OfBlockPointer, 5508 bool Unqualified) { 5509 if (const RecordType *UT = T->getAsUnionType()) { 5510 RecordDecl *UD = UT->getDecl(); 5511 if (UD->hasAttr<TransparentUnionAttr>()) { 5512 for (RecordDecl::field_iterator it = UD->field_begin(), 5513 itend = UD->field_end(); it != itend; ++it) { 5514 QualType ET = it->getType().getUnqualifiedType(); 5515 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 5516 if (!MT.isNull()) 5517 return MT; 5518 } 5519 } 5520 } 5521 5522 return QualType(); 5523 } 5524 5525 /// mergeFunctionArgumentTypes - merge two types which appear as function 5526 /// argument types 5527 QualType ASTContext::mergeFunctionArgumentTypes(QualType lhs, QualType rhs, 5528 bool OfBlockPointer, 5529 bool Unqualified) { 5530 // GNU extension: two types are compatible if they appear as a function 5531 // argument, one of the types is a transparent union type and the other 5532 // type is compatible with a union member 5533 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 5534 Unqualified); 5535 if (!lmerge.isNull()) 5536 return lmerge; 5537 5538 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 5539 Unqualified); 5540 if (!rmerge.isNull()) 5541 return rmerge; 5542 
5543 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 5544 } 5545 5546 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 5547 bool OfBlockPointer, 5548 bool Unqualified) { 5549 const FunctionType *lbase = lhs->getAs<FunctionType>(); 5550 const FunctionType *rbase = rhs->getAs<FunctionType>(); 5551 const FunctionProtoType *lproto = dyn_cast<FunctionProtoType>(lbase); 5552 const FunctionProtoType *rproto = dyn_cast<FunctionProtoType>(rbase); 5553 bool allLTypes = true; 5554 bool allRTypes = true; 5555 5556 // Check return type 5557 QualType retType; 5558 if (OfBlockPointer) { 5559 QualType RHS = rbase->getResultType(); 5560 QualType LHS = lbase->getResultType(); 5561 bool UnqualifiedResult = Unqualified; 5562 if (!UnqualifiedResult) 5563 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 5564 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 5565 } 5566 else 5567 retType = mergeTypes(lbase->getResultType(), rbase->getResultType(), false, 5568 Unqualified); 5569 if (retType.isNull()) return QualType(); 5570 5571 if (Unqualified) 5572 retType = retType.getUnqualifiedType(); 5573 5574 CanQualType LRetType = getCanonicalType(lbase->getResultType()); 5575 CanQualType RRetType = getCanonicalType(rbase->getResultType()); 5576 if (Unqualified) { 5577 LRetType = LRetType.getUnqualifiedType(); 5578 RRetType = RRetType.getUnqualifiedType(); 5579 } 5580 5581 if (getCanonicalType(retType) != LRetType) 5582 allLTypes = false; 5583 if (getCanonicalType(retType) != RRetType) 5584 allRTypes = false; 5585 5586 // FIXME: double check this 5587 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 5588 // rbase->getRegParmAttr() != 0 && 5589 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 5590 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 5591 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 5592 5593 // Compatible functions must have compatible calling conventions 5594 if (!isSameCallConv(lbaseInfo.getCC(), rbaseInfo.getCC())) 5595 return QualType(); 5596 5597 // Regparm is part of the calling convention. 5598 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 5599 return QualType(); 5600 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 5601 return QualType(); 5602 5603 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 5604 return QualType(); 5605 5606 // functypes which return are preferred over those that do not. 5607 if (lbaseInfo.getNoReturn() && !rbaseInfo.getNoReturn()) 5608 allLTypes = false; 5609 else if (!lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn()) 5610 allRTypes = false; 5611 // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'. 
5612 bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 5613 5614 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 5615 5616 if (lproto && rproto) { // two C99 style function prototypes 5617 assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() && 5618 "C++ shouldn't be here"); 5619 unsigned lproto_nargs = lproto->getNumArgs(); 5620 unsigned rproto_nargs = rproto->getNumArgs(); 5621 5622 // Compatible functions must have the same number of arguments 5623 if (lproto_nargs != rproto_nargs) 5624 return QualType(); 5625 5626 // Variadic and non-variadic functions aren't compatible 5627 if (lproto->isVariadic() != rproto->isVariadic()) 5628 return QualType(); 5629 5630 if (lproto->getTypeQuals() != rproto->getTypeQuals()) 5631 return QualType(); 5632 5633 if (LangOpts.ObjCAutoRefCount && 5634 !FunctionTypesMatchOnNSConsumedAttrs(rproto, lproto)) 5635 return QualType(); 5636 5637 // Check argument compatibility 5638 SmallVector<QualType, 10> types; 5639 for (unsigned i = 0; i < lproto_nargs; i++) { 5640 QualType largtype = lproto->getArgType(i).getUnqualifiedType(); 5641 QualType rargtype = rproto->getArgType(i).getUnqualifiedType(); 5642 QualType argtype = mergeFunctionArgumentTypes(largtype, rargtype, 5643 OfBlockPointer, 5644 Unqualified); 5645 if (argtype.isNull()) return QualType(); 5646 5647 if (Unqualified) 5648 argtype = argtype.getUnqualifiedType(); 5649 5650 types.push_back(argtype); 5651 if (Unqualified) { 5652 largtype = largtype.getUnqualifiedType(); 5653 rargtype = rargtype.getUnqualifiedType(); 5654 } 5655 5656 if (getCanonicalType(argtype) != getCanonicalType(largtype)) 5657 allLTypes = false; 5658 if (getCanonicalType(argtype) != getCanonicalType(rargtype)) 5659 allRTypes = false; 5660 } 5661 5662 if (allLTypes) return lhs; 5663 if (allRTypes) return rhs; 5664 5665 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 5666 EPI.ExtInfo = einfo; 5667 return getFunctionType(retType, types.begin(), types.size(), EPI); 5668 } 5669 5670 if (lproto) allRTypes = false; 5671 if (rproto) allLTypes = false; 5672 5673 const FunctionProtoType *proto = lproto ? lproto : rproto; 5674 if (proto) { 5675 assert(!proto->hasExceptionSpec() && "C++ shouldn't be here"); 5676 if (proto->isVariadic()) return QualType(); 5677 // Check that the types are compatible with the types that 5678 // would result from default argument promotions (C99 6.7.5.3p15). 5679 // The only types actually affected are promotable integer 5680 // types and floats, which would be passed as a different 5681 // type depending on whether the prototype is visible. 5682 unsigned proto_nargs = proto->getNumArgs(); 5683 for (unsigned i = 0; i < proto_nargs; ++i) { 5684 QualType argTy = proto->getArgType(i); 5685 5686 // Look at the promotion type of enum types, since that is the type used 5687 // to pass enum values. 
5688 if (const EnumType *Enum = argTy->getAs<EnumType>()) 5689 argTy = Enum->getDecl()->getPromotionType(); 5690 5691 if (argTy->isPromotableIntegerType() || 5692 getCanonicalType(argTy).getUnqualifiedType() == FloatTy) 5693 return QualType(); 5694 } 5695 5696 if (allLTypes) return lhs; 5697 if (allRTypes) return rhs; 5698 5699 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 5700 EPI.ExtInfo = einfo; 5701 return getFunctionType(retType, proto->arg_type_begin(), 5702 proto->getNumArgs(), EPI); 5703 } 5704 5705 if (allLTypes) return lhs; 5706 if (allRTypes) return rhs; 5707 return getFunctionNoProtoType(retType, einfo); 5708 } 5709 5710 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, 5711 bool OfBlockPointer, 5712 bool Unqualified, bool BlockReturnType) { 5713 // C++ [expr]: If an expression initially has the type "reference to T", the 5714 // type is adjusted to "T" prior to any further analysis, the expression 5715 // designates the object or function denoted by the reference, and the 5716 // expression is an lvalue unless the reference is an rvalue reference and 5717 // the expression is a function call (possibly inside parentheses). 5718 assert(!LHS->getAs<ReferenceType>() && "LHS is a reference type?"); 5719 assert(!RHS->getAs<ReferenceType>() && "RHS is a reference type?"); 5720 5721 if (Unqualified) { 5722 LHS = LHS.getUnqualifiedType(); 5723 RHS = RHS.getUnqualifiedType(); 5724 } 5725 5726 QualType LHSCan = getCanonicalType(LHS), 5727 RHSCan = getCanonicalType(RHS); 5728 5729 // If two types are identical, they are compatible. 5730 if (LHSCan == RHSCan) 5731 return LHS; 5732 5733 // If the qualifiers are different, the types aren't compatible... mostly. 5734 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 5735 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 5736 if (LQuals != RQuals) { 5737 // If any of these qualifiers are different, we have a type 5738 // mismatch. 5739 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 5740 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 5741 LQuals.getObjCLifetime() != RQuals.getObjCLifetime()) 5742 return QualType(); 5743 5744 // Exactly one GC qualifier difference is allowed: __strong is 5745 // okay if the other type has no GC qualifier but is an Objective 5746 // C object pointer (i.e. implicitly strong by default). We fix 5747 // this by pretending that the unqualified type was actually 5748 // qualified __strong. 5749 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 5750 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 5751 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 5752 5753 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 5754 return QualType(); 5755 5756 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 5757 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 5758 } 5759 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 5760 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 5761 } 5762 return QualType(); 5763 } 5764 5765 // Okay, qualifiers are equal. 5766 5767 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 5768 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 5769 5770 // We want to consider the two function types to be the same for these 5771 // comparisons, just force one to the other. 
5772 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 5773 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 5774 5775 // Same as above for arrays 5776 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 5777 LHSClass = Type::ConstantArray; 5778 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 5779 RHSClass = Type::ConstantArray; 5780 5781 // ObjCInterfaces are just specialized ObjCObjects. 5782 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 5783 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 5784 5785 // Canonicalize ExtVector -> Vector. 5786 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 5787 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 5788 5789 // If the canonical type classes don't match. 5790 if (LHSClass != RHSClass) { 5791 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 5792 // a signed integer type, or an unsigned integer type. 5793 // Compatibility is based on the underlying type, not the promotion 5794 // type. 5795 if (const EnumType* ETy = LHS->getAs<EnumType>()) { 5796 if (ETy->getDecl()->getIntegerType() == RHSCan.getUnqualifiedType()) 5797 return RHS; 5798 } 5799 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 5800 if (ETy->getDecl()->getIntegerType() == LHSCan.getUnqualifiedType()) 5801 return LHS; 5802 } 5803 5804 return QualType(); 5805 } 5806 5807 // The canonical type classes match. 5808 switch (LHSClass) { 5809 #define TYPE(Class, Base) 5810 #define ABSTRACT_TYPE(Class, Base) 5811 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 5812 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 5813 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 5814 #include "clang/AST/TypeNodes.def" 5815 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 5816 5817 case Type::LValueReference: 5818 case Type::RValueReference: 5819 case Type::MemberPointer: 5820 llvm_unreachable("C++ should never be in mergeTypes"); 5821 5822 case Type::ObjCInterface: 5823 case Type::IncompleteArray: 5824 case Type::VariableArray: 5825 case Type::FunctionProto: 5826 case Type::ExtVector: 5827 llvm_unreachable("Types are eliminated above"); 5828 5829 case Type::Pointer: 5830 { 5831 // Merge two pointer types, while trying to preserve typedef info 5832 QualType LHSPointee = LHS->getAs<PointerType>()->getPointeeType(); 5833 QualType RHSPointee = RHS->getAs<PointerType>()->getPointeeType(); 5834 if (Unqualified) { 5835 LHSPointee = LHSPointee.getUnqualifiedType(); 5836 RHSPointee = RHSPointee.getUnqualifiedType(); 5837 } 5838 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 5839 Unqualified); 5840 if (ResultType.isNull()) return QualType(); 5841 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 5842 return LHS; 5843 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 5844 return RHS; 5845 return getPointerType(ResultType); 5846 } 5847 case Type::BlockPointer: 5848 { 5849 // Merge two block pointer types, while trying to preserve typedef info 5850 QualType LHSPointee = LHS->getAs<BlockPointerType>()->getPointeeType(); 5851 QualType RHSPointee = RHS->getAs<BlockPointerType>()->getPointeeType(); 5852 if (Unqualified) { 5853 LHSPointee = LHSPointee.getUnqualifiedType(); 5854 RHSPointee = RHSPointee.getUnqualifiedType(); 5855 } 5856 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 5857 Unqualified); 5858 if 
(ResultType.isNull()) return QualType(); 5859 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 5860 return LHS; 5861 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 5862 return RHS; 5863 return getBlockPointerType(ResultType); 5864 } 5865 case Type::Atomic: 5866 { 5867 // Merge two pointer types, while trying to preserve typedef info 5868 QualType LHSValue = LHS->getAs<AtomicType>()->getValueType(); 5869 QualType RHSValue = RHS->getAs<AtomicType>()->getValueType(); 5870 if (Unqualified) { 5871 LHSValue = LHSValue.getUnqualifiedType(); 5872 RHSValue = RHSValue.getUnqualifiedType(); 5873 } 5874 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 5875 Unqualified); 5876 if (ResultType.isNull()) return QualType(); 5877 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 5878 return LHS; 5879 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 5880 return RHS; 5881 return getAtomicType(ResultType); 5882 } 5883 case Type::ConstantArray: 5884 { 5885 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 5886 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 5887 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 5888 return QualType(); 5889 5890 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 5891 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 5892 if (Unqualified) { 5893 LHSElem = LHSElem.getUnqualifiedType(); 5894 RHSElem = RHSElem.getUnqualifiedType(); 5895 } 5896 5897 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 5898 if (ResultType.isNull()) return QualType(); 5899 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 5900 return LHS; 5901 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 5902 return RHS; 5903 if (LCAT) return getConstantArrayType(ResultType, LCAT->getSize(), 5904 ArrayType::ArraySizeModifier(), 0); 5905 if (RCAT) return getConstantArrayType(ResultType, RCAT->getSize(), 5906 ArrayType::ArraySizeModifier(), 0); 5907 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 5908 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 5909 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 5910 return LHS; 5911 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 5912 return RHS; 5913 if (LVAT) { 5914 // FIXME: This isn't correct! But tricky to implement because 5915 // the array's size has to be the size of LHS, but the type 5916 // has to be different. 5917 return LHS; 5918 } 5919 if (RVAT) { 5920 // FIXME: This isn't correct! But tricky to implement because 5921 // the array's size has to be the size of RHS, but the type 5922 // has to be different. 5923 return RHS; 5924 } 5925 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 5926 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 5927 return getIncompleteArrayType(ResultType, 5928 ArrayType::ArraySizeModifier(), 0); 5929 } 5930 case Type::FunctionNoProto: 5931 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); 5932 case Type::Record: 5933 case Type::Enum: 5934 return QualType(); 5935 case Type::Builtin: 5936 // Only exactly equal builtin types are compatible, which is tested above. 5937 return QualType(); 5938 case Type::Complex: 5939 // Distinct complex types are incompatible. 5940 return QualType(); 5941 case Type::Vector: 5942 // FIXME: The merged type should be an ExtVector! 
5943 if (areCompatVectorTypes(LHSCan->getAs<VectorType>(), 5944 RHSCan->getAs<VectorType>())) 5945 return LHS; 5946 return QualType(); 5947 case Type::ObjCObject: { 5948 // Check if the types are assignment compatible. 5949 // FIXME: This should be type compatibility, e.g. whether 5950 // "LHS x; RHS x;" at global scope is legal. 5951 const ObjCObjectType* LHSIface = LHS->getAs<ObjCObjectType>(); 5952 const ObjCObjectType* RHSIface = RHS->getAs<ObjCObjectType>(); 5953 if (canAssignObjCInterfaces(LHSIface, RHSIface)) 5954 return LHS; 5955 5956 return QualType(); 5957 } 5958 case Type::ObjCObjectPointer: { 5959 if (OfBlockPointer) { 5960 if (canAssignObjCInterfacesInBlockPointer( 5961 LHS->getAs<ObjCObjectPointerType>(), 5962 RHS->getAs<ObjCObjectPointerType>(), 5963 BlockReturnType)) 5964 return LHS; 5965 return QualType(); 5966 } 5967 if (canAssignObjCInterfaces(LHS->getAs<ObjCObjectPointerType>(), 5968 RHS->getAs<ObjCObjectPointerType>())) 5969 return LHS; 5970 5971 return QualType(); 5972 } 5973 } 5974 5975 return QualType(); 5976 } 5977 5978 bool ASTContext::FunctionTypesMatchOnNSConsumedAttrs( 5979 const FunctionProtoType *FromFunctionType, 5980 const FunctionProtoType *ToFunctionType) { 5981 if (FromFunctionType->hasAnyConsumedArgs() != 5982 ToFunctionType->hasAnyConsumedArgs()) 5983 return false; 5984 FunctionProtoType::ExtProtoInfo FromEPI = 5985 FromFunctionType->getExtProtoInfo(); 5986 FunctionProtoType::ExtProtoInfo ToEPI = 5987 ToFunctionType->getExtProtoInfo(); 5988 if (FromEPI.ConsumedArguments && ToEPI.ConsumedArguments) 5989 for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs(); 5990 ArgIdx != NumArgs; ++ArgIdx) { 5991 if (FromEPI.ConsumedArguments[ArgIdx] != 5992 ToEPI.ConsumedArguments[ArgIdx]) 5993 return false; 5994 } 5995 return true; 5996 } 5997 5998 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 5999 /// 'RHS' attributes and returns the merged version; including for function 6000 /// return types. 6001 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 6002 QualType LHSCan = getCanonicalType(LHS), 6003 RHSCan = getCanonicalType(RHS); 6004 // If two types are identical, they are compatible. 6005 if (LHSCan == RHSCan) 6006 return LHS; 6007 if (RHSCan->isFunctionType()) { 6008 if (!LHSCan->isFunctionType()) 6009 return QualType(); 6010 QualType OldReturnType = 6011 cast<FunctionType>(RHSCan.getTypePtr())->getResultType(); 6012 QualType NewReturnType = 6013 cast<FunctionType>(LHSCan.getTypePtr())->getResultType(); 6014 QualType ResReturnType = 6015 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 6016 if (ResReturnType.isNull()) 6017 return QualType(); 6018 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 6019 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 6020 // In either case, use OldReturnType to build the new function type. 6021 const FunctionType *F = LHS->getAs<FunctionType>(); 6022 if (const FunctionProtoType *FPT = cast<FunctionProtoType>(F)) { 6023 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 6024 EPI.ExtInfo = getFunctionExtInfo(LHS); 6025 QualType ResultType 6026 = getFunctionType(OldReturnType, FPT->arg_type_begin(), 6027 FPT->getNumArgs(), EPI); 6028 return ResultType; 6029 } 6030 } 6031 return QualType(); 6032 } 6033 6034 // If the qualifiers are different, the types can still be merged. 
6035 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 6036 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 6037 if (LQuals != RQuals) { 6038 // If any of these qualifiers are different, we have a type mismatch. 6039 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 6040 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 6041 return QualType(); 6042 6043 // Exactly one GC qualifier difference is allowed: __strong is 6044 // okay if the other type has no GC qualifier but is an Objective 6045 // C object pointer (i.e. implicitly strong by default). We fix 6046 // this by pretending that the unqualified type was actually 6047 // qualified __strong. 6048 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 6049 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 6050 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 6051 6052 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 6053 return QualType(); 6054 6055 if (GC_L == Qualifiers::Strong) 6056 return LHS; 6057 if (GC_R == Qualifiers::Strong) 6058 return RHS; 6059 return QualType(); 6060 } 6061 6062 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { 6063 QualType LHSBaseQT = LHS->getAs<ObjCObjectPointerType>()->getPointeeType(); 6064 QualType RHSBaseQT = RHS->getAs<ObjCObjectPointerType>()->getPointeeType(); 6065 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); 6066 if (ResQT == LHSBaseQT) 6067 return LHS; 6068 if (ResQT == RHSBaseQT) 6069 return RHS; 6070 } 6071 return QualType(); 6072 } 6073 6074 //===----------------------------------------------------------------------===// 6075 // Integer Predicates 6076 //===----------------------------------------------------------------------===// 6077 6078 unsigned ASTContext::getIntWidth(QualType T) const { 6079 if (const EnumType *ET = dyn_cast<EnumType>(T)) 6080 T = ET->getDecl()->getIntegerType(); 6081 if (T->isBooleanType()) 6082 return 1; 6083 // For builtin types, just use the standard type sizing method 6084 return (unsigned)getTypeSize(T); 6085 } 6086 6087 QualType ASTContext::getCorrespondingUnsignedType(QualType T) { 6088 assert(T->hasSignedIntegerRepresentation() && "Unexpected type"); 6089 6090 // Turn <4 x signed int> -> <4 x unsigned int> 6091 if (const VectorType *VTy = T->getAs<VectorType>()) 6092 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), 6093 VTy->getNumElements(), VTy->getVectorKind()); 6094 6095 // For enums, we return the unsigned version of the base type. 
  if (const EnumType *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  const BuiltinType *BTy = T->getAs<BuiltinType>();
  assert(BTy && "Unexpected signed integer type");
  switch (BTy->getKind()) {
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  default:
    llvm_unreachable("Unexpected signed integer type");
  }
}

ASTMutationListener::~ASTMutationListener() { }

//===----------------------------------------------------------------------===//
// Builtin Type Computation
//===----------------------------------------------------------------------===//

/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
/// pointer over the consumed characters. This returns the resultant type. If
/// AllowTypeModifiers is false then modifiers like * are not parsed, just basic
/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
/// a vector of "i*".
///
/// RequiresICE is filled in on return to indicate whether the value is required
/// to be an Integer Constant Expression.
static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
                                  ASTContext::GetBuiltinTypeError &Error,
                                  bool &RequiresICE,
                                  bool AllowTypeModifiers) {
  // Modifiers.
  int HowLong = 0;
  bool Signed = false, Unsigned = false;
  RequiresICE = false;

  // Read the prefixed modifiers first.
  bool Done = false;
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break;
    case 'I':
      RequiresICE = true;
      break;
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    }
  }

  QualType Type;

  // Read the base type.
  switch (*Str++) {
  default: llvm_unreachable("Unknown builtin type letter!");
  case 'v':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'f':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd':
    assert(HowLong < 2 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    if (HowLong)
      Type = Context.LongDoubleTy;
    else
      Type = Context.DoubleTy;
    break;
  case 's':
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i':
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ?
static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
                                  ASTContext::GetBuiltinTypeError &Error,
                                  bool &RequiresICE,
                                  bool AllowTypeModifiers) {
  // Modifiers.
  int HowLong = 0;
  bool Signed = false, Unsigned = false;
  RequiresICE = false;

  // Read the prefixed modifiers first.
  bool Done = false;
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break;
    case 'I':
      RequiresICE = true;
      break;
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    }
  }

  QualType Type;

  // Read the base type.
  switch (*Str++) {
  default: llvm_unreachable("Unknown builtin type letter!");
  case 'v':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'f':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd':
    assert(HowLong < 2 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    if (HowLong)
      Type = Context.LongDoubleTy;
    else
      Type = Context.DoubleTy;
    break;
  case 's':
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i':
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
    else if (HowLong == 1)
      Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    else
      Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
    break;
  case 'c':
    assert(HowLong == 0 && "Bad modifiers used with 'c'!");
    if (Signed)
      Type = Context.SignedCharTy;
    else if (Unsigned)
      Type = Context.UnsignedCharTy;
    else
      Type = Context.CharTy;
    break;
  case 'b': // boolean
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
    Type = Context.BoolTy;
    break;
  case 'z': // size_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
    Type = Context.getSizeType();
    break;
  case 'F':
    Type = Context.getCFConstantStringType();
    break;
  case 'G':
    Type = Context.getObjCIdType();
    break;
  case 'H':
    Type = Context.getObjCSelType();
    break;
  case 'a':
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    break;
  case 'A':
    // This is a "reference" to a va_list; however, what exactly
    // this means depends on how va_list is defined. There are two
    // different kinds of va_list: ones passed by value, and ones
    // passed by reference. An example of a by-value va_list is
    // x86, where va_list is a char*. An example of by-ref va_list
    // is x86-64, where va_list is a __va_list_tag[1]. For x86,
    // we want this argument to be a char*&; for x86-64, we want
    // it to be a __va_list_tag*.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    if (Type->isArrayType())
      Type = Context.getArrayDecayedType(Type);
    else
      Type = Context.getLValueReferenceType(Type);
    break;
  case 'V': {
    char *End;
    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, false);
    assert(!RequiresICE && "Can't require vector ICE");

    // TODO: No way to make AltiVec vectors in builtins yet.
    Type = Context.getVectorType(ElementType, NumElements,
                                 VectorType::GenericVector);
    break;
  }
  case 'X': {
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    assert(!RequiresICE && "Can't require complex ICE");
    Type = Context.getComplexType(ElementType);
    break;
  }
  case 'Y' : {
    Type = Context.getPointerDiffType();
    break;
  }
  case 'P':
    Type = Context.getFILEType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_stdio;
      return QualType();
    }
    break;
  case 'J':
    if (Signed)
      Type = Context.getsigjmp_bufType();
    else
      Type = Context.getjmp_bufType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_setjmp;
      return QualType();
    }
    break;
  }

  // If there are modifiers and if we're allowed to parse them, go for it.
  Done = !AllowTypeModifiers;
  while (!Done) {
    switch (char c = *Str++) {
    default: Done = true; --Str; break;
    case '*':
    case '&': {
      // Both pointers and references can have their pointee types
      // qualified with an address space.
      char *End;
      unsigned AddrSpace = strtoul(Str, &End, 10);
      if (End != Str && AddrSpace != 0) {
        Type = Context.getAddrSpaceQualType(Type, AddrSpace);
        Str = End;
      }
      if (c == '*')
        Type = Context.getPointerType(Type);
      else
        Type = Context.getLValueReferenceType(Type);
      break;
    }
    // FIXME: There's no way to have a built-in with an rvalue ref arg.
    case 'C':
      Type = Type.withConst();
      break;
    case 'D':
      Type = Context.getVolatileType(Type);
      break;
    }
  }

  assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
         "Integer constant 'I' type must be an integer");

  return Type;
}

/// GetBuiltinType - Return the type for the specified builtin.
QualType ASTContext::GetBuiltinType(unsigned Id,
                                    GetBuiltinTypeError &Error,
                                    unsigned *IntegerConstantArgs) const {
  const char *TypeStr = BuiltinInfo.GetTypeString(Id);

  SmallVector<QualType, 8> ArgTypes;

  bool RequiresICE = false;
  Error = GE_None;
  QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
                                       RequiresICE, true);
  if (Error != GE_None)
    return QualType();

  assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");

  while (TypeStr[0] && TypeStr[0] != '.') {
    QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
    if (Error != GE_None)
      return QualType();

    // If this argument is required to be an IntegerConstantExpression and the
    // caller cares, fill in the bitmask we return.
    if (RequiresICE && IntegerConstantArgs)
      *IntegerConstantArgs |= 1 << ArgTypes.size();

    // Do array -> pointer decay. The builtin should use the decayed type.
    if (Ty->isArrayType())
      Ty = getArrayDecayedType(Ty);

    ArgTypes.push_back(Ty);
  }

  assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
         "'.' should only occur at end of builtin type list!");

  FunctionType::ExtInfo EI;
  if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);

  bool Variadic = (TypeStr[0] == '.');

  // We really shouldn't be making a no-proto type here, especially in C++.
  if (ArgTypes.empty() && Variadic)
    return getFunctionNoProtoType(ResType, EI);

  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExtInfo = EI;
  EPI.Variadic = Variadic;

  return getFunctionType(ResType, ArgTypes.data(), ArgTypes.size(), EPI);
}

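// Illustrative sketch (not part of the original source) of how a caller
// might consume the ICE bitmask filled in by GetBuiltinType above; the
// builtin Id and surrounding setup are assumed/hypothetical:
//
//   unsigned ICEArguments = 0;
//   ASTContext::GetBuiltinTypeError Error;
//   QualType FnType = Ctx.GetBuiltinType(BuiltinID, Error, &ICEArguments);
//   // Bit N of ICEArguments is set when argument N (0-based) carried the
//   // 'I' prefix and therefore must be an integer constant expression.
//   bool ArgTwoMustBeICE = (ICEArguments & (1 << 2)) != 0;
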
GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) {
  GVALinkage External = GVA_StrongExternal;

  Linkage L = FD->getLinkage();
  switch (L) {
  case NoLinkage:
  case InternalLinkage:
  case UniqueExternalLinkage:
    return GVA_Internal;

  case ExternalLinkage:
    switch (FD->getTemplateSpecializationKind()) {
    case TSK_Undeclared:
    case TSK_ExplicitSpecialization:
      External = GVA_StrongExternal;
      break;

    case TSK_ExplicitInstantiationDefinition:
      return GVA_ExplicitTemplateInstantiation;

    case TSK_ExplicitInstantiationDeclaration:
    case TSK_ImplicitInstantiation:
      External = GVA_TemplateInstantiation;
      break;
    }
  }

  if (!FD->isInlined())
    return External;

  if (!getLangOptions().CPlusPlus || FD->hasAttr<GNUInlineAttr>()) {
    // GNU or C99 inline semantics. Determine whether this symbol should be
    // externally visible.
    if (FD->isInlineDefinitionExternallyVisible())
      return External;

    // C99 inline semantics, where the symbol is not externally visible.
    return GVA_C99Inline;
  }

  // C++0x [temp.explicit]p9:
  //   [ Note: The intent is that an inline function that is the subject of
  //   an explicit instantiation declaration will still be implicitly
  //   instantiated when used so that the body can be considered for
  //   inlining, but that no out-of-line copy of the inline function would be
  //   generated in the translation unit. -- end note ]
  if (FD->getTemplateSpecializationKind()
                                      == TSK_ExplicitInstantiationDeclaration)
    return GVA_C99Inline;

  return GVA_CXXInline;
}

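// Illustrative note (not part of the original source): for example, a plain
// C++ 'inline' function with external linkage that is neither a template
// instantiation nor GNU-inline falls through to GVA_CXXInline above, while a
// C99 'inline' definition whose symbol is not externally visible is
// classified GVA_C99Inline; both may be deferred and emitted only if used
// (see DeclMustBeEmitted below).
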
GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
  // If this is a static data member, compute the kind of template
  // specialization. Otherwise, this variable is not part of a
  // template.
  TemplateSpecializationKind TSK = TSK_Undeclared;
  if (VD->isStaticDataMember())
    TSK = VD->getTemplateSpecializationKind();

  Linkage L = VD->getLinkage();
  if (L == ExternalLinkage && getLangOptions().CPlusPlus &&
      VD->getType()->getLinkage() == UniqueExternalLinkage)
    L = UniqueExternalLinkage;

  switch (L) {
  case NoLinkage:
  case InternalLinkage:
  case UniqueExternalLinkage:
    return GVA_Internal;

  case ExternalLinkage:
    switch (TSK) {
    case TSK_Undeclared:
    case TSK_ExplicitSpecialization:
      return GVA_StrongExternal;

    case TSK_ExplicitInstantiationDeclaration:
      llvm_unreachable("Variable should not be instantiated");
      // Fall through to treat this like any other instantiation.

    case TSK_ExplicitInstantiationDefinition:
      return GVA_ExplicitTemplateInstantiation;

    case TSK_ImplicitInstantiation:
      return GVA_TemplateInstantiation;
    }
  }

  return GVA_StrongExternal;
}

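// Illustrative note (not part of the original source): for instance, an
// ordinary namespace-scope variable with external linkage maps to
// GVA_StrongExternal, while a static data member of an implicitly
// instantiated class template maps to GVA_TemplateInstantiation and may be
// deferred unless its initializer has side effects (see DeclMustBeEmitted
// below).
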
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (!VD->isFileVarDecl())
      return false;
  } else if (!isa<FunctionDecl>(D))
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required.
    if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
      const CXXRecordDecl *RD = MD->getParent();
      if (MD->isOutOfLine() && RD->isDynamicClass()) {
        const CXXMethodDecl *KeyFunc = getKeyFunction(RD);
        if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
          return true;
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred. Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    if (Linkage == GVA_Internal || Linkage == GVA_C99Inline ||
        Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
      return false;
    return true;
  }

  const VarDecl *VD = cast<VarDecl>(D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly)
    return false;

  // Structs that have non-trivial constructors or destructors are required.

  // FIXME: Handle references.
  // FIXME: Be more selective about which constructors we care about.
  if (const RecordType *RT = VD->getType()->getAs<RecordType>()) {
    if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (RD->hasDefinition() && !(RD->hasTrivialDefaultConstructor() &&
                                   RD->hasTrivialCopyConstructor() &&
                                   RD->hasTrivialMoveConstructor() &&
                                   RD->hasTrivialDestructor()))
        return true;
    }
  }

  GVALinkage L = GetGVALinkageForVariable(VD);
  if (L == GVA_Internal || L == GVA_TemplateInstantiation) {
    if (!(VD->getInit() && VD->getInit()->HasSideEffects(*this)))
      return false;
  }

  return true;
}

CallingConv ASTContext::getDefaultMethodCallConv() {
  // Pass through to the C++ ABI object.
  return ABI->getDefaultMethodCallConv();
}

bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object.
  return ABI->isNearlyEmpty(RD);
}

MangleContext *ASTContext::createMangleContext() {
  switch (Target->getCXXABI()) {
  case CXXABI_ARM:
  case CXXABI_Itanium:
    return createItaniumMangleContext(*this, getDiagnostics());
  case CXXABI_Microsoft:
    return createMicrosoftMangleContext(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}

CXXABI::~CXXABI() {}

size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize()
       + llvm::capacity_in_bytes(ObjCLayouts)
       + llvm::capacity_in_bytes(KeyFunctions)
       + llvm::capacity_in_bytes(ObjCImpls)
       + llvm::capacity_in_bytes(BlockVarCopyInits)
       + llvm::capacity_in_bytes(DeclAttrs)
       + llvm::capacity_in_bytes(InstantiatedFromStaticDataMember)
       + llvm::capacity_in_bytes(InstantiatedFromUsingDecl)
       + llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl)
       + llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl)
       + llvm::capacity_in_bytes(OverriddenMethods)
       + llvm::capacity_in_bytes(Types)
       + llvm::capacity_in_bytes(VariableArrayTypes)
       + llvm::capacity_in_bytes(ClassScopeSpecializationPattern);
}

void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}

unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
  ParameterIndexTable::const_iterator I = ParamIndices.find(D);
  assert(I != ParamIndices.end() &&
         "ParmIndices lacks entry set by ParmVarDecl");
  return I->second;
}
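
// Illustrative sketch (not part of the original source) of how the parameter
// index side table above might be used; the helper shown is hypothetical:
//
//   void recordIndices(ASTContext &Ctx, const FunctionDecl *FD) {
//     // Remember each parameter's position so it can be looked up later
//     // without walking the function's parameter list again.
//     for (unsigned I = 0, N = FD->getNumParams(); I != N; ++I)
//       Ctx.setParameterIndex(FD->getParamDecl(I), I);
//   }
//
// getParameterIndex asserts when queried for a ParmVarDecl that was never
// registered, so callers are expected to record the index first.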