//===--- ASTContext.cpp - Context to hold long-lived AST nodes ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <map>

using namespace clang;

unsigned ASTContext::NumImplicitDefaultConstructors;
unsigned ASTContext::NumImplicitDefaultConstructorsDeclared;
unsigned ASTContext::NumImplicitCopyConstructors;
unsigned ASTContext::NumImplicitCopyConstructorsDeclared;
unsigned ASTContext::NumImplicitMoveConstructors;
unsigned ASTContext::NumImplicitMoveConstructorsDeclared;
unsigned ASTContext::NumImplicitCopyAssignmentOperators;
unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
unsigned ASTContext::NumImplicitMoveAssignmentOperators;
unsigned ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;
unsigned ASTContext::NumImplicitDestructors;
unsigned ASTContext::NumImplicitDestructorsDeclared;

enum FloatingRank {
  HalfRank, FloatRank, DoubleRank, LongDoubleRank
};

RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  if (!CommentsLoaded && ExternalSource) {
    ExternalSource->ReadComments();
    CommentsLoaded = true;
  }

  assert(D);

  // The user cannot attach documentation to implicit declarations.
  if (D->isImplicit())
    return NULL;

  // The user cannot attach documentation to implicit instantiations.
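  // (Their documentation, if any, lives on the template itself; callers such
  // as getRawCommentForAnyRedecl first map an instantiation back to its
  // template with adjustDeclToTemplate before asking for a comment.)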
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return NULL;
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return NULL;
  }

  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return NULL;
  }

  if (const ClassTemplateSpecializationDecl *CTSD =
          dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return NULL;
  }

  if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return NULL;
  }
  if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
    // When a tag declaration (but not its definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get a comment.
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return NULL;
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return NULL;

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return NULL;

  ArrayRef<RawComment *> RawComments = Comments.getComments();

  // If there are no comments anywhere, we won't find anything.
  if (RawComments.empty())
    return NULL;

  // Find the declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use the declaration starting location as the
  // "declaration location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation DeclLoc;
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) ||
      isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D))
    DeclLoc = D->getLocStart();
  else {
    DeclLoc = D->getLocation();
    // If the location of the typedef name is in a macro, it is because it was
    // declared via a macro. Try using the declaration's starting location as
    // the "declaration location" instead.
    if (DeclLoc.isMacroID() && isa<TypedefDecl>(D))
      DeclLoc = D->getLocStart();
  }

  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
    return NULL;

  // Find the comment that occurs just after this declaration.
  ArrayRef<RawComment *>::iterator Comment;
  {
    // When searching for comments during parsing, the comment we are looking
    // for is usually among the last two comments we parsed -- check them
    // first.
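    // (CommentAtDeclLoc below is a RawComment built at the declaration's
    // location; it only serves as the comparison key for these checks.)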
    RawComment CommentAtDeclLoc(
        SourceMgr, SourceRange(DeclLoc), false,
        LangOpts.CommentOpts.ParseAllComments);
    BeforeThanCompare<RawComment> Compare(SourceMgr);
    ArrayRef<RawComment *>::iterator MaybeBeforeDecl = RawComments.end() - 1;
    bool Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
    if (!Found && RawComments.size() >= 2) {
      MaybeBeforeDecl--;
      Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
    }

    if (Found) {
      Comment = MaybeBeforeDecl + 1;
      assert(Comment == std::lower_bound(RawComments.begin(), RawComments.end(),
                                         &CommentAtDeclLoc, Compare));
    } else {
      // Slow path.
      Comment = std::lower_bound(RawComments.begin(), RawComments.end(),
                                 &CommentAtDeclLoc, Compare);
    }
  }

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  std::pair<FileID, unsigned> DeclLocDecomp = SourceMgr.getDecomposedLoc(DeclLoc);

  // First check whether we have a trailing comment.
  if (Comment != RawComments.end() &&
      (*Comment)->isDocumentation() && (*Comment)->isTrailingComment() &&
      (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
       isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
    std::pair<FileID, unsigned> CommentBeginDecomp
      = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getBegin());
    // Check that the Doxygen trailing comment comes after the declaration and
    // starts on the same line and in the same file as the declaration.
    if (DeclLocDecomp.first == CommentBeginDecomp.first &&
        SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second)
          == SourceMgr.getLineNumber(CommentBeginDecomp.first,
                                     CommentBeginDecomp.second)) {
      return *Comment;
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (Comment == RawComments.begin())
    return NULL;
  --Comment;

  // Check that we actually have a non-member Doxygen comment.
  if (!(*Comment)->isDocumentation() || (*Comment)->isTrailingComment())
    return NULL;

  // Decompose the end of the comment.
  std::pair<FileID, unsigned> CommentEndDecomp
    = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getEnd());

  // If the comment and the declaration aren't in the same file, then they
  // aren't related.
  if (DeclLocDecomp.first != CommentEndDecomp.first)
    return NULL;

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return NULL;

  // Extract the text between the comment and the declaration.
  StringRef Text(Buffer + CommentEndDecomp.second,
                 DeclLocDecomp.second - CommentEndDecomp.second);

  // There should be no other declarations or preprocessor directives between
  // the comment and the declaration.
  if (Text.find_first_of(";{}#@") != StringRef::npos)
    return NULL;

  return *Comment;
}

namespace {
/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to the template.
const Decl *adjustDeclToTemplate(const Decl *D) {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return FTD;

    // Nothing to do if the function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Is the function an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return FTD;

    // Is the function instantiated from a member definition of a class
    // template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return MemberDecl;

    return D;
  }
  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    // Is the static data member instantiated from a member definition of a
    // class template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return MemberDecl;

    return D;
  }
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return CTD;

    // Is the class an implicit instantiation of a class template or partial
    // specialization?
    if (const ClassTemplateSpecializationDecl *CTSD =
            dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl*>() ?
          static_cast<const Decl*>(PU.get<ClassTemplateDecl *>()) :
          static_cast<const Decl*>(
              PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Is the class instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return Info->getInstantiatedFrom();

    return D;
  }
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
    // Is the enum instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}
} // unnamed namespace

const RawComment *ASTContext::getRawCommentForAnyRedecl(
                                              const Decl *D,
                                              const Decl **OriginalDecl) const {
  D = adjustDeclToTemplate(D);

  // Check whether we have cached a comment for this declaration already.
  {
    llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
        RedeclComments.find(D);
    if (Pos != RedeclComments.end()) {
      const RawCommentAndCacheFlags &Raw = Pos->second;
      if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
        if (OriginalDecl)
          *OriginalDecl = Raw.getOriginalDecl();
        return Raw.getRaw();
      }
    }
  }

  // Search for comments attached to declarations in the redeclaration chain.
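  // Results (including the absence of a comment) are cached per redeclaration
  // as we go, so repeated lookups on the same chain stay cheap.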
  const RawComment *RC = NULL;
  const Decl *OriginalDeclForRC = NULL;
  for (Decl::redecl_iterator I = D->redecls_begin(),
                             E = D->redecls_end();
       I != E; ++I) {
    llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
        RedeclComments.find(*I);
    if (Pos != RedeclComments.end()) {
      const RawCommentAndCacheFlags &Raw = Pos->second;
      if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
        RC = Raw.getRaw();
        OriginalDeclForRC = Raw.getOriginalDecl();
        break;
      }
    } else {
      RC = getRawCommentForDeclNoCache(*I);
      OriginalDeclForRC = *I;
      RawCommentAndCacheFlags Raw;
      if (RC) {
        Raw.setRaw(RC);
        Raw.setKind(RawCommentAndCacheFlags::FromDecl);
      } else
        Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl);
      Raw.setOriginalDecl(*I);
      RedeclComments[*I] = Raw;
      if (RC)
        break;
    }
  }

  // If we found a comment, it should be a documentation comment.
  assert(!RC || RC->isDocumentation());

  if (OriginalDecl)
    *OriginalDecl = OriginalDeclForRC;

  // Update cache for every declaration in the redeclaration chain.
  RawCommentAndCacheFlags Raw;
  Raw.setRaw(RC);
  Raw.setKind(RawCommentAndCacheFlags::FromRedecl);
  Raw.setOriginalDecl(OriginalDeclForRC);

  for (Decl::redecl_iterator I = D->redecls_begin(),
                             E = D->redecls_end();
       I != E; ++I) {
    RawCommentAndCacheFlags &R = RedeclComments[*I];
    if (R.getKind() == RawCommentAndCacheFlags::NoCommentInDecl)
      R = Raw;
  }

  return RC;
}

static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (ObjCInterfaceDecl::known_extensions_iterator
           Ext = ID->known_extensions_begin(),
           ExtEnd = ID->known_extensions_end();
         Ext != ExtEnd; ++Ext) {
      if (ObjCMethodDecl *RedeclaredMethod =
            Ext->getMethod(ObjCMethod->getSelector(),
                           ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  comments::DeclInfo *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  comments::FullComment *CFC =
    new (*this) comments::FullComment(FC->getBlocks(),
                                      ThisDeclInfo);
  return CFC;
}

comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ?
      RC->parse(*this, 0, D) : 0;
}

comments::FullComment *ASTContext::getCommentForDecl(
                                              const Decl *D,
                                              const Preprocessor *PP) const {
  if (D->isInvalidDecl())
    return NULL;
  D = adjustDeclToTemplate(D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const TagType *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const ObjCInterfaceDecl *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return NULL;
      // Check non-virtual bases.
      for (CXXRecordDecl::base_class_const_iterator I =
           RD->bases_begin(), E = RD->bases_end(); I != E; ++I) {
        if (I->isVirtual() || (I->getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I->getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl(NonVirtualBase, PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
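      // (Public virtual bases only, mirroring the non-virtual-base walk above.)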
      for (CXXRecordDecl::base_class_const_iterator I =
           RD->vbases_begin(), E = RD->vbases_end(); I != E; ++I) {
        if (I->getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I->getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl(VirtualBase, PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return NULL;
  }

  // If the RawComment was attached to another redeclaration of this Decl, we
  // should parse the comment in the context of that other Decl. This is
  // important because comments can contain references to parameter names which
  // can be different across redeclarations.
  if (D != OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      continue;
    }

    if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, TTP);
  }
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
                                          TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, TTP);
  void *InsertPos = 0;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
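  // Each parameter is re-created at translation-unit scope with no name and a
  // canonical type, so structurally equivalent template template parameters
  // end up sharing one canonical declaration.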
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
      CanonParams.push_back(
                  TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
                                               SourceLocation(),
                                               SourceLocation(),
                                               TTP->getDepth(),
                                               TTP->getIndex(), 0, false,
                                               TTP->isParameterPack()));
    else if (NonTypeTemplateParmDecl *NTTP
             = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getCanonicalType(NTTP->getType());
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
                                getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), 0,
                                                T,
                                                TInfo,
                                                ExpandedTypes.data(),
                                                ExpandedTypes.size(),
                                                ExpandedTInfos.data());
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), 0,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);

    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
                                           cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP
    = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                       SourceLocation(), TTP->getDepth(),
                                       TTP->getPosition(),
                                       TTP->isParameterPack(),
                                       0,
                         TemplateParameterList::Create(*this, SourceLocation(),
                                                       SourceLocation(),
                                                       CanonParams.data(),
                                                       CanonParams.size(),
                                                       SourceLocation()));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(Canonical == 0 && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return 0;

  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
    return CreateARMCXXABI(*this);
  case TargetCXXABI::GenericAArch64: // Same as Itanium at this level
  case TargetCXXABI::GenericItanium:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
                                             const LangOptions &LOpts) {
  if (LOpts.FakeAddressSpaceMap) {
    // The fake address space map must have a distinct entry for each
    // language-specific address space.
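    // (For example, with this map an 'opencl_global' pointer is lowered with
    // target address space 1 instead of whatever the real target would use.)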
    static const unsigned FakeAddrSpaceMap[] = {
      1, // opencl_global
      2, // opencl_local
      3, // opencl_constant
      4, // cuda_device
      5, // cuda_constant
      6  // cuda_shared
    };
    return &FakeAddrSpaceMap;
  } else {
    return &T.getAddressSpaceMap();
  }
}

ASTContext::ASTContext(LangOptions& LOpts, SourceManager &SM,
                       const TargetInfo *t,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins,
                       unsigned size_reserve,
                       bool DelayInitialization)
  : FunctionProtoTypes(this_()),
    TemplateSpecializationTypes(this_()),
    DependentTemplateSpecializationTypes(this_()),
    SubstTemplateTemplateParmPacks(this_()),
    GlobalNestedNameSpecifier(0),
    Int128Decl(0), UInt128Decl(0), Float128StubDecl(0),
    BuiltinVaListDecl(0),
    ObjCIdDecl(0), ObjCSelDecl(0), ObjCClassDecl(0), ObjCProtocolClassDecl(0),
    BOOLDecl(0),
    CFConstantStringTypeDecl(0), ObjCInstanceTypeDecl(0),
    FILEDecl(0),
    jmp_bufDecl(0), sigjmp_bufDecl(0), ucontext_tDecl(0),
    BlockDescriptorType(0), BlockDescriptorExtendedType(0),
    cudaConfigureCallDecl(0),
    NullTypeSourceInfo(QualType()),
    FirstLocalImport(), LastLocalImport(),
    SourceMgr(SM), LangOpts(LOpts),
    AddrSpaceMap(0), Target(t), PrintingPolicy(LOpts),
    Idents(idents), Selectors(sels),
    BuiltinInfo(builtins),
    DeclarationNames(*this),
    ExternalSource(0), Listener(0),
    Comments(SM), CommentsLoaded(false),
    CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
    LastSDM(0, 0)
{
  if (size_reserve > 0) Types.reserve(size_reserve);
  TUDecl = TranslationUnitDecl::Create(*this);

  if (!DelayInitialization) {
    assert(t && "No target supplied for ASTContext initialization");
    InitBuiltinTypes(*t);
  }
}

ASTContext::~ASTContext() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (DeallocationMap::const_iterator I = Deallocations.begin(),
         E = Deallocations.end(); I != E; ++I)
    for (unsigned J = 0, N = I->second.size(); J != N; ++J)
      (I->first)((I->second)[J]);

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
         const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
      R->Destroy(*this);

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
      R->Destroy(*this);
  }

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
         AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
}

void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
  Deallocations[Callback].push_back(Data);
}

void
ASTContext::setExternalSource(OwningPtr<ExternalASTSource> &Source) {
  ExternalSource.reset(Source.take());
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.def"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types\n";                                         \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.def"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource.get()) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl) {
    TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(Int128Ty);
    Int128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
                                     getTranslationUnitDecl(),
                                     SourceLocation(),
                                     SourceLocation(),
                                     &Idents.get("__int128_t"),
                                     TInfo);
  }

  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl) {
    TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(UnsignedInt128Ty);
    UInt128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
                                      getTranslationUnitDecl(),
                                      SourceLocation(),
                                      SourceLocation(),
                                      &Idents.get("__uint128_t"),
                                      TInfo);
  }

  return UInt128Decl;
}

TypeDecl *ASTContext::getFloat128StubType() const {
  assert(LangOpts.CPlusPlus && "should only be called for c++");
  if (!Float128StubDecl) {
    Float128StubDecl = CXXRecordDecl::Create(const_cast<ASTContext &>(*this),
                                             TTK_Struct,
                                             getTranslationUnitDecl(),
                                             SourceLocation(),
                                             SourceLocation(),
                                             &Idents.get("__float128"));
  }

  return Float128StubDecl;
}

void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  BuiltinType *Ty = new (*this, TypeAlignment) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else  // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
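  // (e.g. Objective-C property references, which are later rewritten into
  // accessor calls).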
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // C99 6.2.5p11.
  FloatComplexTy = getComplexType(FloatTy);
  DoubleComplexTy = getComplexType(DoubleTy);
  LongDoubleComplexTy = getComplexType(LongDoubleTy);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
    InitBuiltinType(OCLImage1dTy, BuiltinType::OCLImage1d);
    InitBuiltinType(OCLImage1dArrayTy, BuiltinType::OCLImage1dArray);
    InitBuiltinType(OCLImage1dBufferTy, BuiltinType::OCLImage1dBuffer);
    InitBuiltinType(OCLImage2dTy, BuiltinType::OCLImage2d);
    InitBuiltinType(OCLImage2dArrayTy, BuiltinType::OCLImage2dArray);
    InitBuiltinType(OCLImage3dTy, BuiltinType::OCLImage3d);

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  VoidPtrTy = getPointerType(VoidTy);

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  // Builtin type used to help define __builtin_va_list.
  VaListTagTy = QualType();
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// \brief Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
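/// If the given variable is a static data member instantiated from a member of
/// a class template, return its MemberSpecializationInfo; otherwise return
/// null.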
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return TemplateOrSpecializationInfo();

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                          SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

FunctionDecl *ASTContext::getClassScopeSpecializationPattern(
                                                    const FunctionDecl *FD) {
  assert(FD && "Specialization is 0");
  llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos
    = ClassScopeSpecializationPattern.find(FD);
  if (Pos == ClassScopeSpecializationPattern.end())
    return 0;

  return Pos->second;
}

void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD,
                                                    FunctionDecl *Pattern) {
  assert(FD && "Specialization is 0");
  assert(Pattern && "Class scope specialization pattern is 0");
  ClassScopeSpecializationPattern[FD] = Pattern;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(UsingDecl *UUD) {
  llvm::DenseMap<UsingDecl *, NamedDecl *>::const_iterator Pos
    = InstantiatedFromUsingDecl.find(UUD);
  if (Pos == InstantiatedFromUsingDecl.end())
    return 0;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingDecl(UsingDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
    = InstantiatedFromUsingShadowDecl.find(Inst);
  if (Pos == InstantiatedFromUsingShadowDecl.end())
    return 0;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
    = InstantiatedFromUnnamedFieldDecl.find(Field);
  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
    return 0;

  return Pos->second;
}

void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
                                                     FieldDecl *Tmpl) {
  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
         "Already noted what unnamed field was instantiated from");

  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
    = OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return 0;

  return Pos->second.begin();
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
    = OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return 0;

  return Pos->second.end();
}

unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
    = OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return 0;

  return Pos->second.size();
}

void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(Overridden);
}

void ASTContext::getOverriddenMethods(
                      const NamedDecl *D,
                      SmallVectorImpl<const NamedDecl *> &Overridden) const {
  assert(D);

  if (const CXXMethodDecl *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
    Overridden.append(overridden_methods_begin(CXXMethod),
                      overridden_methods_end(CXXMethod));
    return;
  }

  const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(D);
  if (!Method)
    return;

  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
  Method->getOverriddenMethods(OverDecls);
  Overridden.append(OverDecls.begin(), OverDecls.end());
}

void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
  assert(!Import->NextLocalImport && "Import declaration already in the chain");
  assert(!Import->isFromASTFile() && "Non-local import declaration");
  if (!FirstLocalImport) {
    FirstLocalImport = Import;
    LastLocalImport = Import;
    return;
  }

  LastLocalImport->NextLocalImport = Import;
  LastLocalImport = Import;
}

//===----------------------------------------------------------------------===//
//                         Type Sizing and Analysis
//===----------------------------------------------------------------------===//

/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
/// scalar floating point type.
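/// For example, on typical targets 'double' maps to llvm::APFloat::IEEEdouble,
/// while 'long double' maps to a target-specific format (x87 extended, IEEE
/// quad, etc.).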
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  const BuiltinType *BT = T->getAs<BuiltinType>();
  assert(BT && "Not a floating point type!");
  switch (BT->getKind()) {
  default: llvm_unreachable("Not a floating point type!");
  case BuiltinType::Half:       return Target->getHalfFormat();
  case BuiltinType::Float:      return Target->getFloatFormat();
  case BuiltinType::Double:     return Target->getDoubleFormat();
  case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
  }
}

/// getDeclAlign - Return a conservative estimate of the alignment of the
/// specified decl. Note that bitfields do not have a valid alignment, so
/// this method will assert on them.
/// If @p RefAsPointee, references are treated like their underlying type
/// (for alignof), else they're treated like pointers (for CodeGen).
CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) const {
  unsigned Align = Target->getCharWidth();

  bool UseAlignAttrOnly = false;
  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
    Align = AlignFromAttr;

    // __attribute__((aligned)) can increase or decrease alignment
    // *except* on a struct or struct member, where it only increases
    // alignment unless 'packed' is also specified.
    //
    // It is an error for alignas to decrease alignment, so we can
    // ignore that possibility; Sema should diagnose it.
    if (isa<FieldDecl>(D)) {
      UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
    } else {
      UseAlignAttrOnly = true;
    }
  }
  else if (isa<FieldDecl>(D))
    UseAlignAttrOnly =
      D->hasAttr<PackedAttr>() ||
      cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();

  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing

  } else if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
      if (RefAsPointee)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    if (!T->isIncompleteType() && !T->isFunctionType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      unsigned MinWidth = Target->getLargeArrayMinWidth();
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        if (MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }

        // Walk through any array types while we're at it.
        T = getBaseElementType(arrayType);
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
        if (VD->hasGlobalStorage())
          Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
      }
    }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack).
    // So calculate the actual alignment of the field within the struct, and
    // then (as we're expected to) constrain that by the alignment of the type.
    if (const FieldDecl *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  return toCharUnitsFromBits(Align);
}

// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
// using a trivial copy/move assignment operator.
std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
  std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);

  // In C++, objects can sometimes be allocated into the tail padding
  // of a base-class subobject. We decide whether that's possible
  // during class layout, so here we can just trust the layout results.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = T->getAs<RecordType>()) {
      const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
      sizeAndAlign.first = layout.getDataSize();
    }
  }

  return sizeAndAlign;
}

/// getConstantArrayInfoInChars - Performing the computation in CharUnits
/// instead of in bits prevents overflowing the uint64_t for some large arrays.
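/// (For example, with 8-bit chars an array of 2^61 elements has a bit width of
/// 2^64, which overflows uint64_t, while its size in chars still fits.)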
std::pair<CharUnits, CharUnits>
static getConstantArrayInfoInChars(const ASTContext &Context,
                                   const ConstantArrayType *CAT) {
  std::pair<CharUnits, CharUnits> EltInfo =
      Context.getTypeInfoInChars(CAT->getElementType());
  uint64_t Size = CAT->getSize().getZExtValue();
  assert((Size == 0 || static_cast<uint64_t>(EltInfo.first.getQuantity()) <=
          (uint64_t)(-1)/Size) &&
         "Overflow in array type char size evaluation");
  uint64_t Width = EltInfo.first.getQuantity() * Size;
  unsigned Align = EltInfo.second.getQuantity();
  Width = llvm::RoundUpToAlignment(Width, Align);
  return std::make_pair(CharUnits::fromQuantity(Width),
                        CharUnits::fromQuantity(Align));
}

std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoInChars(const Type *T) const {
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(T))
    return getConstantArrayInfoInChars(*this, CAT);
  std::pair<uint64_t, unsigned> Info = getTypeInfo(T);
  return std::make_pair(toCharUnitsFromBits(Info.first),
                        toCharUnitsFromBits(Info.second));
}

std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoInChars(QualType T) const {
  return getTypeInfoInChars(T.getTypePtr());
}

std::pair<uint64_t, unsigned> ASTContext::getTypeInfo(const Type *T) const {
  TypeInfoMap::iterator it = MemoizedTypeInfo.find(T);
  if (it != MemoizedTypeInfo.end())
    return it->second;

  std::pair<uint64_t, unsigned> Info = getTypeInfoImpl(T);
  MemoizedTypeInfo.insert(std::make_pair(T, Info));
  return Info;
}

/// getTypeInfoImpl - Return the size of the specified type, in bits. This
/// method does not work on incomplete types.
///
/// FIXME: Pointers into different addr spaces could have different sizes and
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
std::pair<uint64_t, unsigned>
ASTContext::getTypeInfoImpl(const Type *T) const {
  uint64_t Width = 0;
  unsigned Align = 8;
  switch (T->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)                      \
  case Type::Class:                                                           \
  assert(!T->isDependentType() && "should not see dependent types here");     \
  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("Should not see dependent types");

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // GCC extension: alignof(function) = 32 bits
    Width = 0;
    Align = 32;
    break;

  case Type::IncompleteArray:
  case Type::VariableArray:
    Width = 0;
    Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
    break;

  case Type::ConstantArray: {
    const ConstantArrayType *CAT = cast<ConstantArrayType>(T);

    std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(CAT->getElementType());
    uint64_t Size = CAT->getSize().getZExtValue();
    assert((Size == 0 || EltInfo.first <= (uint64_t)(-1)/Size) &&
           "Overflow in array type bit size evaluation");
    Width = EltInfo.first*Size;
    Align = EltInfo.second;
    Width = llvm::RoundUpToAlignment(Width, Align);
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType *VT = cast<VectorType>(T);
    std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(VT->getElementType());
    Width = EltInfo.first*VT->getNumElements();
    Align = Width;
    // If the alignment is not a power of 2, round up to the next power of 2.
    // This happens for non-power-of-2 length vectors.
    if (Align & (Align-1)) {
      Align = llvm::NextPowerOf2(Align);
      Width = llvm::RoundUpToAlignment(Width, Align);
    }
    // Adjust the alignment based on the target max.
    uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
    if (TargetVectorAlign && TargetVectorAlign < Align)
      Align = TargetVectorAlign;
    break;
  }

  case Type::Builtin:
    switch (cast<BuiltinType>(T)->getKind()) {
    default: llvm_unreachable("Unknown builtin type!");
    case BuiltinType::Void:
      // GCC extension: alignof(void) = 8 bits.
      Width = 0;
      Align = 8;
      break;

    case BuiltinType::Bool:
      Width = Target->getBoolWidth();
      Align = Target->getBoolAlign();
      break;
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
      Width = Target->getCharWidth();
      Align = Target->getCharAlign();
      break;
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
      Width = Target->getWCharWidth();
      Align = Target->getWCharAlign();
      break;
    case BuiltinType::Char16:
      Width = Target->getChar16Width();
      Align = Target->getChar16Align();
      break;
    case BuiltinType::Char32:
      Width = Target->getChar32Width();
      Align = Target->getChar32Align();
      break;
    case BuiltinType::UShort:
    case BuiltinType::Short:
      Width = Target->getShortWidth();
      Align = Target->getShortAlign();
      break;
    case BuiltinType::UInt:
    case BuiltinType::Int:
      Width = Target->getIntWidth();
      Align = Target->getIntAlign();
      break;
    case BuiltinType::ULong:
    case BuiltinType::Long:
      Width = Target->getLongWidth();
      Align = Target->getLongAlign();
      break;
    case BuiltinType::ULongLong:
    case BuiltinType::LongLong:
      Width = Target->getLongLongWidth();
      Align = Target->getLongLongAlign();
      break;
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      Width = 128;
      Align = 128; // int128_t is 128-bit aligned on all targets.
      break;
    case BuiltinType::Half:
      Width = Target->getHalfWidth();
      Align = Target->getHalfAlign();
      break;
    case BuiltinType::Float:
      Width = Target->getFloatWidth();
      Align = Target->getFloatAlign();
      break;
    case BuiltinType::Double:
      Width = Target->getDoubleWidth();
      Align = Target->getDoubleAlign();
      break;
    case BuiltinType::LongDouble:
      Width = Target->getLongDoubleWidth();
      Align = Target->getLongDoubleAlign();
      break;
    case BuiltinType::NullPtr:
      Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
      Align = Target->getPointerAlign(0); //   == sizeof(void*)
      break;
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      Width = Target->getPointerWidth(0);
      Align = Target->getPointerAlign(0);
      break;
    case BuiltinType::OCLSampler:
      // Samplers are modeled as integers.
      Width = Target->getIntWidth();
      Align = Target->getIntAlign();
      break;
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLImage1d:
    case BuiltinType::OCLImage1dArray:
    case BuiltinType::OCLImage1dBuffer:
    case BuiltinType::OCLImage2d:
    case BuiltinType::OCLImage2dArray:
    case BuiltinType::OCLImage3d:
      // Currently these types are pointers to opaque types.
      Width = Target->getPointerWidth(0);
      Align = Target->getPointerAlign(0);
      break;
    }
    break;
  case Type::ObjCObjectPointer:
    Width = Target->getPointerWidth(0);
    Align = Target->getPointerAlign(0);
    break;
  case Type::BlockPointer: {
    unsigned AS = getTargetAddressSpace(
        cast<BlockPointerType>(T)->getPointeeType());
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    // alignof and sizeof should never enter this code path here, so we go
    // the pointer route.
1586 unsigned AS = getTargetAddressSpace( 1587 cast<ReferenceType>(T)->getPointeeType()); 1588 Width = Target->getPointerWidth(AS); 1589 Align = Target->getPointerAlign(AS); 1590 break; 1591 } 1592 case Type::Pointer: { 1593 unsigned AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType()); 1594 Width = Target->getPointerWidth(AS); 1595 Align = Target->getPointerAlign(AS); 1596 break; 1597 } 1598 case Type::MemberPointer: { 1599 const MemberPointerType *MPT = cast<MemberPointerType>(T); 1600 llvm::tie(Width, Align) = ABI->getMemberPointerWidthAndAlign(MPT); 1601 break; 1602 } 1603 case Type::Complex: { 1604 // Complex types have the same alignment as their elements, but twice the 1605 // size. 1606 std::pair<uint64_t, unsigned> EltInfo = 1607 getTypeInfo(cast<ComplexType>(T)->getElementType()); 1608 Width = EltInfo.first*2; 1609 Align = EltInfo.second; 1610 break; 1611 } 1612 case Type::ObjCObject: 1613 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 1614 case Type::Decayed: 1615 return getTypeInfo(cast<DecayedType>(T)->getDecayedType().getTypePtr()); 1616 case Type::ObjCInterface: { 1617 const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T); 1618 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 1619 Width = toBits(Layout.getSize()); 1620 Align = toBits(Layout.getAlignment()); 1621 break; 1622 } 1623 case Type::Record: 1624 case Type::Enum: { 1625 const TagType *TT = cast<TagType>(T); 1626 1627 if (TT->getDecl()->isInvalidDecl()) { 1628 Width = 8; 1629 Align = 8; 1630 break; 1631 } 1632 1633 if (const EnumType *ET = dyn_cast<EnumType>(TT)) 1634 return getTypeInfo(ET->getDecl()->getIntegerType()); 1635 1636 const RecordType *RT = cast<RecordType>(TT); 1637 const ASTRecordLayout &Layout = getASTRecordLayout(RT->getDecl()); 1638 Width = toBits(Layout.getSize()); 1639 Align = toBits(Layout.getAlignment()); 1640 break; 1641 } 1642 1643 case Type::SubstTemplateTypeParm: 1644 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 1645 getReplacementType().getTypePtr()); 1646 1647 case Type::Auto: { 1648 const AutoType *A = cast<AutoType>(T); 1649 assert(!A->getDeducedType().isNull() && 1650 "cannot request the size of an undeduced or dependent auto type"); 1651 return getTypeInfo(A->getDeducedType().getTypePtr()); 1652 } 1653 1654 case Type::Paren: 1655 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 1656 1657 case Type::Typedef: { 1658 const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl(); 1659 std::pair<uint64_t, unsigned> Info 1660 = getTypeInfo(Typedef->getUnderlyingType().getTypePtr()); 1661 // If the typedef has an aligned attribute on it, it overrides any computed 1662 // alignment we have. This violates the GCC documentation (which says that 1663 // attribute(aligned) can only round up) but matches its implementation. 1664 if (unsigned AttrAlign = Typedef->getMaxAlignment()) 1665 Align = AttrAlign; 1666 else 1667 Align = Info.second; 1668 Width = Info.first; 1669 break; 1670 } 1671 1672 case Type::Elaborated: 1673 return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr()); 1674 1675 case Type::Attributed: 1676 return getTypeInfo( 1677 cast<AttributedType>(T)->getEquivalentType().getTypePtr()); 1678 1679 case Type::Atomic: { 1680 // Start with the base type information. 
1681 std::pair<uint64_t, unsigned> Info
1682 = getTypeInfo(cast<AtomicType>(T)->getValueType());
1683 Width = Info.first;
1684 Align = Info.second;
1685
1686 // If the size of the type doesn't exceed the platform's max
1687 // atomic promotion width, make the size and alignment more
1688 // favorable to atomic operations:
1689 if (Width != 0 && Width <= Target->getMaxAtomicPromoteWidth()) {
1690 // Round the size up to a power of 2.
1691 if (!llvm::isPowerOf2_64(Width))
1692 Width = llvm::NextPowerOf2(Width);
1693
1694 // Set the alignment equal to the size.
1695 Align = static_cast<unsigned>(Width);
1696 }
1697 }
1698
1699 }
1700
1701 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
1702 return std::make_pair(Width, Align);
1703 }
1704
1705 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
1706 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
1707 return CharUnits::fromQuantity(BitSize / getCharWidth());
1708 }
1709
1710 /// toBits - Convert a size in characters to a size in bits.
1711 int64_t ASTContext::toBits(CharUnits CharSize) const {
1712 return CharSize.getQuantity() * getCharWidth();
1713 }
1714
1715 /// getTypeSizeInChars - Return the size of the specified type, in characters.
1716 /// This method does not work on incomplete types.
1717 CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
1718 return getTypeInfoInChars(T).first;
1719 }
1720 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
1721 return getTypeInfoInChars(T).first;
1722 }
1723
1724 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
1725 /// characters. This method does not work on incomplete types.
1726 CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
1727 return toCharUnitsFromBits(getTypeAlign(T));
1728 }
1729 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
1730 return toCharUnitsFromBits(getTypeAlign(T));
1731 }
1732
1733 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
1734 /// type for the current target in bits. This can be different from the ABI
1735 /// alignment in cases where it is beneficial for performance to overalign
1736 /// a data type.
1737 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
1738 unsigned ABIAlign = getTypeAlign(T);
1739
1740 // Double and long long should be naturally aligned if possible.
1741 if (const ComplexType* CT = T->getAs<ComplexType>())
1742 T = CT->getElementType().getTypePtr();
1743 if (T->isSpecificBuiltinType(BuiltinType::Double) ||
1744 T->isSpecificBuiltinType(BuiltinType::LongLong) ||
1745 T->isSpecificBuiltinType(BuiltinType::ULongLong))
1746 return std::max(ABIAlign, (unsigned)getTypeSize(T));
1747
1748 return ABIAlign;
1749 }
1750
1751 /// getAlignOfGlobalVar - Return the alignment in bits that should be given
1752 /// to a global variable of the specified type.
1753 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
1754 return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign());
1755 }
1756
1757 /// getAlignOfGlobalVarInChars - Return the alignment in characters that
1758 /// should be given to a global variable of the specified type.
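/// This is the type's alignment in bits raised to getMinGlobalAlign() for
/// targets that impose a minimum alignment on all globals, converted to
/// char units.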
1759 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 1760 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 1761 } 1762 1763 /// DeepCollectObjCIvars - 1764 /// This routine first collects all declared, but not synthesized, ivars in 1765 /// super class and then collects all ivars, including those synthesized for 1766 /// current class. This routine is used for implementation of current class 1767 /// when all ivars, declared and synthesized are known. 1768 /// 1769 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 1770 bool leafClass, 1771 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 1772 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 1773 DeepCollectObjCIvars(SuperClass, false, Ivars); 1774 if (!leafClass) { 1775 for (ObjCInterfaceDecl::ivar_iterator I = OI->ivar_begin(), 1776 E = OI->ivar_end(); I != E; ++I) 1777 Ivars.push_back(*I); 1778 } else { 1779 ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 1780 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 1781 Iv= Iv->getNextIvar()) 1782 Ivars.push_back(Iv); 1783 } 1784 } 1785 1786 /// CollectInheritedProtocols - Collect all protocols in current class and 1787 /// those inherited by it. 1788 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 1789 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 1790 if (const ObjCInterfaceDecl *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 1791 // We can use protocol_iterator here instead of 1792 // all_referenced_protocol_iterator since we are walking all categories. 1793 for (ObjCInterfaceDecl::all_protocol_iterator P = OI->all_referenced_protocol_begin(), 1794 PE = OI->all_referenced_protocol_end(); P != PE; ++P) { 1795 ObjCProtocolDecl *Proto = (*P); 1796 Protocols.insert(Proto->getCanonicalDecl()); 1797 for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(), 1798 PE = Proto->protocol_end(); P != PE; ++P) { 1799 Protocols.insert((*P)->getCanonicalDecl()); 1800 CollectInheritedProtocols(*P, Protocols); 1801 } 1802 } 1803 1804 // Categories of this Interface. 
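// Protocols adopted in any visible category of this interface are treated
// as inherited by the class as well.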
1805 for (ObjCInterfaceDecl::visible_categories_iterator 1806 Cat = OI->visible_categories_begin(), 1807 CatEnd = OI->visible_categories_end(); 1808 Cat != CatEnd; ++Cat) { 1809 CollectInheritedProtocols(*Cat, Protocols); 1810 } 1811 1812 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 1813 while (SD) { 1814 CollectInheritedProtocols(SD, Protocols); 1815 SD = SD->getSuperClass(); 1816 } 1817 } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 1818 for (ObjCCategoryDecl::protocol_iterator P = OC->protocol_begin(), 1819 PE = OC->protocol_end(); P != PE; ++P) { 1820 ObjCProtocolDecl *Proto = (*P); 1821 Protocols.insert(Proto->getCanonicalDecl()); 1822 for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(), 1823 PE = Proto->protocol_end(); P != PE; ++P) 1824 CollectInheritedProtocols(*P, Protocols); 1825 } 1826 } else if (const ObjCProtocolDecl *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 1827 for (ObjCProtocolDecl::protocol_iterator P = OP->protocol_begin(), 1828 PE = OP->protocol_end(); P != PE; ++P) { 1829 ObjCProtocolDecl *Proto = (*P); 1830 Protocols.insert(Proto->getCanonicalDecl()); 1831 for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(), 1832 PE = Proto->protocol_end(); P != PE; ++P) 1833 CollectInheritedProtocols(*P, Protocols); 1834 } 1835 } 1836 } 1837 1838 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 1839 unsigned count = 0; 1840 // Count ivars declared in class extension. 1841 for (ObjCInterfaceDecl::known_extensions_iterator 1842 Ext = OI->known_extensions_begin(), 1843 ExtEnd = OI->known_extensions_end(); 1844 Ext != ExtEnd; ++Ext) { 1845 count += Ext->ivar_size(); 1846 } 1847 1848 // Count ivar defined in this class's implementation. This 1849 // includes synthesized ivars. 1850 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 1851 count += ImplDecl->ivar_size(); 1852 1853 return count; 1854 } 1855 1856 bool ASTContext::isSentinelNullExpr(const Expr *E) { 1857 if (!E) 1858 return false; 1859 1860 // nullptr_t is always treated as null. 1861 if (E->getType()->isNullPtrType()) return true; 1862 1863 if (E->getType()->isAnyPointerType() && 1864 E->IgnoreParenCasts()->isNullPointerConstant(*this, 1865 Expr::NPC_ValueDependentIsNull)) 1866 return true; 1867 1868 // Unfortunately, __null has type 'int'. 1869 if (isa<GNUNullExpr>(E)) return true; 1870 1871 return false; 1872 } 1873 1874 /// \brief Get the implementation of ObjCInterfaceDecl,or NULL if none exists. 1875 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 1876 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 1877 I = ObjCImpls.find(D); 1878 if (I != ObjCImpls.end()) 1879 return cast<ObjCImplementationDecl>(I->second); 1880 return 0; 1881 } 1882 /// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists. 1883 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 1884 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 1885 I = ObjCImpls.find(D); 1886 if (I != ObjCImpls.end()) 1887 return cast<ObjCCategoryImplDecl>(I->second); 1888 return 0; 1889 } 1890 1891 /// \brief Set the implementation of ObjCInterfaceDecl. 1892 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 1893 ObjCImplementationDecl *ImplD) { 1894 assert(IFaceD && ImplD && "Passed null params"); 1895 ObjCImpls[IFaceD] = ImplD; 1896 } 1897 /// \brief Set the implementation of ObjCCategoryDecl. 
1898 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 1899 ObjCCategoryImplDecl *ImplD) { 1900 assert(CatD && ImplD && "Passed null params"); 1901 ObjCImpls[CatD] = ImplD; 1902 } 1903 1904 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 1905 const NamedDecl *ND) const { 1906 if (const ObjCInterfaceDecl *ID = 1907 dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 1908 return ID; 1909 if (const ObjCCategoryDecl *CD = 1910 dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 1911 return CD->getClassInterface(); 1912 if (const ObjCImplDecl *IMD = 1913 dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 1914 return IMD->getClassInterface(); 1915 1916 return 0; 1917 } 1918 1919 /// \brief Get the copy initialization expression of VarDecl,or NULL if 1920 /// none exists. 1921 Expr *ASTContext::getBlockVarCopyInits(const VarDecl*VD) { 1922 assert(VD && "Passed null params"); 1923 assert(VD->hasAttr<BlocksAttr>() && 1924 "getBlockVarCopyInits - not __block var"); 1925 llvm::DenseMap<const VarDecl*, Expr*>::iterator 1926 I = BlockVarCopyInits.find(VD); 1927 return (I != BlockVarCopyInits.end()) ? cast<Expr>(I->second) : 0; 1928 } 1929 1930 /// \brief Set the copy inialization expression of a block var decl. 1931 void ASTContext::setBlockVarCopyInits(VarDecl*VD, Expr* Init) { 1932 assert(VD && Init && "Passed null params"); 1933 assert(VD->hasAttr<BlocksAttr>() && 1934 "setBlockVarCopyInits - not __block var"); 1935 BlockVarCopyInits[VD] = Init; 1936 } 1937 1938 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 1939 unsigned DataSize) const { 1940 if (!DataSize) 1941 DataSize = TypeLoc::getFullDataSizeForType(T); 1942 else 1943 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 1944 "incorrect data size provided to CreateTypeSourceInfo!"); 1945 1946 TypeSourceInfo *TInfo = 1947 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 1948 new (TInfo) TypeSourceInfo(T); 1949 return TInfo; 1950 } 1951 1952 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 1953 SourceLocation L) const { 1954 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 1955 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 1956 return DI; 1957 } 1958 1959 const ASTRecordLayout & 1960 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 1961 return getObjCLayout(D, 0); 1962 } 1963 1964 const ASTRecordLayout & 1965 ASTContext::getASTObjCImplementationLayout( 1966 const ObjCImplementationDecl *D) const { 1967 return getObjCLayout(D->getClassInterface(), D); 1968 } 1969 1970 //===----------------------------------------------------------------------===// 1971 // Type creation/memoization methods 1972 //===----------------------------------------------------------------------===// 1973 1974 QualType 1975 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 1976 unsigned fastQuals = quals.getFastQualifiers(); 1977 quals.removeFastQualifiers(); 1978 1979 // Check if we've already instantiated this type. 1980 llvm::FoldingSetNodeID ID; 1981 ExtQuals::Profile(ID, baseType, quals); 1982 void *insertPos = 0; 1983 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 1984 assert(eq->getQualifiers() == quals); 1985 return QualType(eq, fastQuals); 1986 } 1987 1988 // If the base type is not canonical, make the appropriate canonical type. 
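// (Rough sketch: if 'baseType' is sugar such as a typedef, the canonical
// ExtQuals node is built over the canonical type with the same qualifiers,
// so the sugared and canonical spellings share one canonical type.)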
1989 QualType canon; 1990 if (!baseType->isCanonicalUnqualified()) { 1991 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 1992 canonSplit.Quals.addConsistentQualifiers(quals); 1993 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 1994 1995 // Re-find the insert position. 1996 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 1997 } 1998 1999 ExtQuals *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); 2000 ExtQualNodes.InsertNode(eq, insertPos); 2001 return QualType(eq, fastQuals); 2002 } 2003 2004 QualType 2005 ASTContext::getAddrSpaceQualType(QualType T, unsigned AddressSpace) const { 2006 QualType CanT = getCanonicalType(T); 2007 if (CanT.getAddressSpace() == AddressSpace) 2008 return T; 2009 2010 // If we are composing extended qualifiers together, merge together 2011 // into one ExtQuals node. 2012 QualifierCollector Quals; 2013 const Type *TypeNode = Quals.strip(T); 2014 2015 // If this type already has an address space specified, it cannot get 2016 // another one. 2017 assert(!Quals.hasAddressSpace() && 2018 "Type cannot be in multiple addr spaces!"); 2019 Quals.addAddressSpace(AddressSpace); 2020 2021 return getExtQualType(TypeNode, Quals); 2022 } 2023 2024 QualType ASTContext::getObjCGCQualType(QualType T, 2025 Qualifiers::GC GCAttr) const { 2026 QualType CanT = getCanonicalType(T); 2027 if (CanT.getObjCGCAttr() == GCAttr) 2028 return T; 2029 2030 if (const PointerType *ptr = T->getAs<PointerType>()) { 2031 QualType Pointee = ptr->getPointeeType(); 2032 if (Pointee->isAnyPointerType()) { 2033 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 2034 return getPointerType(ResultType); 2035 } 2036 } 2037 2038 // If we are composing extended qualifiers together, merge together 2039 // into one ExtQuals node. 2040 QualifierCollector Quals; 2041 const Type *TypeNode = Quals.strip(T); 2042 2043 // If this type already has an ObjCGC specified, it cannot get 2044 // another one. 2045 assert(!Quals.hasObjCGCAttr() && 2046 "Type cannot have multiple ObjCGCs!"); 2047 Quals.addObjCGCAttr(GCAttr); 2048 2049 return getExtQualType(TypeNode, Quals); 2050 } 2051 2052 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 2053 FunctionType::ExtInfo Info) { 2054 if (T->getExtInfo() == Info) 2055 return T; 2056 2057 QualType Result; 2058 if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 2059 Result = getFunctionNoProtoType(FNPT->getResultType(), Info); 2060 } else { 2061 const FunctionProtoType *FPT = cast<FunctionProtoType>(T); 2062 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 2063 EPI.ExtInfo = Info; 2064 Result = getFunctionType(FPT->getResultType(), FPT->getArgTypes(), EPI); 2065 } 2066 2067 return cast<FunctionType>(Result.getTypePtr()); 2068 } 2069 2070 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 2071 QualType ResultType) { 2072 FD = FD->getMostRecentDecl(); 2073 while (true) { 2074 const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>(); 2075 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 2076 FD->setType(getFunctionType(ResultType, FPT->getArgTypes(), EPI)); 2077 if (FunctionDecl *Next = FD->getPreviousDecl()) 2078 FD = Next; 2079 else 2080 break; 2081 } 2082 if (ASTMutationListener *L = getASTMutationListener()) 2083 L->DeducedReturnType(FD, ResultType); 2084 } 2085 2086 /// getComplexType - Return the uniqued reference to the type for a complex 2087 /// number with the specified element type. 
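/// For example (sketch; 'Ctx' stands for an ASTContext):
/// \code
///   QualType CF = Ctx.getComplexType(Ctx.FloatTy); // type '_Complex float'
/// \endcode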
2088 QualType ASTContext::getComplexType(QualType T) const { 2089 // Unique pointers, to guarantee there is only one pointer of a particular 2090 // structure. 2091 llvm::FoldingSetNodeID ID; 2092 ComplexType::Profile(ID, T); 2093 2094 void *InsertPos = 0; 2095 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 2096 return QualType(CT, 0); 2097 2098 // If the pointee type isn't canonical, this won't be a canonical type either, 2099 // so fill in the canonical type field. 2100 QualType Canonical; 2101 if (!T.isCanonical()) { 2102 Canonical = getComplexType(getCanonicalType(T)); 2103 2104 // Get the new insert position for the node we care about. 2105 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 2106 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2107 } 2108 ComplexType *New = new (*this, TypeAlignment) ComplexType(T, Canonical); 2109 Types.push_back(New); 2110 ComplexTypes.InsertNode(New, InsertPos); 2111 return QualType(New, 0); 2112 } 2113 2114 /// getPointerType - Return the uniqued reference to the type for a pointer to 2115 /// the specified type. 2116 QualType ASTContext::getPointerType(QualType T) const { 2117 // Unique pointers, to guarantee there is only one pointer of a particular 2118 // structure. 2119 llvm::FoldingSetNodeID ID; 2120 PointerType::Profile(ID, T); 2121 2122 void *InsertPos = 0; 2123 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 2124 return QualType(PT, 0); 2125 2126 // If the pointee type isn't canonical, this won't be a canonical type either, 2127 // so fill in the canonical type field. 2128 QualType Canonical; 2129 if (!T.isCanonical()) { 2130 Canonical = getPointerType(getCanonicalType(T)); 2131 2132 // Get the new insert position for the node we care about. 2133 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 2134 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2135 } 2136 PointerType *New = new (*this, TypeAlignment) PointerType(T, Canonical); 2137 Types.push_back(New); 2138 PointerTypes.InsertNode(New, InsertPos); 2139 return QualType(New, 0); 2140 } 2141 2142 QualType ASTContext::getDecayedType(QualType T) const { 2143 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 2144 2145 llvm::FoldingSetNodeID ID; 2146 DecayedType::Profile(ID, T); 2147 void *InsertPos = 0; 2148 if (DecayedType *DT = DecayedTypes.FindNodeOrInsertPos(ID, InsertPos)) 2149 return QualType(DT, 0); 2150 2151 QualType Decayed; 2152 2153 // C99 6.7.5.3p7: 2154 // A declaration of a parameter as "array of type" shall be 2155 // adjusted to "qualified pointer to type", where the type 2156 // qualifiers (if any) are those specified within the [ and ] of 2157 // the array type derivation. 2158 if (T->isArrayType()) 2159 Decayed = getArrayDecayedType(T); 2160 2161 // C99 6.7.5.3p8: 2162 // A declaration of a parameter as "function returning type" 2163 // shall be adjusted to "pointer to function returning type", as 2164 // in 6.3.2.1. 2165 if (T->isFunctionType()) 2166 Decayed = getPointerType(T); 2167 2168 QualType Canonical = getCanonicalType(Decayed); 2169 2170 // Get the new insert position for the node we care about. 
2171 DecayedType *NewIP = DecayedTypes.FindNodeOrInsertPos(ID, InsertPos); 2172 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2173 2174 DecayedType *New = 2175 new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical); 2176 Types.push_back(New); 2177 DecayedTypes.InsertNode(New, InsertPos); 2178 return QualType(New, 0); 2179 } 2180 2181 /// getBlockPointerType - Return the uniqued reference to the type for 2182 /// a pointer to the specified block. 2183 QualType ASTContext::getBlockPointerType(QualType T) const { 2184 assert(T->isFunctionType() && "block of function types only"); 2185 // Unique pointers, to guarantee there is only one block of a particular 2186 // structure. 2187 llvm::FoldingSetNodeID ID; 2188 BlockPointerType::Profile(ID, T); 2189 2190 void *InsertPos = 0; 2191 if (BlockPointerType *PT = 2192 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 2193 return QualType(PT, 0); 2194 2195 // If the block pointee type isn't canonical, this won't be a canonical 2196 // type either so fill in the canonical type field. 2197 QualType Canonical; 2198 if (!T.isCanonical()) { 2199 Canonical = getBlockPointerType(getCanonicalType(T)); 2200 2201 // Get the new insert position for the node we care about. 2202 BlockPointerType *NewIP = 2203 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 2204 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2205 } 2206 BlockPointerType *New 2207 = new (*this, TypeAlignment) BlockPointerType(T, Canonical); 2208 Types.push_back(New); 2209 BlockPointerTypes.InsertNode(New, InsertPos); 2210 return QualType(New, 0); 2211 } 2212 2213 /// getLValueReferenceType - Return the uniqued reference to the type for an 2214 /// lvalue reference to the specified type. 2215 QualType 2216 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 2217 assert(getCanonicalType(T) != OverloadTy && 2218 "Unresolved overloaded function type"); 2219 2220 // Unique pointers, to guarantee there is only one pointer of a particular 2221 // structure. 2222 llvm::FoldingSetNodeID ID; 2223 ReferenceType::Profile(ID, T, SpelledAsLValue); 2224 2225 void *InsertPos = 0; 2226 if (LValueReferenceType *RT = 2227 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 2228 return QualType(RT, 0); 2229 2230 const ReferenceType *InnerRef = T->getAs<ReferenceType>(); 2231 2232 // If the referencee type isn't canonical, this won't be a canonical type 2233 // either, so fill in the canonical type field. 2234 QualType Canonical; 2235 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 2236 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 2237 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 2238 2239 // Get the new insert position for the node we care about. 2240 LValueReferenceType *NewIP = 2241 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 2242 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2243 } 2244 2245 LValueReferenceType *New 2246 = new (*this, TypeAlignment) LValueReferenceType(T, Canonical, 2247 SpelledAsLValue); 2248 Types.push_back(New); 2249 LValueReferenceTypes.InsertNode(New, InsertPos); 2250 2251 return QualType(New, 0); 2252 } 2253 2254 /// getRValueReferenceType - Return the uniqued reference to the type for an 2255 /// rvalue reference to the specified type. 2256 QualType ASTContext::getRValueReferenceType(QualType T) const { 2257 // Unique pointers, to guarantee there is only one pointer of a particular 2258 // structure. 
2259 llvm::FoldingSetNodeID ID; 2260 ReferenceType::Profile(ID, T, false); 2261 2262 void *InsertPos = 0; 2263 if (RValueReferenceType *RT = 2264 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 2265 return QualType(RT, 0); 2266 2267 const ReferenceType *InnerRef = T->getAs<ReferenceType>(); 2268 2269 // If the referencee type isn't canonical, this won't be a canonical type 2270 // either, so fill in the canonical type field. 2271 QualType Canonical; 2272 if (InnerRef || !T.isCanonical()) { 2273 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 2274 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 2275 2276 // Get the new insert position for the node we care about. 2277 RValueReferenceType *NewIP = 2278 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 2279 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2280 } 2281 2282 RValueReferenceType *New 2283 = new (*this, TypeAlignment) RValueReferenceType(T, Canonical); 2284 Types.push_back(New); 2285 RValueReferenceTypes.InsertNode(New, InsertPos); 2286 return QualType(New, 0); 2287 } 2288 2289 /// getMemberPointerType - Return the uniqued reference to the type for a 2290 /// member pointer to the specified type, in the specified class. 2291 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 2292 // Unique pointers, to guarantee there is only one pointer of a particular 2293 // structure. 2294 llvm::FoldingSetNodeID ID; 2295 MemberPointerType::Profile(ID, T, Cls); 2296 2297 void *InsertPos = 0; 2298 if (MemberPointerType *PT = 2299 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 2300 return QualType(PT, 0); 2301 2302 // If the pointee or class type isn't canonical, this won't be a canonical 2303 // type either, so fill in the canonical type field. 2304 QualType Canonical; 2305 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 2306 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 2307 2308 // Get the new insert position for the node we care about. 2309 MemberPointerType *NewIP = 2310 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 2311 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2312 } 2313 MemberPointerType *New 2314 = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical); 2315 Types.push_back(New); 2316 MemberPointerTypes.InsertNode(New, InsertPos); 2317 return QualType(New, 0); 2318 } 2319 2320 /// getConstantArrayType - Return the unique reference to the type for an 2321 /// array of the specified element type. 2322 QualType ASTContext::getConstantArrayType(QualType EltTy, 2323 const llvm::APInt &ArySizeIn, 2324 ArrayType::ArraySizeModifier ASM, 2325 unsigned IndexTypeQuals) const { 2326 assert((EltTy->isDependentType() || 2327 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 2328 "Constant array of VLAs is illegal!"); 2329 2330 // Convert the array size into a canonical width matching the pointer size for 2331 // the target. 
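// (For instance, a 32-bit APInt holding 10 is zero-extended to 64 bits on a
// 64-bit target, so 'int[10]' uniques to a single ConstantArrayType no
// matter how wide the written size expression happened to be.)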
2332 llvm::APInt ArySize(ArySizeIn); 2333 ArySize = 2334 ArySize.zextOrTrunc(Target->getPointerWidth(getTargetAddressSpace(EltTy))); 2335 2336 llvm::FoldingSetNodeID ID; 2337 ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals); 2338 2339 void *InsertPos = 0; 2340 if (ConstantArrayType *ATP = 2341 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 2342 return QualType(ATP, 0); 2343 2344 // If the element type isn't canonical or has qualifiers, this won't 2345 // be a canonical type either, so fill in the canonical type field. 2346 QualType Canon; 2347 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 2348 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 2349 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, 2350 ASM, IndexTypeQuals); 2351 Canon = getQualifiedType(Canon, canonSplit.Quals); 2352 2353 // Get the new insert position for the node we care about. 2354 ConstantArrayType *NewIP = 2355 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 2356 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2357 } 2358 2359 ConstantArrayType *New = new(*this,TypeAlignment) 2360 ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals); 2361 ConstantArrayTypes.InsertNode(New, InsertPos); 2362 Types.push_back(New); 2363 return QualType(New, 0); 2364 } 2365 2366 /// getVariableArrayDecayedType - Turns the given type, which may be 2367 /// variably-modified, into the corresponding type with all the known 2368 /// sizes replaced with [*]. 2369 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 2370 // Vastly most common case. 2371 if (!type->isVariablyModifiedType()) return type; 2372 2373 QualType result; 2374 2375 SplitQualType split = type.getSplitDesugaredType(); 2376 const Type *ty = split.Ty; 2377 switch (ty->getTypeClass()) { 2378 #define TYPE(Class, Base) 2379 #define ABSTRACT_TYPE(Class, Base) 2380 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 2381 #include "clang/AST/TypeNodes.def" 2382 llvm_unreachable("didn't desugar past all non-canonical types?"); 2383 2384 // These types should never be variably-modified. 2385 case Type::Builtin: 2386 case Type::Complex: 2387 case Type::Vector: 2388 case Type::ExtVector: 2389 case Type::DependentSizedExtVector: 2390 case Type::ObjCObject: 2391 case Type::ObjCInterface: 2392 case Type::ObjCObjectPointer: 2393 case Type::Record: 2394 case Type::Enum: 2395 case Type::UnresolvedUsing: 2396 case Type::TypeOfExpr: 2397 case Type::TypeOf: 2398 case Type::Decltype: 2399 case Type::UnaryTransform: 2400 case Type::DependentName: 2401 case Type::InjectedClassName: 2402 case Type::TemplateSpecialization: 2403 case Type::DependentTemplateSpecialization: 2404 case Type::TemplateTypeParm: 2405 case Type::SubstTemplateTypeParmPack: 2406 case Type::Auto: 2407 case Type::PackExpansion: 2408 llvm_unreachable("type should never be variably-modified"); 2409 2410 // These types can be variably-modified but should never need to 2411 // further decay. 2412 case Type::FunctionNoProto: 2413 case Type::FunctionProto: 2414 case Type::BlockPointer: 2415 case Type::MemberPointer: 2416 return type; 2417 2418 // These types can be variably-modified. All these modifications 2419 // preserve structure except as noted by comments. 2420 // TODO: if we ever care about optimizing VLAs, there are no-op 2421 // optimizations available here. 
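// Example: 'int (*)[n]' decays to 'int (*)[*]', and 'int[n][m]' ends up
// as 'int[*][*]'.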
2422 case Type::Pointer: 2423 result = getPointerType(getVariableArrayDecayedType( 2424 cast<PointerType>(ty)->getPointeeType())); 2425 break; 2426 2427 case Type::LValueReference: { 2428 const LValueReferenceType *lv = cast<LValueReferenceType>(ty); 2429 result = getLValueReferenceType( 2430 getVariableArrayDecayedType(lv->getPointeeType()), 2431 lv->isSpelledAsLValue()); 2432 break; 2433 } 2434 2435 case Type::RValueReference: { 2436 const RValueReferenceType *lv = cast<RValueReferenceType>(ty); 2437 result = getRValueReferenceType( 2438 getVariableArrayDecayedType(lv->getPointeeType())); 2439 break; 2440 } 2441 2442 case Type::Atomic: { 2443 const AtomicType *at = cast<AtomicType>(ty); 2444 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 2445 break; 2446 } 2447 2448 case Type::ConstantArray: { 2449 const ConstantArrayType *cat = cast<ConstantArrayType>(ty); 2450 result = getConstantArrayType( 2451 getVariableArrayDecayedType(cat->getElementType()), 2452 cat->getSize(), 2453 cat->getSizeModifier(), 2454 cat->getIndexTypeCVRQualifiers()); 2455 break; 2456 } 2457 2458 case Type::DependentSizedArray: { 2459 const DependentSizedArrayType *dat = cast<DependentSizedArrayType>(ty); 2460 result = getDependentSizedArrayType( 2461 getVariableArrayDecayedType(dat->getElementType()), 2462 dat->getSizeExpr(), 2463 dat->getSizeModifier(), 2464 dat->getIndexTypeCVRQualifiers(), 2465 dat->getBracketsRange()); 2466 break; 2467 } 2468 2469 // Turn incomplete types into [*] types. 2470 case Type::IncompleteArray: { 2471 const IncompleteArrayType *iat = cast<IncompleteArrayType>(ty); 2472 result = getVariableArrayType( 2473 getVariableArrayDecayedType(iat->getElementType()), 2474 /*size*/ 0, 2475 ArrayType::Normal, 2476 iat->getIndexTypeCVRQualifiers(), 2477 SourceRange()); 2478 break; 2479 } 2480 2481 // Turn VLA types into [*] types. 2482 case Type::VariableArray: { 2483 const VariableArrayType *vat = cast<VariableArrayType>(ty); 2484 result = getVariableArrayType( 2485 getVariableArrayDecayedType(vat->getElementType()), 2486 /*size*/ 0, 2487 ArrayType::Star, 2488 vat->getIndexTypeCVRQualifiers(), 2489 vat->getBracketsRange()); 2490 break; 2491 } 2492 } 2493 2494 // Apply the top-level qualifiers from the original. 2495 return getQualifiedType(result, split.Quals); 2496 } 2497 2498 /// getVariableArrayType - Returns a non-unique reference to the type for a 2499 /// variable array of the specified element type. 2500 QualType ASTContext::getVariableArrayType(QualType EltTy, 2501 Expr *NumElts, 2502 ArrayType::ArraySizeModifier ASM, 2503 unsigned IndexTypeQuals, 2504 SourceRange Brackets) const { 2505 // Since we don't unique expressions, it isn't possible to unique VLA's 2506 // that have an expression provided for their size. 2507 QualType Canon; 2508 2509 // Be sure to pull qualifiers off the element type. 
2510 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 2511 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 2512 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 2513 IndexTypeQuals, Brackets); 2514 Canon = getQualifiedType(Canon, canonSplit.Quals); 2515 } 2516 2517 VariableArrayType *New = new(*this, TypeAlignment) 2518 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 2519 2520 VariableArrayTypes.push_back(New); 2521 Types.push_back(New); 2522 return QualType(New, 0); 2523 } 2524 2525 /// getDependentSizedArrayType - Returns a non-unique reference to 2526 /// the type for a dependently-sized array of the specified element 2527 /// type. 2528 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 2529 Expr *numElements, 2530 ArrayType::ArraySizeModifier ASM, 2531 unsigned elementTypeQuals, 2532 SourceRange brackets) const { 2533 assert((!numElements || numElements->isTypeDependent() || 2534 numElements->isValueDependent()) && 2535 "Size must be type- or value-dependent!"); 2536 2537 // Dependently-sized array types that do not have a specified number 2538 // of elements will have their sizes deduced from a dependent 2539 // initializer. We do no canonicalization here at all, which is okay 2540 // because they can't be used in most locations. 2541 if (!numElements) { 2542 DependentSizedArrayType *newType 2543 = new (*this, TypeAlignment) 2544 DependentSizedArrayType(*this, elementType, QualType(), 2545 numElements, ASM, elementTypeQuals, 2546 brackets); 2547 Types.push_back(newType); 2548 return QualType(newType, 0); 2549 } 2550 2551 // Otherwise, we actually build a new type every time, but we 2552 // also build a canonical type. 2553 2554 SplitQualType canonElementType = getCanonicalType(elementType).split(); 2555 2556 void *insertPos = 0; 2557 llvm::FoldingSetNodeID ID; 2558 DependentSizedArrayType::Profile(ID, *this, 2559 QualType(canonElementType.Ty, 0), 2560 ASM, elementTypeQuals, numElements); 2561 2562 // Look for an existing type with these properties. 2563 DependentSizedArrayType *canonTy = 2564 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 2565 2566 // If we don't have one, build one. 2567 if (!canonTy) { 2568 canonTy = new (*this, TypeAlignment) 2569 DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0), 2570 QualType(), numElements, ASM, elementTypeQuals, 2571 brackets); 2572 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 2573 Types.push_back(canonTy); 2574 } 2575 2576 // Apply qualifiers from the element type to the array. 2577 QualType canon = getQualifiedType(QualType(canonTy,0), 2578 canonElementType.Quals); 2579 2580 // If we didn't need extra canonicalization for the element type, 2581 // then just use that as our result. 2582 if (QualType(canonElementType.Ty, 0) == elementType) 2583 return canon; 2584 2585 // Otherwise, we need to build a type which follows the spelling 2586 // of the element type. 
2587 DependentSizedArrayType *sugaredType 2588 = new (*this, TypeAlignment) 2589 DependentSizedArrayType(*this, elementType, canon, numElements, 2590 ASM, elementTypeQuals, brackets); 2591 Types.push_back(sugaredType); 2592 return QualType(sugaredType, 0); 2593 } 2594 2595 QualType ASTContext::getIncompleteArrayType(QualType elementType, 2596 ArrayType::ArraySizeModifier ASM, 2597 unsigned elementTypeQuals) const { 2598 llvm::FoldingSetNodeID ID; 2599 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); 2600 2601 void *insertPos = 0; 2602 if (IncompleteArrayType *iat = 2603 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) 2604 return QualType(iat, 0); 2605 2606 // If the element type isn't canonical, this won't be a canonical type 2607 // either, so fill in the canonical type field. We also have to pull 2608 // qualifiers off the element type. 2609 QualType canon; 2610 2611 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { 2612 SplitQualType canonSplit = getCanonicalType(elementType).split(); 2613 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), 2614 ASM, elementTypeQuals); 2615 canon = getQualifiedType(canon, canonSplit.Quals); 2616 2617 // Get the new insert position for the node we care about. 2618 IncompleteArrayType *existing = 2619 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 2620 assert(!existing && "Shouldn't be in the map!"); (void) existing; 2621 } 2622 2623 IncompleteArrayType *newType = new (*this, TypeAlignment) 2624 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 2625 2626 IncompleteArrayTypes.InsertNode(newType, insertPos); 2627 Types.push_back(newType); 2628 return QualType(newType, 0); 2629 } 2630 2631 /// getVectorType - Return the unique reference to a vector type of 2632 /// the specified element type and size. VectorType must be a built-in type. 2633 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 2634 VectorType::VectorKind VecKind) const { 2635 assert(vecType->isBuiltinType()); 2636 2637 // Check if we've already instantiated a vector of this type. 2638 llvm::FoldingSetNodeID ID; 2639 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 2640 2641 void *InsertPos = 0; 2642 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 2643 return QualType(VTP, 0); 2644 2645 // If the element type isn't canonical, this won't be a canonical type either, 2646 // so fill in the canonical type field. 2647 QualType Canonical; 2648 if (!vecType.isCanonical()) { 2649 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 2650 2651 // Get the new insert position for the node we care about. 2652 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 2653 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2654 } 2655 VectorType *New = new (*this, TypeAlignment) 2656 VectorType(vecType, NumElts, Canonical, VecKind); 2657 VectorTypes.InsertNode(New, InsertPos); 2658 Types.push_back(New); 2659 return QualType(New, 0); 2660 } 2661 2662 /// getExtVectorType - Return the unique reference to an extended vector type of 2663 /// the specified element type and size. VectorType must be a built-in type. 2664 QualType 2665 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const { 2666 assert(vecType->isBuiltinType() || vecType->isDependentType()); 2667 2668 // Check if we've already instantiated a vector of this type. 
2669 llvm::FoldingSetNodeID ID; 2670 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 2671 VectorType::GenericVector); 2672 void *InsertPos = 0; 2673 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 2674 return QualType(VTP, 0); 2675 2676 // If the element type isn't canonical, this won't be a canonical type either, 2677 // so fill in the canonical type field. 2678 QualType Canonical; 2679 if (!vecType.isCanonical()) { 2680 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 2681 2682 // Get the new insert position for the node we care about. 2683 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 2684 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2685 } 2686 ExtVectorType *New = new (*this, TypeAlignment) 2687 ExtVectorType(vecType, NumElts, Canonical); 2688 VectorTypes.InsertNode(New, InsertPos); 2689 Types.push_back(New); 2690 return QualType(New, 0); 2691 } 2692 2693 QualType 2694 ASTContext::getDependentSizedExtVectorType(QualType vecType, 2695 Expr *SizeExpr, 2696 SourceLocation AttrLoc) const { 2697 llvm::FoldingSetNodeID ID; 2698 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 2699 SizeExpr); 2700 2701 void *InsertPos = 0; 2702 DependentSizedExtVectorType *Canon 2703 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 2704 DependentSizedExtVectorType *New; 2705 if (Canon) { 2706 // We already have a canonical version of this array type; use it as 2707 // the canonical type for a newly-built type. 2708 New = new (*this, TypeAlignment) 2709 DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0), 2710 SizeExpr, AttrLoc); 2711 } else { 2712 QualType CanonVecTy = getCanonicalType(vecType); 2713 if (CanonVecTy == vecType) { 2714 New = new (*this, TypeAlignment) 2715 DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr, 2716 AttrLoc); 2717 2718 DependentSizedExtVectorType *CanonCheck 2719 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 2720 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 2721 (void)CanonCheck; 2722 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 2723 } else { 2724 QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 2725 SourceLocation()); 2726 New = new (*this, TypeAlignment) 2727 DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc); 2728 } 2729 } 2730 2731 Types.push_back(New); 2732 return QualType(New, 0); 2733 } 2734 2735 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'. 2736 /// 2737 QualType 2738 ASTContext::getFunctionNoProtoType(QualType ResultTy, 2739 const FunctionType::ExtInfo &Info) const { 2740 const CallingConv DefaultCC = Info.getCC(); 2741 const CallingConv CallConv = (LangOpts.MRTD && DefaultCC == CC_Default) ? 2742 CC_X86StdCall : DefaultCC; 2743 // Unique functions, to guarantee there is only one function of a particular 2744 // structure. 2745 llvm::FoldingSetNodeID ID; 2746 FunctionNoProtoType::Profile(ID, ResultTy, Info); 2747 2748 void *InsertPos = 0; 2749 if (FunctionNoProtoType *FT = 2750 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 2751 return QualType(FT, 0); 2752 2753 QualType Canonical; 2754 if (!ResultTy.isCanonical() || 2755 getCanonicalCallConv(CallConv) != CallConv) { 2756 Canonical = 2757 getFunctionNoProtoType(getCanonicalType(ResultTy), 2758 Info.withCallingConv(getCanonicalCallConv(CallConv))); 2759 2760 // Get the new insert position for the node we care about. 
2761 FunctionNoProtoType *NewIP = 2762 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 2763 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2764 } 2765 2766 FunctionProtoType::ExtInfo newInfo = Info.withCallingConv(CallConv); 2767 FunctionNoProtoType *New = new (*this, TypeAlignment) 2768 FunctionNoProtoType(ResultTy, Canonical, newInfo); 2769 Types.push_back(New); 2770 FunctionNoProtoTypes.InsertNode(New, InsertPos); 2771 return QualType(New, 0); 2772 } 2773 2774 /// \brief Determine whether \p T is canonical as the result type of a function. 2775 static bool isCanonicalResultType(QualType T) { 2776 return T.isCanonical() && 2777 (T.getObjCLifetime() == Qualifiers::OCL_None || 2778 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone); 2779 } 2780 2781 /// getFunctionType - Return a normal function type with a typed argument 2782 /// list. isVariadic indicates whether the argument list includes '...'. 2783 QualType 2784 ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray, 2785 const FunctionProtoType::ExtProtoInfo &EPI) const { 2786 size_t NumArgs = ArgArray.size(); 2787 2788 // Unique functions, to guarantee there is only one function of a particular 2789 // structure. 2790 llvm::FoldingSetNodeID ID; 2791 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 2792 *this); 2793 2794 void *InsertPos = 0; 2795 if (FunctionProtoType *FTP = 2796 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 2797 return QualType(FTP, 0); 2798 2799 // Determine whether the type being created is already canonical or not. 2800 bool isCanonical = 2801 EPI.ExceptionSpecType == EST_None && isCanonicalResultType(ResultTy) && 2802 !EPI.HasTrailingReturn; 2803 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 2804 if (!ArgArray[i].isCanonicalAsParam()) 2805 isCanonical = false; 2806 2807 const CallingConv DefaultCC = EPI.ExtInfo.getCC(); 2808 const CallingConv CallConv = (LangOpts.MRTD && DefaultCC == CC_Default) ? 2809 CC_X86StdCall : DefaultCC; 2810 2811 // If this type isn't canonical, get the canonical version of it. 2812 // The exception spec is not part of the canonical type. 2813 QualType Canonical; 2814 if (!isCanonical || getCanonicalCallConv(CallConv) != CallConv) { 2815 SmallVector<QualType, 16> CanonicalArgs; 2816 CanonicalArgs.reserve(NumArgs); 2817 for (unsigned i = 0; i != NumArgs; ++i) 2818 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 2819 2820 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 2821 CanonicalEPI.HasTrailingReturn = false; 2822 CanonicalEPI.ExceptionSpecType = EST_None; 2823 CanonicalEPI.NumExceptions = 0; 2824 CanonicalEPI.ExtInfo 2825 = CanonicalEPI.ExtInfo.withCallingConv(getCanonicalCallConv(CallConv)); 2826 2827 // Result types do not have ARC lifetime qualifiers. 2828 QualType CanResultTy = getCanonicalType(ResultTy); 2829 if (ResultTy.getQualifiers().hasObjCLifetime()) { 2830 Qualifiers Qs = CanResultTy.getQualifiers(); 2831 Qs.removeObjCLifetime(); 2832 CanResultTy = getQualifiedType(CanResultTy.getUnqualifiedType(), Qs); 2833 } 2834 2835 Canonical = getFunctionType(CanResultTy, CanonicalArgs, CanonicalEPI); 2836 2837 // Get the new insert position for the node we care about. 
2838 FunctionProtoType *NewIP = 2839 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 2840 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 2841 } 2842 2843 // FunctionProtoType objects are allocated with extra bytes after 2844 // them for three variable size arrays at the end: 2845 // - parameter types 2846 // - exception types 2847 // - consumed-arguments flags 2848 // Instead of the exception types, there could be a noexcept 2849 // expression, or information used to resolve the exception 2850 // specification. 2851 size_t Size = sizeof(FunctionProtoType) + 2852 NumArgs * sizeof(QualType); 2853 if (EPI.ExceptionSpecType == EST_Dynamic) { 2854 Size += EPI.NumExceptions * sizeof(QualType); 2855 } else if (EPI.ExceptionSpecType == EST_ComputedNoexcept) { 2856 Size += sizeof(Expr*); 2857 } else if (EPI.ExceptionSpecType == EST_Uninstantiated) { 2858 Size += 2 * sizeof(FunctionDecl*); 2859 } else if (EPI.ExceptionSpecType == EST_Unevaluated) { 2860 Size += sizeof(FunctionDecl*); 2861 } 2862 if (EPI.ConsumedArguments) 2863 Size += NumArgs * sizeof(bool); 2864 2865 FunctionProtoType *FTP = (FunctionProtoType*) Allocate(Size, TypeAlignment); 2866 FunctionProtoType::ExtProtoInfo newEPI = EPI; 2867 newEPI.ExtInfo = EPI.ExtInfo.withCallingConv(CallConv); 2868 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 2869 Types.push_back(FTP); 2870 FunctionProtoTypes.InsertNode(FTP, InsertPos); 2871 return QualType(FTP, 0); 2872 } 2873 2874 #ifndef NDEBUG 2875 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 2876 if (!isa<CXXRecordDecl>(D)) return false; 2877 const CXXRecordDecl *RD = cast<CXXRecordDecl>(D); 2878 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 2879 return true; 2880 if (RD->getDescribedClassTemplate() && 2881 !isa<ClassTemplateSpecializationDecl>(RD)) 2882 return true; 2883 return false; 2884 } 2885 #endif 2886 2887 /// getInjectedClassNameType - Return the unique reference to the 2888 /// injected class name type for the specified templated declaration. 2889 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 2890 QualType TST) const { 2891 assert(NeedsInjectedClassNameType(Decl)); 2892 if (Decl->TypeForDecl) { 2893 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 2894 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 2895 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 2896 Decl->TypeForDecl = PrevDecl->TypeForDecl; 2897 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 2898 } else { 2899 Type *newType = 2900 new (*this, TypeAlignment) InjectedClassNameType(Decl, TST); 2901 Decl->TypeForDecl = newType; 2902 Types.push_back(newType); 2903 } 2904 return QualType(Decl->TypeForDecl, 0); 2905 } 2906 2907 /// getTypeDeclType - Return the unique reference to the type for the 2908 /// specified type declaration. 
2909 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 2910 assert(Decl && "Passed null for Decl param"); 2911 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 2912 2913 if (const TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 2914 return getTypedefType(Typedef); 2915 2916 assert(!isa<TemplateTypeParmDecl>(Decl) && 2917 "Template type parameter types are always available."); 2918 2919 if (const RecordDecl *Record = dyn_cast<RecordDecl>(Decl)) { 2920 assert(!Record->getPreviousDecl() && 2921 "struct/union has previous declaration"); 2922 assert(!NeedsInjectedClassNameType(Record)); 2923 return getRecordType(Record); 2924 } else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) { 2925 assert(!Enum->getPreviousDecl() && 2926 "enum has previous declaration"); 2927 return getEnumType(Enum); 2928 } else if (const UnresolvedUsingTypenameDecl *Using = 2929 dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 2930 Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using); 2931 Decl->TypeForDecl = newType; 2932 Types.push_back(newType); 2933 } else 2934 llvm_unreachable("TypeDecl without a type?"); 2935 2936 return QualType(Decl->TypeForDecl, 0); 2937 } 2938 2939 /// getTypedefType - Return the unique reference to the type for the 2940 /// specified typedef name decl. 2941 QualType 2942 ASTContext::getTypedefType(const TypedefNameDecl *Decl, 2943 QualType Canonical) const { 2944 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 2945 2946 if (Canonical.isNull()) 2947 Canonical = getCanonicalType(Decl->getUnderlyingType()); 2948 TypedefType *newType = new(*this, TypeAlignment) 2949 TypedefType(Type::Typedef, Decl, Canonical); 2950 Decl->TypeForDecl = newType; 2951 Types.push_back(newType); 2952 return QualType(newType, 0); 2953 } 2954 2955 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 2956 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 2957 2958 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 2959 if (PrevDecl->TypeForDecl) 2960 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 2961 2962 RecordType *newType = new (*this, TypeAlignment) RecordType(Decl); 2963 Decl->TypeForDecl = newType; 2964 Types.push_back(newType); 2965 return QualType(newType, 0); 2966 } 2967 2968 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 2969 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 2970 2971 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 2972 if (PrevDecl->TypeForDecl) 2973 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 2974 2975 EnumType *newType = new (*this, TypeAlignment) EnumType(Decl); 2976 Decl->TypeForDecl = newType; 2977 Types.push_back(newType); 2978 return QualType(newType, 0); 2979 } 2980 2981 QualType ASTContext::getAttributedType(AttributedType::Kind attrKind, 2982 QualType modifiedType, 2983 QualType equivalentType) { 2984 llvm::FoldingSetNodeID id; 2985 AttributedType::Profile(id, attrKind, modifiedType, equivalentType); 2986 2987 void *insertPos = 0; 2988 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); 2989 if (type) return QualType(type, 0); 2990 2991 QualType canon = getCanonicalType(equivalentType); 2992 type = new (*this, TypeAlignment) 2993 AttributedType(canon, attrKind, modifiedType, equivalentType); 2994 2995 Types.push_back(type); 2996 AttributedTypes.InsertNode(type, insertPos); 2997 2998 return QualType(type, 0); 2999 } 3000 3001 3002 /// \brief Retrieve a substitution-result type. 
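/// For instance, instantiating 'template<typename T> void f(T);' with
/// T = int produces a SubstTemplateTypeParmType whose replacement type is
/// 'int' while keeping a link back to the original parameter.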
3003 QualType
3004 ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
3005 QualType Replacement) const {
3006 assert(Replacement.isCanonical()
3007 && "replacement types must always be canonical");
3008
3009 llvm::FoldingSetNodeID ID;
3010 SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
3011 void *InsertPos = 0;
3012 SubstTemplateTypeParmType *SubstParm
3013 = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3014
3015 if (!SubstParm) {
3016 SubstParm = new (*this, TypeAlignment)
3017 SubstTemplateTypeParmType(Parm, Replacement);
3018 Types.push_back(SubstParm);
3019 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
3020 }
3021
3022 return QualType(SubstParm, 0);
3023 }
3024
3025 /// \brief Retrieve a substituted template type parameter pack type, which
3026 /// records the substitution of the given argument pack for a parameter pack.
3027 QualType ASTContext::getSubstTemplateTypeParmPackType(
3028 const TemplateTypeParmType *Parm,
3029 const TemplateArgument &ArgPack) {
3030 #ifndef NDEBUG
3031 for (TemplateArgument::pack_iterator P = ArgPack.pack_begin(),
3032 PEnd = ArgPack.pack_end();
3033 P != PEnd; ++P) {
3034 assert(P->getKind() == TemplateArgument::Type && "Pack contains a non-type");
3035 assert(P->getAsType().isCanonical() && "Pack contains non-canonical type");
3036 }
3037 #endif
3038
3039 llvm::FoldingSetNodeID ID;
3040 SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
3041 void *InsertPos = 0;
3042 if (SubstTemplateTypeParmPackType *SubstParm
3043 = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
3044 return QualType(SubstParm, 0);
3045
3046 QualType Canon;
3047 if (!Parm->isCanonicalUnqualified()) {
3048 Canon = getCanonicalType(QualType(Parm, 0));
3049 Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
3050 ArgPack);
3051 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
3052 }
3053
3054 SubstTemplateTypeParmPackType *SubstParm
3055 = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
3056 ArgPack);
3057 Types.push_back(SubstParm);
3058 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
3059 return QualType(SubstParm, 0);
3060 }
3061
3062 /// \brief Retrieve the template type parameter type for a template
3063 /// parameter or parameter pack with the given depth, index, and (optionally)
3064 /// name.
3064 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
3065                                              bool ParameterPack,
3066                                              TemplateTypeParmDecl *TTPDecl) const {
3067   llvm::FoldingSetNodeID ID;
3068   TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
3069   void *InsertPos = 0;
3070   TemplateTypeParmType *TypeParm
3071     = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3072
3073   if (TypeParm)
3074     return QualType(TypeParm, 0);
3075
3076   if (TTPDecl) {
3077     QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
3078     TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
3079
3080     TemplateTypeParmType *TypeCheck
3081       = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3082     assert(!TypeCheck && "Template type parameter canonical type broken");
3083     (void)TypeCheck;
3084   } else
3085     TypeParm = new (*this, TypeAlignment)
3086       TemplateTypeParmType(Depth, Index, ParameterPack);
3087
3088   Types.push_back(TypeParm);
3089   TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
3090
3091   return QualType(TypeParm, 0);
3092 }
3093
3094 TypeSourceInfo *
3095 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
3096                                               SourceLocation NameLoc,
3097                                         const TemplateArgumentListInfo &Args,
3098                                               QualType Underlying) const {
3099   assert(!Name.getAsDependentTemplateName() &&
3100          "No dependent template names here!");
3101   QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
3102
3103   TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
3104   TemplateSpecializationTypeLoc TL =
3105       DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
3106   TL.setTemplateKeywordLoc(SourceLocation());
3107   TL.setTemplateNameLoc(NameLoc);
3108   TL.setLAngleLoc(Args.getLAngleLoc());
3109   TL.setRAngleLoc(Args.getRAngleLoc());
3110   for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
3111     TL.setArgLocInfo(i, Args[i].getLocInfo());
3112   return DI;
3113 }
3114
3115 QualType
3116 ASTContext::getTemplateSpecializationType(TemplateName Template,
3117                                           const TemplateArgumentListInfo &Args,
3118                                           QualType Underlying) const {
3119   assert(!Template.getAsDependentTemplateName() &&
3120          "No dependent template names here!");
3121
3122   unsigned NumArgs = Args.size();
3123
3124   SmallVector<TemplateArgument, 4> ArgVec;
3125   ArgVec.reserve(NumArgs);
3126   for (unsigned i = 0; i != NumArgs; ++i)
3127     ArgVec.push_back(Args[i].getArgument());
3128
3129   return getTemplateSpecializationType(Template, ArgVec.data(), NumArgs,
3130                                        Underlying);
3131 }
3132
3133 #ifndef NDEBUG
3134 static bool hasAnyPackExpansions(const TemplateArgument *Args,
3135                                  unsigned NumArgs) {
3136   for (unsigned I = 0; I != NumArgs; ++I)
3137     if (Args[I].isPackExpansion())
3138       return true;
3139
3140   return false;
3141 }
3142 #endif
3143
3144 QualType
3145 ASTContext::getTemplateSpecializationType(TemplateName Template,
3146                                           const TemplateArgument *Args,
3147                                           unsigned NumArgs,
3148                                           QualType Underlying) const {
3149   assert(!Template.getAsDependentTemplateName() &&
3150          "No dependent template names here!");
3151   // Look through qualified template names.
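  // (E.g. a template-id written 'N::X<int>' carries the 'N::' qualifier in
  //  its TemplateName; only the underlying TemplateDecl matters for the type
  //  built here, so the qualifier is dropped up front.)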
3152 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 3153 Template = TemplateName(QTN->getTemplateDecl()); 3154 3155 bool IsTypeAlias = 3156 Template.getAsTemplateDecl() && 3157 isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl()); 3158 QualType CanonType; 3159 if (!Underlying.isNull()) 3160 CanonType = getCanonicalType(Underlying); 3161 else { 3162 // We can get here with an alias template when the specialization contains 3163 // a pack expansion that does not match up with a parameter pack. 3164 assert((!IsTypeAlias || hasAnyPackExpansions(Args, NumArgs)) && 3165 "Caller must compute aliased type"); 3166 IsTypeAlias = false; 3167 CanonType = getCanonicalTemplateSpecializationType(Template, Args, 3168 NumArgs); 3169 } 3170 3171 // Allocate the (non-canonical) template specialization type, but don't 3172 // try to unique it: these types typically have location information that 3173 // we don't unique and don't want to lose. 3174 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 3175 sizeof(TemplateArgument) * NumArgs + 3176 (IsTypeAlias? sizeof(QualType) : 0), 3177 TypeAlignment); 3178 TemplateSpecializationType *Spec 3179 = new (Mem) TemplateSpecializationType(Template, Args, NumArgs, CanonType, 3180 IsTypeAlias ? Underlying : QualType()); 3181 3182 Types.push_back(Spec); 3183 return QualType(Spec, 0); 3184 } 3185 3186 QualType 3187 ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template, 3188 const TemplateArgument *Args, 3189 unsigned NumArgs) const { 3190 assert(!Template.getAsDependentTemplateName() && 3191 "No dependent template names here!"); 3192 3193 // Look through qualified template names. 3194 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 3195 Template = TemplateName(QTN->getTemplateDecl()); 3196 3197 // Build the canonical template specialization type. 3198 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 3199 SmallVector<TemplateArgument, 4> CanonArgs; 3200 CanonArgs.reserve(NumArgs); 3201 for (unsigned I = 0; I != NumArgs; ++I) 3202 CanonArgs.push_back(getCanonicalTemplateArgument(Args[I])); 3203 3204 // Determine whether this canonical template specialization type already 3205 // exists. 3206 llvm::FoldingSetNodeID ID; 3207 TemplateSpecializationType::Profile(ID, CanonTemplate, 3208 CanonArgs.data(), NumArgs, *this); 3209 3210 void *InsertPos = 0; 3211 TemplateSpecializationType *Spec 3212 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 3213 3214 if (!Spec) { 3215 // Allocate a new canonical template specialization type. 
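    // The canonical arguments are tail-allocated immediately after the node,
    // hence sizeof(TemplateSpecializationType) plus one TemplateArgument per
    // argument below; this mirrors the non-canonical allocation above.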
3216 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 3217 sizeof(TemplateArgument) * NumArgs), 3218 TypeAlignment); 3219 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 3220 CanonArgs.data(), NumArgs, 3221 QualType(), QualType()); 3222 Types.push_back(Spec); 3223 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 3224 } 3225 3226 assert(Spec->isDependentType() && 3227 "Non-dependent template-id type must have a canonical type"); 3228 return QualType(Spec, 0); 3229 } 3230 3231 QualType 3232 ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 3233 NestedNameSpecifier *NNS, 3234 QualType NamedType) const { 3235 llvm::FoldingSetNodeID ID; 3236 ElaboratedType::Profile(ID, Keyword, NNS, NamedType); 3237 3238 void *InsertPos = 0; 3239 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 3240 if (T) 3241 return QualType(T, 0); 3242 3243 QualType Canon = NamedType; 3244 if (!Canon.isCanonical()) { 3245 Canon = getCanonicalType(NamedType); 3246 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 3247 assert(!CheckT && "Elaborated canonical type broken"); 3248 (void)CheckT; 3249 } 3250 3251 T = new (*this) ElaboratedType(Keyword, NNS, NamedType, Canon); 3252 Types.push_back(T); 3253 ElaboratedTypes.InsertNode(T, InsertPos); 3254 return QualType(T, 0); 3255 } 3256 3257 QualType 3258 ASTContext::getParenType(QualType InnerType) const { 3259 llvm::FoldingSetNodeID ID; 3260 ParenType::Profile(ID, InnerType); 3261 3262 void *InsertPos = 0; 3263 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 3264 if (T) 3265 return QualType(T, 0); 3266 3267 QualType Canon = InnerType; 3268 if (!Canon.isCanonical()) { 3269 Canon = getCanonicalType(InnerType); 3270 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 3271 assert(!CheckT && "Paren canonical type broken"); 3272 (void)CheckT; 3273 } 3274 3275 T = new (*this) ParenType(InnerType, Canon); 3276 Types.push_back(T); 3277 ParenTypes.InsertNode(T, InsertPos); 3278 return QualType(T, 0); 3279 } 3280 3281 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 3282 NestedNameSpecifier *NNS, 3283 const IdentifierInfo *Name, 3284 QualType Canon) const { 3285 assert(NNS->isDependent() && "nested-name-specifier must be dependent"); 3286 3287 if (Canon.isNull()) { 3288 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 3289 ElaboratedTypeKeyword CanonKeyword = Keyword; 3290 if (Keyword == ETK_None) 3291 CanonKeyword = ETK_Typename; 3292 3293 if (CanonNNS != NNS || CanonKeyword != Keyword) 3294 Canon = getDependentNameType(CanonKeyword, CanonNNS, Name); 3295 } 3296 3297 llvm::FoldingSetNodeID ID; 3298 DependentNameType::Profile(ID, Keyword, NNS, Name); 3299 3300 void *InsertPos = 0; 3301 DependentNameType *T 3302 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 3303 if (T) 3304 return QualType(T, 0); 3305 3306 T = new (*this) DependentNameType(Keyword, NNS, Name, Canon); 3307 Types.push_back(T); 3308 DependentNameTypes.InsertNode(T, InsertPos); 3309 return QualType(T, 0); 3310 } 3311 3312 QualType 3313 ASTContext::getDependentTemplateSpecializationType( 3314 ElaboratedTypeKeyword Keyword, 3315 NestedNameSpecifier *NNS, 3316 const IdentifierInfo *Name, 3317 const TemplateArgumentListInfo &Args) const { 3318 // TODO: avoid this copy 3319 SmallVector<TemplateArgument, 16> ArgCopy; 3320 for (unsigned I = 0, E = Args.size(); I != E; ++I) 3321 ArgCopy.push_back(Args[I].getArgument()); 3322 return 
getDependentTemplateSpecializationType(Keyword, NNS, Name, 3323 ArgCopy.size(), 3324 ArgCopy.data()); 3325 } 3326 3327 QualType 3328 ASTContext::getDependentTemplateSpecializationType( 3329 ElaboratedTypeKeyword Keyword, 3330 NestedNameSpecifier *NNS, 3331 const IdentifierInfo *Name, 3332 unsigned NumArgs, 3333 const TemplateArgument *Args) const { 3334 assert((!NNS || NNS->isDependent()) && 3335 "nested-name-specifier must be dependent"); 3336 3337 llvm::FoldingSetNodeID ID; 3338 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 3339 Name, NumArgs, Args); 3340 3341 void *InsertPos = 0; 3342 DependentTemplateSpecializationType *T 3343 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 3344 if (T) 3345 return QualType(T, 0); 3346 3347 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 3348 3349 ElaboratedTypeKeyword CanonKeyword = Keyword; 3350 if (Keyword == ETK_None) CanonKeyword = ETK_Typename; 3351 3352 bool AnyNonCanonArgs = false; 3353 SmallVector<TemplateArgument, 16> CanonArgs(NumArgs); 3354 for (unsigned I = 0; I != NumArgs; ++I) { 3355 CanonArgs[I] = getCanonicalTemplateArgument(Args[I]); 3356 if (!CanonArgs[I].structurallyEquals(Args[I])) 3357 AnyNonCanonArgs = true; 3358 } 3359 3360 QualType Canon; 3361 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 3362 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 3363 Name, NumArgs, 3364 CanonArgs.data()); 3365 3366 // Find the insert position again. 3367 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 3368 } 3369 3370 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 3371 sizeof(TemplateArgument) * NumArgs), 3372 TypeAlignment); 3373 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 3374 Name, NumArgs, Args, Canon); 3375 Types.push_back(T); 3376 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 3377 return QualType(T, 0); 3378 } 3379 3380 QualType ASTContext::getPackExpansionType(QualType Pattern, 3381 Optional<unsigned> NumExpansions) { 3382 llvm::FoldingSetNodeID ID; 3383 PackExpansionType::Profile(ID, Pattern, NumExpansions); 3384 3385 assert(Pattern->containsUnexpandedParameterPack() && 3386 "Pack expansions must expand one or more parameter packs"); 3387 void *InsertPos = 0; 3388 PackExpansionType *T 3389 = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 3390 if (T) 3391 return QualType(T, 0); 3392 3393 QualType Canon; 3394 if (!Pattern.isCanonical()) { 3395 Canon = getCanonicalType(Pattern); 3396 // The canonical type might not contain an unexpanded parameter pack, if it 3397 // contains an alias template specialization which ignores one of its 3398 // parameters. 3399 if (Canon->containsUnexpandedParameterPack()) { 3400 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions); 3401 3402 // Find the insert position again, in case we inserted an element into 3403 // PackExpansionTypes and invalidated our insert position. 3404 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 3405 } 3406 } 3407 3408 T = new (*this) PackExpansionType(Pattern, Canon, NumExpansions); 3409 Types.push_back(T); 3410 PackExpansionTypes.InsertNode(T, InsertPos); 3411 return QualType(T, 0); 3412 } 3413 3414 /// CmpProtocolNames - Comparison predicate for sorting protocols 3415 /// alphabetically. 
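/// (This is the strict weak ordering used by std::sort/std::unique below;
/// e.g. a protocol list written as <NSCopying, NSCoding, NSCopying> is
/// canonicalized to the sorted, uniqued list <NSCoding, NSCopying>.)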
3416 static bool CmpProtocolNames(const ObjCProtocolDecl *LHS, 3417 const ObjCProtocolDecl *RHS) { 3418 return LHS->getDeclName() < RHS->getDeclName(); 3419 } 3420 3421 static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols, 3422 unsigned NumProtocols) { 3423 if (NumProtocols == 0) return true; 3424 3425 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 3426 return false; 3427 3428 for (unsigned i = 1; i != NumProtocols; ++i) 3429 if (!CmpProtocolNames(Protocols[i-1], Protocols[i]) || 3430 Protocols[i]->getCanonicalDecl() != Protocols[i]) 3431 return false; 3432 return true; 3433 } 3434 3435 static void SortAndUniqueProtocols(ObjCProtocolDecl **Protocols, 3436 unsigned &NumProtocols) { 3437 ObjCProtocolDecl **ProtocolsEnd = Protocols+NumProtocols; 3438 3439 // Sort protocols, keyed by name. 3440 std::sort(Protocols, Protocols+NumProtocols, CmpProtocolNames); 3441 3442 // Canonicalize. 3443 for (unsigned I = 0, N = NumProtocols; I != N; ++I) 3444 Protocols[I] = Protocols[I]->getCanonicalDecl(); 3445 3446 // Remove duplicates. 3447 ProtocolsEnd = std::unique(Protocols, ProtocolsEnd); 3448 NumProtocols = ProtocolsEnd-Protocols; 3449 } 3450 3451 QualType ASTContext::getObjCObjectType(QualType BaseType, 3452 ObjCProtocolDecl * const *Protocols, 3453 unsigned NumProtocols) const { 3454 // If the base type is an interface and there aren't any protocols 3455 // to add, then the interface type will do just fine. 3456 if (!NumProtocols && isa<ObjCInterfaceType>(BaseType)) 3457 return BaseType; 3458 3459 // Look in the folding set for an existing type. 3460 llvm::FoldingSetNodeID ID; 3461 ObjCObjectTypeImpl::Profile(ID, BaseType, Protocols, NumProtocols); 3462 void *InsertPos = 0; 3463 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 3464 return QualType(QT, 0); 3465 3466 // Build the canonical type, which has the canonical base type and 3467 // a sorted-and-uniqued list of protocols. 3468 QualType Canonical; 3469 bool ProtocolsSorted = areSortedAndUniqued(Protocols, NumProtocols); 3470 if (!ProtocolsSorted || !BaseType.isCanonical()) { 3471 if (!ProtocolsSorted) { 3472 SmallVector<ObjCProtocolDecl*, 8> Sorted(Protocols, 3473 Protocols + NumProtocols); 3474 unsigned UniqueCount = NumProtocols; 3475 3476 SortAndUniqueProtocols(&Sorted[0], UniqueCount); 3477 Canonical = getObjCObjectType(getCanonicalType(BaseType), 3478 &Sorted[0], UniqueCount); 3479 } else { 3480 Canonical = getObjCObjectType(getCanonicalType(BaseType), 3481 Protocols, NumProtocols); 3482 } 3483 3484 // Regenerate InsertPos. 3485 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 3486 } 3487 3488 unsigned Size = sizeof(ObjCObjectTypeImpl); 3489 Size += NumProtocols * sizeof(ObjCProtocolDecl *); 3490 void *Mem = Allocate(Size, TypeAlignment); 3491 ObjCObjectTypeImpl *T = 3492 new (Mem) ObjCObjectTypeImpl(Canonical, BaseType, Protocols, NumProtocols); 3493 3494 Types.push_back(T); 3495 ObjCObjectTypes.InsertNode(T, InsertPos); 3496 return QualType(T, 0); 3497 } 3498 3499 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 3500 /// the given object type. 3501 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 3502 llvm::FoldingSetNodeID ID; 3503 ObjCObjectPointerType::Profile(ID, ObjectT); 3504 3505 void *InsertPos = 0; 3506 if (ObjCObjectPointerType *QT = 3507 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3508 return QualType(QT, 0); 3509 3510 // Find the canonical object type. 
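  // If the pointee is sugared (e.g. spelled through a typedef of an
  // interface type), build a separate canonical pointer type first; the
  // second FindNodeOrInsertPos call below recomputes InsertPos because the
  // recursive call may have inserted into this folding set.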
3511 QualType Canonical; 3512 if (!ObjectT.isCanonical()) { 3513 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 3514 3515 // Regenerate InsertPos. 3516 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3517 } 3518 3519 // No match. 3520 void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment); 3521 ObjCObjectPointerType *QType = 3522 new (Mem) ObjCObjectPointerType(Canonical, ObjectT); 3523 3524 Types.push_back(QType); 3525 ObjCObjectPointerTypes.InsertNode(QType, InsertPos); 3526 return QualType(QType, 0); 3527 } 3528 3529 /// getObjCInterfaceType - Return the unique reference to the type for the 3530 /// specified ObjC interface decl. The list of protocols is optional. 3531 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, 3532 ObjCInterfaceDecl *PrevDecl) const { 3533 if (Decl->TypeForDecl) 3534 return QualType(Decl->TypeForDecl, 0); 3535 3536 if (PrevDecl) { 3537 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); 3538 Decl->TypeForDecl = PrevDecl->TypeForDecl; 3539 return QualType(PrevDecl->TypeForDecl, 0); 3540 } 3541 3542 // Prefer the definition, if there is one. 3543 if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) 3544 Decl = Def; 3545 3546 void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment); 3547 ObjCInterfaceType *T = new (Mem) ObjCInterfaceType(Decl); 3548 Decl->TypeForDecl = T; 3549 Types.push_back(T); 3550 return QualType(T, 0); 3551 } 3552 3553 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique 3554 /// TypeOfExprType AST's (since expression's are never shared). For example, 3555 /// multiple declarations that refer to "typeof(x)" all contain different 3556 /// DeclRefExpr's. This doesn't effect the type checker, since it operates 3557 /// on canonical type's (which are always unique). 3558 QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const { 3559 TypeOfExprType *toe; 3560 if (tofExpr->isTypeDependent()) { 3561 llvm::FoldingSetNodeID ID; 3562 DependentTypeOfExprType::Profile(ID, *this, tofExpr); 3563 3564 void *InsertPos = 0; 3565 DependentTypeOfExprType *Canon 3566 = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 3567 if (Canon) { 3568 // We already have a "canonical" version of an identical, dependent 3569 // typeof(expr) type. Use that as our canonical type. 3570 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, 3571 QualType((TypeOfExprType*)Canon, 0)); 3572 } else { 3573 // Build a new, canonical typeof(expr) type. 3574 Canon 3575 = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr); 3576 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 3577 toe = Canon; 3578 } 3579 } else { 3580 QualType Canonical = getCanonicalType(tofExpr->getType()); 3581 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical); 3582 } 3583 Types.push_back(toe); 3584 return QualType(toe, 0); 3585 } 3586 3587 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 3588 /// TypeOfType AST's. The only motivation to unique these nodes would be 3589 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be 3590 /// an issue. This doesn't effect the type checker, since it operates 3591 /// on canonical type's (which are always unique). 
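///
/// Illustrative example (assumption, not from the original source):
/// \code
///   typeof(int) a;   // GNU typeof
///   typeof(int) b;
/// \endcode
/// 'a' and 'b' may end up referencing distinct TypeOfType nodes, but both
/// share the canonical type 'int', which is all the type checker compares.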
3592 QualType ASTContext::getTypeOfType(QualType tofType) const { 3593 QualType Canonical = getCanonicalType(tofType); 3594 TypeOfType *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical); 3595 Types.push_back(tot); 3596 return QualType(tot, 0); 3597 } 3598 3599 3600 /// getDecltypeType - Unlike many "get<Type>" functions, we don't unique 3601 /// DecltypeType AST's. The only motivation to unique these nodes would be 3602 /// memory savings. Since decltype(t) is fairly uncommon, space shouldn't be 3603 /// an issue. This doesn't effect the type checker, since it operates 3604 /// on canonical types (which are always unique). 3605 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 3606 DecltypeType *dt; 3607 3608 // C++0x [temp.type]p2: 3609 // If an expression e involves a template parameter, decltype(e) denotes a 3610 // unique dependent type. Two such decltype-specifiers refer to the same 3611 // type only if their expressions are equivalent (14.5.6.1). 3612 if (e->isInstantiationDependent()) { 3613 llvm::FoldingSetNodeID ID; 3614 DependentDecltypeType::Profile(ID, *this, e); 3615 3616 void *InsertPos = 0; 3617 DependentDecltypeType *Canon 3618 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 3619 if (Canon) { 3620 // We already have a "canonical" version of an equivalent, dependent 3621 // decltype type. Use that as our canonical type. 3622 dt = new (*this, TypeAlignment) DecltypeType(e, UnderlyingType, 3623 QualType((DecltypeType*)Canon, 0)); 3624 } else { 3625 // Build a new, canonical typeof(expr) type. 3626 Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e); 3627 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 3628 dt = Canon; 3629 } 3630 } else { 3631 dt = new (*this, TypeAlignment) DecltypeType(e, UnderlyingType, 3632 getCanonicalType(UnderlyingType)); 3633 } 3634 Types.push_back(dt); 3635 return QualType(dt, 0); 3636 } 3637 3638 /// getUnaryTransformationType - We don't unique these, since the memory 3639 /// savings are minimal and these are rare. 3640 QualType ASTContext::getUnaryTransformType(QualType BaseType, 3641 QualType UnderlyingType, 3642 UnaryTransformType::UTTKind Kind) 3643 const { 3644 UnaryTransformType *Ty = 3645 new (*this, TypeAlignment) UnaryTransformType (BaseType, UnderlyingType, 3646 Kind, 3647 UnderlyingType->isDependentType() ? 3648 QualType() : getCanonicalType(UnderlyingType)); 3649 Types.push_back(Ty); 3650 return QualType(Ty, 0); 3651 } 3652 3653 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 3654 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 3655 /// canonical deduced-but-dependent 'auto' type. 3656 QualType ASTContext::getAutoType(QualType DeducedType, bool IsDecltypeAuto, 3657 bool IsDependent) const { 3658 if (DeducedType.isNull() && !IsDecltypeAuto && !IsDependent) 3659 return getAutoDeductType(); 3660 3661 // Look in the folding set for an existing type. 
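  // This is the FoldingSet idiom used throughout this file: Profile() hashes
  // the identity (deduced type, decltype(auto)-ness, dependence),
  // FindNodeOrInsertPos() either returns an existing node or hands back an
  // insertion cookie, and InsertNode() files the newly built node under it.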
3662 void *InsertPos = 0; 3663 llvm::FoldingSetNodeID ID; 3664 AutoType::Profile(ID, DeducedType, IsDecltypeAuto, IsDependent); 3665 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 3666 return QualType(AT, 0); 3667 3668 AutoType *AT = new (*this, TypeAlignment) AutoType(DeducedType, 3669 IsDecltypeAuto, 3670 IsDependent); 3671 Types.push_back(AT); 3672 if (InsertPos) 3673 AutoTypes.InsertNode(AT, InsertPos); 3674 return QualType(AT, 0); 3675 } 3676 3677 /// getAtomicType - Return the uniqued reference to the atomic type for 3678 /// the given value type. 3679 QualType ASTContext::getAtomicType(QualType T) const { 3680 // Unique pointers, to guarantee there is only one pointer of a particular 3681 // structure. 3682 llvm::FoldingSetNodeID ID; 3683 AtomicType::Profile(ID, T); 3684 3685 void *InsertPos = 0; 3686 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 3687 return QualType(AT, 0); 3688 3689 // If the atomic value type isn't canonical, this won't be a canonical type 3690 // either, so fill in the canonical type field. 3691 QualType Canonical; 3692 if (!T.isCanonical()) { 3693 Canonical = getAtomicType(getCanonicalType(T)); 3694 3695 // Get the new insert position for the node we care about. 3696 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 3697 assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP; 3698 } 3699 AtomicType *New = new (*this, TypeAlignment) AtomicType(T, Canonical); 3700 Types.push_back(New); 3701 AtomicTypes.InsertNode(New, InsertPos); 3702 return QualType(New, 0); 3703 } 3704 3705 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 3706 QualType ASTContext::getAutoDeductType() const { 3707 if (AutoDeductTy.isNull()) 3708 AutoDeductTy = QualType( 3709 new (*this, TypeAlignment) AutoType(QualType(), /*decltype(auto)*/false, 3710 /*dependent*/false), 3711 0); 3712 return AutoDeductTy; 3713 } 3714 3715 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 3716 QualType ASTContext::getAutoRRefDeductType() const { 3717 if (AutoRRefDeductTy.isNull()) 3718 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 3719 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 3720 return AutoRRefDeductTy; 3721 } 3722 3723 /// getTagDeclType - Return the unique reference to the type for the 3724 /// specified TagDecl (struct/union/class/enum) decl. 3725 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 3726 assert (Decl); 3727 // FIXME: What is the design on getTagDeclType when it requires casting 3728 // away const? mutable? 3729 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 3730 } 3731 3732 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 3733 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 3734 /// needs to agree with the definition in <stddef.h>. 3735 CanQualType ASTContext::getSizeType() const { 3736 return getFromTargetType(Target->getSizeType()); 3737 } 3738 3739 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 3740 CanQualType ASTContext::getIntMaxType() const { 3741 return getFromTargetType(Target->getIntMaxType()); 3742 } 3743 3744 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 3745 CanQualType ASTContext::getUIntMaxType() const { 3746 return getFromTargetType(Target->getUIntMaxType()); 3747 } 3748 3749 /// getSignedWCharType - Return the type of "signed wchar_t". 3750 /// Used when in C++, as a GCC extension. 
3751 QualType ASTContext::getSignedWCharType() const { 3752 // FIXME: derive from "Target" ? 3753 return WCharTy; 3754 } 3755 3756 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 3757 /// Used when in C++, as a GCC extension. 3758 QualType ASTContext::getUnsignedWCharType() const { 3759 // FIXME: derive from "Target" ? 3760 return UnsignedIntTy; 3761 } 3762 3763 QualType ASTContext::getIntPtrType() const { 3764 return getFromTargetType(Target->getIntPtrType()); 3765 } 3766 3767 QualType ASTContext::getUIntPtrType() const { 3768 return getCorrespondingUnsignedType(getIntPtrType()); 3769 } 3770 3771 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 3772 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 3773 QualType ASTContext::getPointerDiffType() const { 3774 return getFromTargetType(Target->getPtrDiffType(0)); 3775 } 3776 3777 /// \brief Return the unique type for "pid_t" defined in 3778 /// <sys/types.h>. We need this to compute the correct type for vfork(). 3779 QualType ASTContext::getProcessIDType() const { 3780 return getFromTargetType(Target->getProcessIDType()); 3781 } 3782 3783 //===----------------------------------------------------------------------===// 3784 // Type Operators 3785 //===----------------------------------------------------------------------===// 3786 3787 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 3788 // Push qualifiers into arrays, and then discard any remaining 3789 // qualifiers. 3790 T = getCanonicalType(T); 3791 T = getVariableArrayDecayedType(T); 3792 const Type *Ty = T.getTypePtr(); 3793 QualType Result; 3794 if (isa<ArrayType>(Ty)) { 3795 Result = getArrayDecayedType(QualType(Ty,0)); 3796 } else if (isa<FunctionType>(Ty)) { 3797 Result = getPointerType(QualType(Ty, 0)); 3798 } else { 3799 Result = QualType(Ty, 0); 3800 } 3801 3802 return CanQualType::CreateUnsafe(Result); 3803 } 3804 3805 QualType ASTContext::getUnqualifiedArrayType(QualType type, 3806 Qualifiers &quals) { 3807 SplitQualType splitType = type.getSplitUnqualifiedType(); 3808 3809 // FIXME: getSplitUnqualifiedType() actually walks all the way to 3810 // the unqualified desugared type and then drops it on the floor. 3811 // We then have to strip that sugar back off with 3812 // getUnqualifiedDesugaredType(), which is silly. 3813 const ArrayType *AT = 3814 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 3815 3816 // If we don't have an array, just use the results in splitType. 3817 if (!AT) { 3818 quals = splitType.Quals; 3819 return QualType(splitType.Ty, 0); 3820 } 3821 3822 // Otherwise, recurse on the array's element type. 3823 QualType elementType = AT->getElementType(); 3824 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 3825 3826 // If that didn't change the element type, AT has no qualifiers, so we 3827 // can just use the results in splitType. 3828 if (elementType == unqualElementType) { 3829 assert(quals.empty()); // from the recursive call 3830 quals = splitType.Quals; 3831 return QualType(splitType.Ty, 0); 3832 } 3833 3834 // Otherwise, add in the qualifiers from the outermost type, then 3835 // build the type back up. 
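  // Worked example (illustrative): for 'const int[4]' the recursion strips
  // 'const' from the element type, so quals == {const} and
  // unqualElementType == 'int'; the code below rebuilds 'int[4]' with the
  // same size and size-modifier but no index qualifiers.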
3836 quals.addConsistentQualifiers(splitType.Quals); 3837 3838 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) { 3839 return getConstantArrayType(unqualElementType, CAT->getSize(), 3840 CAT->getSizeModifier(), 0); 3841 } 3842 3843 if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) { 3844 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 3845 } 3846 3847 if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT)) { 3848 return getVariableArrayType(unqualElementType, 3849 VAT->getSizeExpr(), 3850 VAT->getSizeModifier(), 3851 VAT->getIndexTypeCVRQualifiers(), 3852 VAT->getBracketsRange()); 3853 } 3854 3855 const DependentSizedArrayType *DSAT = cast<DependentSizedArrayType>(AT); 3856 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 3857 DSAT->getSizeModifier(), 0, 3858 SourceRange()); 3859 } 3860 3861 /// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that 3862 /// may be similar (C++ 4.4), replaces T1 and T2 with the type that 3863 /// they point to and return true. If T1 and T2 aren't pointer types 3864 /// or pointer-to-member types, or if they are not similar at this 3865 /// level, returns false and leaves T1 and T2 unchanged. Top-level 3866 /// qualifiers on T1 and T2 are ignored. This function will typically 3867 /// be called in a loop that successively "unwraps" pointer and 3868 /// pointer-to-member types to compare them at each level. 3869 bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) { 3870 const PointerType *T1PtrType = T1->getAs<PointerType>(), 3871 *T2PtrType = T2->getAs<PointerType>(); 3872 if (T1PtrType && T2PtrType) { 3873 T1 = T1PtrType->getPointeeType(); 3874 T2 = T2PtrType->getPointeeType(); 3875 return true; 3876 } 3877 3878 const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(), 3879 *T2MPType = T2->getAs<MemberPointerType>(); 3880 if (T1MPType && T2MPType && 3881 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 3882 QualType(T2MPType->getClass(), 0))) { 3883 T1 = T1MPType->getPointeeType(); 3884 T2 = T2MPType->getPointeeType(); 3885 return true; 3886 } 3887 3888 if (getLangOpts().ObjC1) { 3889 const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(), 3890 *T2OPType = T2->getAs<ObjCObjectPointerType>(); 3891 if (T1OPType && T2OPType) { 3892 T1 = T1OPType->getPointeeType(); 3893 T2 = T2OPType->getPointeeType(); 3894 return true; 3895 } 3896 } 3897 3898 // FIXME: Block pointers, too? 3899 3900 return false; 3901 } 3902 3903 DeclarationNameInfo 3904 ASTContext::getNameForTemplate(TemplateName Name, 3905 SourceLocation NameLoc) const { 3906 switch (Name.getKind()) { 3907 case TemplateName::QualifiedTemplate: 3908 case TemplateName::Template: 3909 // DNInfo work in progress: CHECKME: what about DNLoc? 3910 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 3911 NameLoc); 3912 3913 case TemplateName::OverloadedTemplate: { 3914 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 3915 // DNInfo work in progress: CHECKME: what about DNLoc? 
3916 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 3917 } 3918 3919 case TemplateName::DependentTemplate: { 3920 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 3921 DeclarationName DName; 3922 if (DTN->isIdentifier()) { 3923 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 3924 return DeclarationNameInfo(DName, NameLoc); 3925 } else { 3926 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 3927 // DNInfo work in progress: FIXME: source locations? 3928 DeclarationNameLoc DNLoc; 3929 DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding(); 3930 DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding(); 3931 return DeclarationNameInfo(DName, NameLoc, DNLoc); 3932 } 3933 } 3934 3935 case TemplateName::SubstTemplateTemplateParm: { 3936 SubstTemplateTemplateParmStorage *subst 3937 = Name.getAsSubstTemplateTemplateParm(); 3938 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 3939 NameLoc); 3940 } 3941 3942 case TemplateName::SubstTemplateTemplateParmPack: { 3943 SubstTemplateTemplateParmPackStorage *subst 3944 = Name.getAsSubstTemplateTemplateParmPack(); 3945 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 3946 NameLoc); 3947 } 3948 } 3949 3950 llvm_unreachable("bad template name kind!"); 3951 } 3952 3953 TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const { 3954 switch (Name.getKind()) { 3955 case TemplateName::QualifiedTemplate: 3956 case TemplateName::Template: { 3957 TemplateDecl *Template = Name.getAsTemplateDecl(); 3958 if (TemplateTemplateParmDecl *TTP 3959 = dyn_cast<TemplateTemplateParmDecl>(Template)) 3960 Template = getCanonicalTemplateTemplateParmDecl(TTP); 3961 3962 // The canonical template name is the canonical template declaration. 
3963 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 3964 } 3965 3966 case TemplateName::OverloadedTemplate: 3967 llvm_unreachable("cannot canonicalize overloaded template"); 3968 3969 case TemplateName::DependentTemplate: { 3970 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 3971 assert(DTN && "Non-dependent template names must refer to template decls."); 3972 return DTN->CanonicalTemplateName; 3973 } 3974 3975 case TemplateName::SubstTemplateTemplateParm: { 3976 SubstTemplateTemplateParmStorage *subst 3977 = Name.getAsSubstTemplateTemplateParm(); 3978 return getCanonicalTemplateName(subst->getReplacement()); 3979 } 3980 3981 case TemplateName::SubstTemplateTemplateParmPack: { 3982 SubstTemplateTemplateParmPackStorage *subst 3983 = Name.getAsSubstTemplateTemplateParmPack(); 3984 TemplateTemplateParmDecl *canonParameter 3985 = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack()); 3986 TemplateArgument canonArgPack 3987 = getCanonicalTemplateArgument(subst->getArgumentPack()); 3988 return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack); 3989 } 3990 } 3991 3992 llvm_unreachable("bad template name!"); 3993 } 3994 3995 bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) { 3996 X = getCanonicalTemplateName(X); 3997 Y = getCanonicalTemplateName(Y); 3998 return X.getAsVoidPointer() == Y.getAsVoidPointer(); 3999 } 4000 4001 TemplateArgument 4002 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { 4003 switch (Arg.getKind()) { 4004 case TemplateArgument::Null: 4005 return Arg; 4006 4007 case TemplateArgument::Expression: 4008 return Arg; 4009 4010 case TemplateArgument::Declaration: { 4011 ValueDecl *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl()); 4012 return TemplateArgument(D, Arg.isDeclForReferenceParam()); 4013 } 4014 4015 case TemplateArgument::NullPtr: 4016 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), 4017 /*isNullPtr*/true); 4018 4019 case TemplateArgument::Template: 4020 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate())); 4021 4022 case TemplateArgument::TemplateExpansion: 4023 return TemplateArgument(getCanonicalTemplateName( 4024 Arg.getAsTemplateOrTemplatePattern()), 4025 Arg.getNumTemplateExpansions()); 4026 4027 case TemplateArgument::Integral: 4028 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); 4029 4030 case TemplateArgument::Type: 4031 return TemplateArgument(getCanonicalType(Arg.getAsType())); 4032 4033 case TemplateArgument::Pack: { 4034 if (Arg.pack_size() == 0) 4035 return Arg; 4036 4037 TemplateArgument *CanonArgs 4038 = new (*this) TemplateArgument[Arg.pack_size()]; 4039 unsigned Idx = 0; 4040 for (TemplateArgument::pack_iterator A = Arg.pack_begin(), 4041 AEnd = Arg.pack_end(); 4042 A != AEnd; (void)++A, ++Idx) 4043 CanonArgs[Idx] = getCanonicalTemplateArgument(*A); 4044 4045 return TemplateArgument(CanonArgs, Arg.pack_size()); 4046 } 4047 } 4048 4049 // Silence GCC warning 4050 llvm_unreachable("Unhandled template argument kind"); 4051 } 4052 4053 NestedNameSpecifier * 4054 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { 4055 if (!NNS) 4056 return 0; 4057 4058 switch (NNS->getKind()) { 4059 case NestedNameSpecifier::Identifier: 4060 // Canonicalize the prefix but keep the identifier the same. 
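    // (E.g. the dependent specifier 'T::inner::' keeps the identifier
    //  'inner' while its prefix 'T::' is canonicalized, stripping any
    //  sugar from the prefix type.)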
4061 return NestedNameSpecifier::Create(*this, 4062 getCanonicalNestedNameSpecifier(NNS->getPrefix()), 4063 NNS->getAsIdentifier()); 4064 4065 case NestedNameSpecifier::Namespace: 4066 // A namespace is canonical; build a nested-name-specifier with 4067 // this namespace and no prefix. 4068 return NestedNameSpecifier::Create(*this, 0, 4069 NNS->getAsNamespace()->getOriginalNamespace()); 4070 4071 case NestedNameSpecifier::NamespaceAlias: 4072 // A namespace is canonical; build a nested-name-specifier with 4073 // this namespace and no prefix. 4074 return NestedNameSpecifier::Create(*this, 0, 4075 NNS->getAsNamespaceAlias()->getNamespace() 4076 ->getOriginalNamespace()); 4077 4078 case NestedNameSpecifier::TypeSpec: 4079 case NestedNameSpecifier::TypeSpecWithTemplate: { 4080 QualType T = getCanonicalType(QualType(NNS->getAsType(), 0)); 4081 4082 // If we have some kind of dependent-named type (e.g., "typename T::type"), 4083 // break it apart into its prefix and identifier, then reconsititute those 4084 // as the canonical nested-name-specifier. This is required to canonicalize 4085 // a dependent nested-name-specifier involving typedefs of dependent-name 4086 // types, e.g., 4087 // typedef typename T::type T1; 4088 // typedef typename T1::type T2; 4089 if (const DependentNameType *DNT = T->getAs<DependentNameType>()) 4090 return NestedNameSpecifier::Create(*this, DNT->getQualifier(), 4091 const_cast<IdentifierInfo *>(DNT->getIdentifier())); 4092 4093 // Otherwise, just canonicalize the type, and force it to be a TypeSpec. 4094 // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the 4095 // first place? 4096 return NestedNameSpecifier::Create(*this, 0, false, 4097 const_cast<Type*>(T.getTypePtr())); 4098 } 4099 4100 case NestedNameSpecifier::Global: 4101 // The global specifier is canonical and unique. 4102 return NNS; 4103 } 4104 4105 llvm_unreachable("Invalid NestedNameSpecifier::Kind!"); 4106 } 4107 4108 4109 const ArrayType *ASTContext::getAsArrayType(QualType T) const { 4110 // Handle the non-qualified case efficiently. 4111 if (!T.hasLocalQualifiers()) { 4112 // Handle the common positive case fast. 4113 if (const ArrayType *AT = dyn_cast<ArrayType>(T)) 4114 return AT; 4115 } 4116 4117 // Handle the common negative case fast. 4118 if (!isa<ArrayType>(T.getCanonicalType())) 4119 return 0; 4120 4121 // Apply any qualifiers from the array type to the element type. This 4122 // implements C99 6.7.3p8: "If the specification of an array type includes 4123 // any type qualifiers, the element type is so qualified, not the array type." 4124 4125 // If we get here, we either have type qualifiers on the type, or we have 4126 // sugar such as a typedef in the way. If we have type qualifiers on the type 4127 // we must propagate them down into the element type. 4128 4129 SplitQualType split = T.getSplitDesugaredType(); 4130 Qualifiers qs = split.Quals; 4131 4132 // If we have a simple case, just return now. 4133 const ArrayType *ATy = dyn_cast<ArrayType>(split.Ty); 4134 if (ATy == 0 || qs.empty()) 4135 return ATy; 4136 4137 // Otherwise, we have an array and we have qualifiers on it. Push the 4138 // qualifiers into the array element type and return a new array type. 
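  // Worked example (illustrative), per C99 6.7.3p8: given
  //   typedef int A[4];
  //   const A x;
  // the desugared split is {const, int[4]}, so the element type becomes
  // 'const int' and the array type returned below is 'const int[4]'.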
4139 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 4140 4141 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(ATy)) 4142 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 4143 CAT->getSizeModifier(), 4144 CAT->getIndexTypeCVRQualifiers())); 4145 if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(ATy)) 4146 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 4147 IAT->getSizeModifier(), 4148 IAT->getIndexTypeCVRQualifiers())); 4149 4150 if (const DependentSizedArrayType *DSAT 4151 = dyn_cast<DependentSizedArrayType>(ATy)) 4152 return cast<ArrayType>( 4153 getDependentSizedArrayType(NewEltTy, 4154 DSAT->getSizeExpr(), 4155 DSAT->getSizeModifier(), 4156 DSAT->getIndexTypeCVRQualifiers(), 4157 DSAT->getBracketsRange())); 4158 4159 const VariableArrayType *VAT = cast<VariableArrayType>(ATy); 4160 return cast<ArrayType>(getVariableArrayType(NewEltTy, 4161 VAT->getSizeExpr(), 4162 VAT->getSizeModifier(), 4163 VAT->getIndexTypeCVRQualifiers(), 4164 VAT->getBracketsRange())); 4165 } 4166 4167 QualType ASTContext::getAdjustedParameterType(QualType T) const { 4168 if (T->isArrayType() || T->isFunctionType()) 4169 return getDecayedType(T); 4170 return T; 4171 } 4172 4173 QualType ASTContext::getSignatureParameterType(QualType T) const { 4174 T = getVariableArrayDecayedType(T); 4175 T = getAdjustedParameterType(T); 4176 return T.getUnqualifiedType(); 4177 } 4178 4179 /// getArrayDecayedType - Return the properly qualified result of decaying the 4180 /// specified array type to a pointer. This operation is non-trivial when 4181 /// handling typedefs etc. The canonical type of "T" must be an array type, 4182 /// this returns a pointer to a properly qualified element of the array. 4183 /// 4184 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 4185 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 4186 // Get the element type with 'getAsArrayType' so that we don't lose any 4187 // typedefs in the element type of the array. This also handles propagation 4188 // of type qualifiers from the array type into the element type if present 4189 // (C99 6.7.3p8). 4190 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 4191 assert(PrettyArrayType && "Not an array type!"); 4192 4193 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 4194 4195 // int x[restrict 4] -> int *restrict 4196 return getQualifiedType(PtrTy, PrettyArrayType->getIndexTypeQualifiers()); 4197 } 4198 4199 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 4200 return getBaseElementType(array->getElementType()); 4201 } 4202 4203 QualType ASTContext::getBaseElementType(QualType type) const { 4204 Qualifiers qs; 4205 while (true) { 4206 SplitQualType split = type.getSplitDesugaredType(); 4207 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 4208 if (!array) break; 4209 4210 type = array->getElementType(); 4211 qs.addConsistentQualifiers(split.Quals); 4212 } 4213 4214 return getQualifiedType(type, qs); 4215 } 4216 4217 /// getConstantArrayElementCount - Returns number of constant array elements. 4218 uint64_t 4219 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 4220 uint64_t ElementCount = 1; 4221 do { 4222 ElementCount *= CA->getSize().getZExtValue(); 4223 CA = dyn_cast_or_null<ConstantArrayType>( 4224 CA->getElementType()->getAsArrayTypeUnsafe()); 4225 } while (CA); 4226 return ElementCount; 4227 } 4228 4229 /// getFloatingRank - Return a relative rank for floating point types. 
4230 /// This routine will assert if passed a built-in type that isn't a float. 4231 static FloatingRank getFloatingRank(QualType T) { 4232 if (const ComplexType *CT = T->getAs<ComplexType>()) 4233 return getFloatingRank(CT->getElementType()); 4234 4235 assert(T->getAs<BuiltinType>() && "getFloatingRank(): not a floating type"); 4236 switch (T->getAs<BuiltinType>()->getKind()) { 4237 default: llvm_unreachable("getFloatingRank(): not a floating type"); 4238 case BuiltinType::Half: return HalfRank; 4239 case BuiltinType::Float: return FloatRank; 4240 case BuiltinType::Double: return DoubleRank; 4241 case BuiltinType::LongDouble: return LongDoubleRank; 4242 } 4243 } 4244 4245 /// getFloatingTypeOfSizeWithinDomain - Returns a real floating 4246 /// point or a complex type (based on typeDomain/typeSize). 4247 /// 'typeDomain' is a real floating point or complex type. 4248 /// 'typeSize' is a real floating point or complex type. 4249 QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size, 4250 QualType Domain) const { 4251 FloatingRank EltRank = getFloatingRank(Size); 4252 if (Domain->isComplexType()) { 4253 switch (EltRank) { 4254 case HalfRank: llvm_unreachable("Complex half is not supported"); 4255 case FloatRank: return FloatComplexTy; 4256 case DoubleRank: return DoubleComplexTy; 4257 case LongDoubleRank: return LongDoubleComplexTy; 4258 } 4259 } 4260 4261 assert(Domain->isRealFloatingType() && "Unknown domain!"); 4262 switch (EltRank) { 4263 case HalfRank: return HalfTy; 4264 case FloatRank: return FloatTy; 4265 case DoubleRank: return DoubleTy; 4266 case LongDoubleRank: return LongDoubleTy; 4267 } 4268 llvm_unreachable("getFloatingRank(): illegal value for rank"); 4269 } 4270 4271 /// getFloatingTypeOrder - Compare the rank of the two specified floating 4272 /// point types, ignoring the domain of the type (i.e. 'double' == 4273 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 4274 /// LHS < RHS, return -1. 4275 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 4276 FloatingRank LHSR = getFloatingRank(LHS); 4277 FloatingRank RHSR = getFloatingRank(RHS); 4278 4279 if (LHSR == RHSR) 4280 return 0; 4281 if (LHSR > RHSR) 4282 return 1; 4283 return -1; 4284 } 4285 4286 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 4287 /// routine will assert if passed a built-in type that isn't an integer or enum, 4288 /// or if it is not canonicalized. 
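///
/// The result encodes the bit-width in the high bits and the standard rank
/// order in the low bits, so wider types always compare greater. For example
/// (assuming a target with 16-bit short and 32-bit int):
/// short -> 3 + (16 << 3) = 131, int -> 4 + (32 << 3) = 260.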
4289 unsigned ASTContext::getIntegerRank(const Type *T) const { 4290 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 4291 4292 switch (cast<BuiltinType>(T)->getKind()) { 4293 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 4294 case BuiltinType::Bool: 4295 return 1 + (getIntWidth(BoolTy) << 3); 4296 case BuiltinType::Char_S: 4297 case BuiltinType::Char_U: 4298 case BuiltinType::SChar: 4299 case BuiltinType::UChar: 4300 return 2 + (getIntWidth(CharTy) << 3); 4301 case BuiltinType::Short: 4302 case BuiltinType::UShort: 4303 return 3 + (getIntWidth(ShortTy) << 3); 4304 case BuiltinType::Int: 4305 case BuiltinType::UInt: 4306 return 4 + (getIntWidth(IntTy) << 3); 4307 case BuiltinType::Long: 4308 case BuiltinType::ULong: 4309 return 5 + (getIntWidth(LongTy) << 3); 4310 case BuiltinType::LongLong: 4311 case BuiltinType::ULongLong: 4312 return 6 + (getIntWidth(LongLongTy) << 3); 4313 case BuiltinType::Int128: 4314 case BuiltinType::UInt128: 4315 return 7 + (getIntWidth(Int128Ty) << 3); 4316 } 4317 } 4318 4319 /// \brief Whether this is a promotable bitfield reference according 4320 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 4321 /// 4322 /// \returns the type this bit-field will promote to, or NULL if no 4323 /// promotion occurs. 4324 QualType ASTContext::isPromotableBitField(Expr *E) const { 4325 if (E->isTypeDependent() || E->isValueDependent()) 4326 return QualType(); 4327 4328 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 4329 if (!Field) 4330 return QualType(); 4331 4332 QualType FT = Field->getType(); 4333 4334 uint64_t BitWidth = Field->getBitWidthValue(*this); 4335 uint64_t IntSize = getTypeSize(IntTy); 4336 // GCC extension compatibility: if the bit-field size is less than or equal 4337 // to the size of int, it gets promoted no matter what its type is. 4338 // For instance, unsigned long bf : 4 gets promoted to signed int. 4339 if (BitWidth < IntSize) 4340 return IntTy; 4341 4342 if (BitWidth == IntSize) 4343 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 4344 4345 // Types bigger than int are not subject to promotions, and therefore act 4346 // like the base type. 4347 // FIXME: This doesn't quite match what gcc does, but what gcc does here 4348 // is ridiculous. 4349 return QualType(); 4350 } 4351 4352 /// getPromotedIntegerType - Returns the type that Promotable will 4353 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 4354 /// integer type. 4355 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 4356 assert(!Promotable.isNull()); 4357 assert(Promotable->isPromotableIntegerType()); 4358 if (const EnumType *ET = Promotable->getAs<EnumType>()) 4359 return ET->getDecl()->getPromotionType(); 4360 4361 if (const BuiltinType *BT = Promotable->getAs<BuiltinType>()) { 4362 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 4363 // (3.9.1) can be converted to a prvalue of the first of the following 4364 // types that can represent all the values of its underlying type: 4365 // int, unsigned int, long int, unsigned long int, long long int, or 4366 // unsigned long long int [...] 4367 // FIXME: Is there some better way to compute this? 
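  // For instance (illustrative, target-dependent): a signed 32-bit wchar_t
  // promotes to 'int', while a 32-bit char32_t promotes to 'unsigned int',
  // because 'int' cannot represent all of char32_t's values.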
4368 if (BT->getKind() == BuiltinType::WChar_S || 4369 BT->getKind() == BuiltinType::WChar_U || 4370 BT->getKind() == BuiltinType::Char16 || 4371 BT->getKind() == BuiltinType::Char32) { 4372 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 4373 uint64_t FromSize = getTypeSize(BT); 4374 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 4375 LongLongTy, UnsignedLongLongTy }; 4376 for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) { 4377 uint64_t ToSize = getTypeSize(PromoteTypes[Idx]); 4378 if (FromSize < ToSize || 4379 (FromSize == ToSize && 4380 FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) 4381 return PromoteTypes[Idx]; 4382 } 4383 llvm_unreachable("char type should fit into long long"); 4384 } 4385 } 4386 4387 // At this point, we should have a signed or unsigned integer type. 4388 if (Promotable->isSignedIntegerType()) 4389 return IntTy; 4390 uint64_t PromotableSize = getIntWidth(Promotable); 4391 uint64_t IntSize = getIntWidth(IntTy); 4392 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 4393 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 4394 } 4395 4396 /// \brief Recurses in pointer/array types until it finds an objc retainable 4397 /// type and returns its ownership. 4398 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 4399 while (!T.isNull()) { 4400 if (T.getObjCLifetime() != Qualifiers::OCL_None) 4401 return T.getObjCLifetime(); 4402 if (T->isArrayType()) 4403 T = getBaseElementType(T); 4404 else if (const PointerType *PT = T->getAs<PointerType>()) 4405 T = PT->getPointeeType(); 4406 else if (const ReferenceType *RT = T->getAs<ReferenceType>()) 4407 T = RT->getPointeeType(); 4408 else 4409 break; 4410 } 4411 4412 return Qualifiers::OCL_None; 4413 } 4414 4415 /// getIntegerTypeOrder - Returns the highest ranked integer type: 4416 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If 4417 /// LHS < RHS, return -1. 4418 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const { 4419 const Type *LHSC = getCanonicalType(LHS).getTypePtr(); 4420 const Type *RHSC = getCanonicalType(RHS).getTypePtr(); 4421 if (LHSC == RHSC) return 0; 4422 4423 bool LHSUnsigned = LHSC->isUnsignedIntegerType(); 4424 bool RHSUnsigned = RHSC->isUnsignedIntegerType(); 4425 4426 unsigned LHSRank = getIntegerRank(LHSC); 4427 unsigned RHSRank = getIntegerRank(RHSC); 4428 4429 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned. 4430 if (LHSRank == RHSRank) return 0; 4431 return LHSRank > RHSRank ? 1 : -1; 4432 } 4433 4434 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa. 4435 if (LHSUnsigned) { 4436 // If the unsigned [LHS] type is larger, return it. 4437 if (LHSRank >= RHSRank) 4438 return 1; 4439 4440 // If the signed type can represent all values of the unsigned type, it 4441 // wins. Because we are dealing with 2's complement and types that are 4442 // powers of two larger than each other, this is always safe. 4443 return -1; 4444 } 4445 4446 // If the unsigned [RHS] type is larger, return it. 4447 if (RHSRank >= LHSRank) 4448 return -1; 4449 4450 // If the signed type can represent all values of the unsigned type, it 4451 // wins. Because we are dealing with 2's complement and types that are 4452 // powers of two larger than each other, this is always safe. 
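  // (Example, LP64: LHS 'long' vs RHS 'unsigned int' -- the signed type has
  //  the higher rank and can represent every 'unsigned int' value, so it
  //  wins and we return 1.)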
4453 return 1; 4454 } 4455 4456 static RecordDecl * 4457 CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK, 4458 DeclContext *DC, IdentifierInfo *Id) { 4459 SourceLocation Loc; 4460 if (Ctx.getLangOpts().CPlusPlus) 4461 return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id); 4462 else 4463 return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id); 4464 } 4465 4466 // getCFConstantStringType - Return the type used for constant CFStrings. 4467 QualType ASTContext::getCFConstantStringType() const { 4468 if (!CFConstantStringTypeDecl) { 4469 CFConstantStringTypeDecl = 4470 CreateRecordDecl(*this, TTK_Struct, TUDecl, 4471 &Idents.get("NSConstantString")); 4472 CFConstantStringTypeDecl->startDefinition(); 4473 4474 QualType FieldTypes[4]; 4475 4476 // const int *isa; 4477 FieldTypes[0] = getPointerType(IntTy.withConst()); 4478 // int flags; 4479 FieldTypes[1] = IntTy; 4480 // const char *str; 4481 FieldTypes[2] = getPointerType(CharTy.withConst()); 4482 // long length; 4483 FieldTypes[3] = LongTy; 4484 4485 // Create fields 4486 for (unsigned i = 0; i < 4; ++i) { 4487 FieldDecl *Field = FieldDecl::Create(*this, CFConstantStringTypeDecl, 4488 SourceLocation(), 4489 SourceLocation(), 0, 4490 FieldTypes[i], /*TInfo=*/0, 4491 /*BitWidth=*/0, 4492 /*Mutable=*/false, 4493 ICIS_NoInit); 4494 Field->setAccess(AS_public); 4495 CFConstantStringTypeDecl->addDecl(Field); 4496 } 4497 4498 CFConstantStringTypeDecl->completeDefinition(); 4499 } 4500 4501 return getTagDeclType(CFConstantStringTypeDecl); 4502 } 4503 4504 QualType ASTContext::getObjCSuperType() const { 4505 if (ObjCSuperType.isNull()) { 4506 RecordDecl *ObjCSuperTypeDecl = 4507 CreateRecordDecl(*this, TTK_Struct, TUDecl, &Idents.get("objc_super")); 4508 TUDecl->addDecl(ObjCSuperTypeDecl); 4509 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 4510 } 4511 return ObjCSuperType; 4512 } 4513 4514 void ASTContext::setCFConstantStringType(QualType T) { 4515 const RecordType *Rec = T->getAs<RecordType>(); 4516 assert(Rec && "Invalid CFConstantStringType"); 4517 CFConstantStringTypeDecl = Rec->getDecl(); 4518 } 4519 4520 QualType ASTContext::getBlockDescriptorType() const { 4521 if (BlockDescriptorType) 4522 return getTagDeclType(BlockDescriptorType); 4523 4524 RecordDecl *T; 4525 // FIXME: Needs the FlagAppleBlock bit. 4526 T = CreateRecordDecl(*this, TTK_Struct, TUDecl, 4527 &Idents.get("__block_descriptor")); 4528 T->startDefinition(); 4529 4530 QualType FieldTypes[] = { 4531 UnsignedLongTy, 4532 UnsignedLongTy, 4533 }; 4534 4535 static const char *const FieldNames[] = { 4536 "reserved", 4537 "Size" 4538 }; 4539 4540 for (size_t i = 0; i < 2; ++i) { 4541 FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(), 4542 SourceLocation(), 4543 &Idents.get(FieldNames[i]), 4544 FieldTypes[i], /*TInfo=*/0, 4545 /*BitWidth=*/0, 4546 /*Mutable=*/false, 4547 ICIS_NoInit); 4548 Field->setAccess(AS_public); 4549 T->addDecl(Field); 4550 } 4551 4552 T->completeDefinition(); 4553 4554 BlockDescriptorType = T; 4555 4556 return getTagDeclType(BlockDescriptorType); 4557 } 4558 4559 QualType ASTContext::getBlockDescriptorExtendedType() const { 4560 if (BlockDescriptorExtendedType) 4561 return getTagDeclType(BlockDescriptorExtendedType); 4562 4563 RecordDecl *T; 4564 // FIXME: Needs the FlagAppleBlock bit. 
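  // The record built below corresponds roughly to (illustrative sketch):
  //   struct __block_descriptor_withcopydispose {
  //     unsigned long reserved;
  //     unsigned long Size;
  //     void **CopyFuncPtr;
  //     void **DestroyFuncPtr;
  //   };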
4565 T = CreateRecordDecl(*this, TTK_Struct, TUDecl, 4566 &Idents.get("__block_descriptor_withcopydispose")); 4567 T->startDefinition(); 4568 4569 QualType FieldTypes[] = { 4570 UnsignedLongTy, 4571 UnsignedLongTy, 4572 getPointerType(VoidPtrTy), 4573 getPointerType(VoidPtrTy) 4574 }; 4575 4576 static const char *const FieldNames[] = { 4577 "reserved", 4578 "Size", 4579 "CopyFuncPtr", 4580 "DestroyFuncPtr" 4581 }; 4582 4583 for (size_t i = 0; i < 4; ++i) { 4584 FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(), 4585 SourceLocation(), 4586 &Idents.get(FieldNames[i]), 4587 FieldTypes[i], /*TInfo=*/0, 4588 /*BitWidth=*/0, 4589 /*Mutable=*/false, 4590 ICIS_NoInit); 4591 Field->setAccess(AS_public); 4592 T->addDecl(Field); 4593 } 4594 4595 T->completeDefinition(); 4596 4597 BlockDescriptorExtendedType = T; 4598 4599 return getTagDeclType(BlockDescriptorExtendedType); 4600 } 4601 4602 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 4603 /// requires copy/dispose. Note that this must match the logic 4604 /// in buildByrefHelpers. 4605 bool ASTContext::BlockRequiresCopying(QualType Ty, 4606 const VarDecl *D) { 4607 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 4608 const Expr *copyExpr = getBlockVarCopyInits(D); 4609 if (!copyExpr && record->hasTrivialDestructor()) return false; 4610 4611 return true; 4612 } 4613 4614 if (!Ty->isObjCRetainableType()) return false; 4615 4616 Qualifiers qs = Ty.getQualifiers(); 4617 4618 // If we have lifetime, that dominates. 4619 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 4620 assert(getLangOpts().ObjCAutoRefCount); 4621 4622 switch (lifetime) { 4623 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 4624 4625 // These are just bits as far as the runtime is concerned. 4626 case Qualifiers::OCL_ExplicitNone: 4627 case Qualifiers::OCL_Autoreleasing: 4628 return false; 4629 4630 // Tell the runtime that this is ARC __weak, called by the 4631 // byref routines. 4632 case Qualifiers::OCL_Weak: 4633 // ARC __strong __block variables need to be retained. 4634 case Qualifiers::OCL_Strong: 4635 return true; 4636 } 4637 llvm_unreachable("fell out of lifetime switch!"); 4638 } 4639 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 4640 Ty->isObjCObjectPointerType()); 4641 } 4642 4643 bool ASTContext::getByrefLifetime(QualType Ty, 4644 Qualifiers::ObjCLifetime &LifeTime, 4645 bool &HasByrefExtendedLayout) const { 4646 4647 if (!getLangOpts().ObjC1 || 4648 getLangOpts().getGC() != LangOptions::NonGC) 4649 return false; 4650 4651 HasByrefExtendedLayout = false; 4652 if (Ty->isRecordType()) { 4653 HasByrefExtendedLayout = true; 4654 LifeTime = Qualifiers::OCL_None; 4655 } 4656 else if (getLangOpts().ObjCAutoRefCount) 4657 LifeTime = Ty.getObjCLifetime(); 4658 // MRR. 
4659 else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) 4660 LifeTime = Qualifiers::OCL_ExplicitNone; 4661 else 4662 LifeTime = Qualifiers::OCL_None; 4663 return true; 4664 } 4665 4666 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 4667 if (!ObjCInstanceTypeDecl) 4668 ObjCInstanceTypeDecl = TypedefDecl::Create(*this, 4669 getTranslationUnitDecl(), 4670 SourceLocation(), 4671 SourceLocation(), 4672 &Idents.get("instancetype"), 4673 getTrivialTypeSourceInfo(getObjCIdType())); 4674 return ObjCInstanceTypeDecl; 4675 } 4676 4677 // This returns true if a type has been typedefed to BOOL: 4678 // typedef <type> BOOL; 4679 static bool isTypeTypedefedAsBOOL(QualType T) { 4680 if (const TypedefType *TT = dyn_cast<TypedefType>(T)) 4681 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 4682 return II->isStr("BOOL"); 4683 4684 return false; 4685 } 4686 4687 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 4688 /// purpose. 4689 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 4690 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 4691 return CharUnits::Zero(); 4692 4693 CharUnits sz = getTypeSizeInChars(type); 4694 4695 // Make all integer and enum types at least as large as an int 4696 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 4697 sz = std::max(sz, getTypeSizeInChars(IntTy)); 4698 // Treat arrays as pointers, since that's how they're passed in. 4699 else if (type->isArrayType()) 4700 sz = getTypeSizeInChars(VoidPtrTy); 4701 return sz; 4702 } 4703 4704 static inline 4705 std::string charUnitsToString(const CharUnits &CU) { 4706 return llvm::itostr(CU.getQuantity()); 4707 } 4708 4709 /// getObjCEncodingForBlock - Return the encoded type for this block 4710 /// declaration. 4711 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 4712 std::string S; 4713 4714 const BlockDecl *Decl = Expr->getBlockDecl(); 4715 QualType BlockTy = 4716 Expr->getType()->getAs<BlockPointerType>()->getPointeeType(); 4717 // Encode result type. 4718 if (getLangOpts().EncodeExtendedBlockSig) 4719 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, 4720 BlockTy->getAs<FunctionType>()->getResultType(), 4721 S, true /*Extended*/); 4722 else 4723 getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getResultType(), 4724 S); 4725 // Compute size of all parameters. 4726 // Start with computing size of a pointer in number of bytes. 4727 // FIXME: There might(should) be a better way of doing this computation! 4728 SourceLocation Loc; 4729 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 4730 CharUnits ParmOffset = PtrSize; 4731 for (BlockDecl::param_const_iterator PI = Decl->param_begin(), 4732 E = Decl->param_end(); PI != E; ++PI) { 4733 QualType PType = (*PI)->getType(); 4734 CharUnits sz = getObjCEncodingTypeSize(PType); 4735 if (sz.isZero()) 4736 continue; 4737 assert (sz.isPositive() && "BlockExpr - Incomplete param type"); 4738 ParmOffset += sz; 4739 } 4740 // Size of the argument frame 4741 S += charUnitsToString(ParmOffset); 4742 // Block pointer and offset. 4743 S += "@?0"; 4744 4745 // Argument types. 
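  // (Illustrative example, assuming the default non-extended encoding on a
  //  target with 8-byte pointers: a block of type 'int (^)(int, char *)'
  //  comes out as "i20@?0i8*12", that is, 'i' for the int result, 20 bytes
  //  of argument frame, the block literal at offset 0, the int at offset 8,
  //  and the char* at offset 12.)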
4746 ParmOffset = PtrSize; 4747 for (BlockDecl::param_const_iterator PI = Decl->param_begin(), E = 4748 Decl->param_end(); PI != E; ++PI) { 4749 ParmVarDecl *PVDecl = *PI; 4750 QualType PType = PVDecl->getOriginalType(); 4751 if (const ArrayType *AT = 4752 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 4753 // Use array's original type only if it has known number of 4754 // elements. 4755 if (!isa<ConstantArrayType>(AT)) 4756 PType = PVDecl->getType(); 4757 } else if (PType->isFunctionType()) 4758 PType = PVDecl->getType(); 4759 if (getLangOpts().EncodeExtendedBlockSig) 4760 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 4761 S, true /*Extended*/); 4762 else 4763 getObjCEncodingForType(PType, S); 4764 S += charUnitsToString(ParmOffset); 4765 ParmOffset += getObjCEncodingTypeSize(PType); 4766 } 4767 4768 return S; 4769 } 4770 4771 bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl, 4772 std::string& S) { 4773 // Encode result type. 4774 getObjCEncodingForType(Decl->getResultType(), S); 4775 CharUnits ParmOffset; 4776 // Compute size of all parameters. 4777 for (FunctionDecl::param_const_iterator PI = Decl->param_begin(), 4778 E = Decl->param_end(); PI != E; ++PI) { 4779 QualType PType = (*PI)->getType(); 4780 CharUnits sz = getObjCEncodingTypeSize(PType); 4781 if (sz.isZero()) 4782 continue; 4783 4784 assert (sz.isPositive() && 4785 "getObjCEncodingForFunctionDecl - Incomplete param type"); 4786 ParmOffset += sz; 4787 } 4788 S += charUnitsToString(ParmOffset); 4789 ParmOffset = CharUnits::Zero(); 4790 4791 // Argument types. 4792 for (FunctionDecl::param_const_iterator PI = Decl->param_begin(), 4793 E = Decl->param_end(); PI != E; ++PI) { 4794 ParmVarDecl *PVDecl = *PI; 4795 QualType PType = PVDecl->getOriginalType(); 4796 if (const ArrayType *AT = 4797 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 4798 // Use array's original type only if it has known number of 4799 // elements. 4800 if (!isa<ConstantArrayType>(AT)) 4801 PType = PVDecl->getType(); 4802 } else if (PType->isFunctionType()) 4803 PType = PVDecl->getType(); 4804 getObjCEncodingForType(PType, S); 4805 S += charUnitsToString(ParmOffset); 4806 ParmOffset += getObjCEncodingTypeSize(PType); 4807 } 4808 4809 return false; 4810 } 4811 4812 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 4813 /// method parameter or return type. If Extended, include class names and 4814 /// block object types. 4815 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 4816 QualType T, std::string& S, 4817 bool Extended) const { 4818 // Encode type qualifer, 'in', 'inout', etc. for the parameter. 4819 getObjCEncodingForTypeQualifier(QT, S); 4820 // Encode parameter type. 4821 getObjCEncodingForTypeImpl(T, S, true, true, 0, 4822 true /*OutermostType*/, 4823 false /*EncodingProperty*/, 4824 false /*StructField*/, 4825 Extended /*EncodeBlockParameters*/, 4826 Extended /*EncodeClassNames*/); 4827 } 4828 4829 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 4830 /// declaration. 4831 bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 4832 std::string& S, 4833 bool Extended) const { 4834 // FIXME: This is not very efficient. 4835 // Encode return type. 4836 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 4837 Decl->getResultType(), S, Extended); 4838 // Compute size of all parameters. 4839 // Start with computing size of a pointer in number of bytes. 
4840 // FIXME: There might(should) be a better way of doing this computation! 4841 SourceLocation Loc; 4842 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 4843 // The first two arguments (self and _cmd) are pointers; account for 4844 // their size. 4845 CharUnits ParmOffset = 2 * PtrSize; 4846 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 4847 E = Decl->sel_param_end(); PI != E; ++PI) { 4848 QualType PType = (*PI)->getType(); 4849 CharUnits sz = getObjCEncodingTypeSize(PType); 4850 if (sz.isZero()) 4851 continue; 4852 4853 assert (sz.isPositive() && 4854 "getObjCEncodingForMethodDecl - Incomplete param type"); 4855 ParmOffset += sz; 4856 } 4857 S += charUnitsToString(ParmOffset); 4858 S += "@0:"; 4859 S += charUnitsToString(PtrSize); 4860 4861 // Argument types. 4862 ParmOffset = 2 * PtrSize; 4863 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 4864 E = Decl->sel_param_end(); PI != E; ++PI) { 4865 const ParmVarDecl *PVDecl = *PI; 4866 QualType PType = PVDecl->getOriginalType(); 4867 if (const ArrayType *AT = 4868 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 4869 // Use array's original type only if it has known number of 4870 // elements. 4871 if (!isa<ConstantArrayType>(AT)) 4872 PType = PVDecl->getType(); 4873 } else if (PType->isFunctionType()) 4874 PType = PVDecl->getType(); 4875 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 4876 PType, S, Extended); 4877 S += charUnitsToString(ParmOffset); 4878 ParmOffset += getObjCEncodingTypeSize(PType); 4879 } 4880 4881 return false; 4882 } 4883 4884 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 4885 /// property declaration. If non-NULL, Container must be either an 4886 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 4887 /// NULL when getting encodings for protocol properties. 4888 /// Property attributes are stored as a comma-delimited C string. The simple 4889 /// attributes readonly and bycopy are encoded as single characters. The 4890 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 4891 /// encoded as single characters, followed by an identifier. Property types 4892 /// are also encoded as a parametrized attribute. The characters used to encode 4893 /// these attributes are defined by the following enumeration: 4894 /// @code 4895 /// enum PropertyAttributes { 4896 /// kPropertyReadOnly = 'R', // property is read-only. 4897 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 4898 /// kPropertyByref = '&', // property is a reference to the value last assigned 4899 /// kPropertyDynamic = 'D', // property is dynamic 4900 /// kPropertyGetter = 'G', // followed by getter selector name 4901 /// kPropertySetter = 'S', // followed by setter selector name 4902 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 4903 /// kPropertyType = 'T' // followed by old-style type encoding. 4904 /// kPropertyWeak = 'W' // 'weak' property 4905 /// kPropertyStrong = 'P' // property GC'able 4906 /// kPropertyNonAtomic = 'N' // property non-atomic 4907 /// }; 4908 /// @endcode 4909 void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 4910 const Decl *Container, 4911 std::string& S) const { 4912 // Collect information from the property implementation decl(s). 4913 bool Dynamic = false; 4914 ObjCPropertyImplDecl *SynthesizePID = 0; 4915 4916 // FIXME: Duplicated code due to poor abstraction. 
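  // (Illustrative example: for a synthesized
  //    @property (nonatomic, copy) NSString *name;
  //  backed by the ivar '_name', this routine produces
  //    T@"NSString",C,N,V_name
  //  that is: the type, then copy, nonatomic, and the backing ivar.)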
4917 if (Container) { 4918 if (const ObjCCategoryImplDecl *CID = 4919 dyn_cast<ObjCCategoryImplDecl>(Container)) { 4920 for (ObjCCategoryImplDecl::propimpl_iterator 4921 i = CID->propimpl_begin(), e = CID->propimpl_end(); 4922 i != e; ++i) { 4923 ObjCPropertyImplDecl *PID = *i; 4924 if (PID->getPropertyDecl() == PD) { 4925 if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) { 4926 Dynamic = true; 4927 } else { 4928 SynthesizePID = PID; 4929 } 4930 } 4931 } 4932 } else { 4933 const ObjCImplementationDecl *OID=cast<ObjCImplementationDecl>(Container); 4934 for (ObjCCategoryImplDecl::propimpl_iterator 4935 i = OID->propimpl_begin(), e = OID->propimpl_end(); 4936 i != e; ++i) { 4937 ObjCPropertyImplDecl *PID = *i; 4938 if (PID->getPropertyDecl() == PD) { 4939 if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) { 4940 Dynamic = true; 4941 } else { 4942 SynthesizePID = PID; 4943 } 4944 } 4945 } 4946 } 4947 } 4948 4949 // FIXME: This is not very efficient. 4950 S = "T"; 4951 4952 // Encode result type. 4953 // GCC has some special rules regarding encoding of properties which 4954 // closely resembles encoding of ivars. 4955 getObjCEncodingForTypeImpl(PD->getType(), S, true, true, 0, 4956 true /* outermost type */, 4957 true /* encoding for property */); 4958 4959 if (PD->isReadOnly()) { 4960 S += ",R"; 4961 if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy) 4962 S += ",C"; 4963 if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain) 4964 S += ",&"; 4965 } else { 4966 switch (PD->getSetterKind()) { 4967 case ObjCPropertyDecl::Assign: break; 4968 case ObjCPropertyDecl::Copy: S += ",C"; break; 4969 case ObjCPropertyDecl::Retain: S += ",&"; break; 4970 case ObjCPropertyDecl::Weak: S += ",W"; break; 4971 } 4972 } 4973 4974 // It really isn't clear at all what this means, since properties 4975 // are "dynamic by default". 4976 if (Dynamic) 4977 S += ",D"; 4978 4979 if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic) 4980 S += ",N"; 4981 4982 if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) { 4983 S += ",G"; 4984 S += PD->getGetterName().getAsString(); 4985 } 4986 4987 if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) { 4988 S += ",S"; 4989 S += PD->getSetterName().getAsString(); 4990 } 4991 4992 if (SynthesizePID) { 4993 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 4994 S += ",V"; 4995 S += OID->getNameAsString(); 4996 } 4997 4998 // FIXME: OBJCGC: weak & strong 4999 } 5000 5001 /// getLegacyIntegralTypeEncoding - 5002 /// Another legacy compatibility encoding: 32-bit longs are encoded as 5003 /// 'l' or 'L' , but not always. For typedefs, we need to use 5004 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 5005 /// 5006 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 5007 if (isa<TypedefType>(PointeeTy.getTypePtr())) { 5008 if (const BuiltinType *BT = PointeeTy->getAs<BuiltinType>()) { 5009 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 5010 PointeeTy = UnsignedIntTy; 5011 else 5012 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 5013 PointeeTy = IntTy; 5014 } 5015 } 5016 } 5017 5018 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 5019 const FieldDecl *Field) const { 5020 // We follow the behavior of gcc, expanding structures which are 5021 // directly pointed to, and expanding embedded structures. 
Note that 5022 // these rules are sufficient to prevent recursive encoding of the 5023 // same type. 5024 getObjCEncodingForTypeImpl(T, S, true, true, Field, 5025 true /* outermost type */); 5026 } 5027 5028 static char getObjCEncodingForPrimitiveKind(const ASTContext *C, 5029 BuiltinType::Kind kind) { 5030 switch (kind) { 5031 case BuiltinType::Void: return 'v'; 5032 case BuiltinType::Bool: return 'B'; 5033 case BuiltinType::Char_U: 5034 case BuiltinType::UChar: return 'C'; 5035 case BuiltinType::Char16: 5036 case BuiltinType::UShort: return 'S'; 5037 case BuiltinType::Char32: 5038 case BuiltinType::UInt: return 'I'; 5039 case BuiltinType::ULong: 5040 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 5041 case BuiltinType::UInt128: return 'T'; 5042 case BuiltinType::ULongLong: return 'Q'; 5043 case BuiltinType::Char_S: 5044 case BuiltinType::SChar: return 'c'; 5045 case BuiltinType::Short: return 's'; 5046 case BuiltinType::WChar_S: 5047 case BuiltinType::WChar_U: 5048 case BuiltinType::Int: return 'i'; 5049 case BuiltinType::Long: 5050 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 5051 case BuiltinType::LongLong: return 'q'; 5052 case BuiltinType::Int128: return 't'; 5053 case BuiltinType::Float: return 'f'; 5054 case BuiltinType::Double: return 'd'; 5055 case BuiltinType::LongDouble: return 'D'; 5056 case BuiltinType::NullPtr: return '*'; // like char* 5057 5058 case BuiltinType::Half: 5059 // FIXME: potentially need @encodes for these! 5060 return ' '; 5061 5062 case BuiltinType::ObjCId: 5063 case BuiltinType::ObjCClass: 5064 case BuiltinType::ObjCSel: 5065 llvm_unreachable("@encoding ObjC primitive type"); 5066 5067 // OpenCL and placeholder types don't need @encodings. 5068 case BuiltinType::OCLImage1d: 5069 case BuiltinType::OCLImage1dArray: 5070 case BuiltinType::OCLImage1dBuffer: 5071 case BuiltinType::OCLImage2d: 5072 case BuiltinType::OCLImage2dArray: 5073 case BuiltinType::OCLImage3d: 5074 case BuiltinType::OCLEvent: 5075 case BuiltinType::OCLSampler: 5076 case BuiltinType::Dependent: 5077 #define BUILTIN_TYPE(KIND, ID) 5078 #define PLACEHOLDER_TYPE(KIND, ID) \ 5079 case BuiltinType::KIND: 5080 #include "clang/AST/BuiltinTypes.def" 5081 llvm_unreachable("invalid builtin type for @encode"); 5082 } 5083 llvm_unreachable("invalid BuiltinType::Kind value"); 5084 } 5085 5086 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 5087 EnumDecl *Enum = ET->getDecl(); 5088 5089 // The encoding of an non-fixed enum type is always 'i', regardless of size. 5090 if (!Enum->isFixed()) 5091 return 'i'; 5092 5093 // The encoding of a fixed enum type matches its fixed underlying type. 5094 const BuiltinType *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 5095 return getObjCEncodingForPrimitiveKind(C, BT->getKind()); 5096 } 5097 5098 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 5099 QualType T, const FieldDecl *FD) { 5100 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 5101 S += 'b'; 5102 // The NeXT runtime encodes bit fields as b followed by the number of bits. 5103 // The GNU runtime requires more information; bitfields are encoded as b, 5104 // then the offset (in bits) of the first element, then the type of the 5105 // bitfield, then the size in bits. For example, in this structure: 5106 // 5107 // struct 5108 // { 5109 // int integer; 5110 // int flags:2; 5111 // }; 5112 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 5113 // runtime, but b32i2 for the GNU runtime. 
The reason for this extra 5114 // information is not especially sensible, but we're stuck with it for 5115 // compatibility with GCC, although providing it breaks anything that 5116 // actually uses runtime introspection and wants to work on both runtimes... 5117 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 5118 const RecordDecl *RD = FD->getParent(); 5119 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 5120 S += llvm::utostr(RL.getFieldOffset(FD->getFieldIndex())); 5121 if (const EnumType *ET = T->getAs<EnumType>()) 5122 S += ObjCEncodingForEnumType(Ctx, ET); 5123 else { 5124 const BuiltinType *BT = T->castAs<BuiltinType>(); 5125 S += getObjCEncodingForPrimitiveKind(Ctx, BT->getKind()); 5126 } 5127 } 5128 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 5129 } 5130 5131 // FIXME: Use SmallString for accumulating string. 5132 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S, 5133 bool ExpandPointedToStructures, 5134 bool ExpandStructures, 5135 const FieldDecl *FD, 5136 bool OutermostType, 5137 bool EncodingProperty, 5138 bool StructField, 5139 bool EncodeBlockParameters, 5140 bool EncodeClassNames, 5141 bool EncodePointerToObjCTypedef) const { 5142 CanQualType CT = getCanonicalType(T); 5143 switch (CT->getTypeClass()) { 5144 case Type::Builtin: 5145 case Type::Enum: 5146 if (FD && FD->isBitField()) 5147 return EncodeBitField(this, S, T, FD); 5148 if (const BuiltinType *BT = dyn_cast<BuiltinType>(CT)) 5149 S += getObjCEncodingForPrimitiveKind(this, BT->getKind()); 5150 else 5151 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 5152 return; 5153 5154 case Type::Complex: { 5155 const ComplexType *CT = T->castAs<ComplexType>(); 5156 S += 'j'; 5157 getObjCEncodingForTypeImpl(CT->getElementType(), S, false, false, 0, false, 5158 false); 5159 return; 5160 } 5161 5162 case Type::Atomic: { 5163 const AtomicType *AT = T->castAs<AtomicType>(); 5164 S += 'A'; 5165 getObjCEncodingForTypeImpl(AT->getValueType(), S, false, false, 0, 5166 false, false); 5167 return; 5168 } 5169 5170 // encoding for pointer or reference types. 5171 case Type::Pointer: 5172 case Type::LValueReference: 5173 case Type::RValueReference: { 5174 QualType PointeeTy; 5175 if (isa<PointerType>(CT)) { 5176 const PointerType *PT = T->castAs<PointerType>(); 5177 if (PT->isObjCSelType()) { 5178 S += ':'; 5179 return; 5180 } 5181 PointeeTy = PT->getPointeeType(); 5182 } else { 5183 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 5184 } 5185 5186 bool isReadOnly = false; 5187 // For historical/compatibility reasons, the read-only qualifier of the 5188 // pointee gets emitted _before_ the '^'. The read-only qualifier of 5189 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 5190 // Also, do not emit the 'r' for anything but the outermost type! 5191 if (isa<TypedefType>(T.getTypePtr())) { 5192 if (OutermostType && T.isConstQualified()) { 5193 isReadOnly = true; 5194 S += 'r'; 5195 } 5196 } else if (OutermostType) { 5197 QualType P = PointeeTy; 5198 while (P->getAs<PointerType>()) 5199 P = P->getAs<PointerType>()->getPointeeType(); 5200 if (P.isConstQualified()) { 5201 isReadOnly = true; 5202 S += 'r'; 5203 } 5204 } 5205 if (isReadOnly) { 5206 // Another legacy compatibility encoding. Some ObjC qualifier and type 5207 // combinations need to be rearranged. 
5208 // Rewrite "in const" from "nr" to "rn" 5209 if (StringRef(S).endswith("nr")) 5210 S.replace(S.end()-2, S.end(), "rn"); 5211 } 5212 5213 if (PointeeTy->isCharType()) { 5214 // char pointer types should be encoded as '*' unless it is a 5215 // type that has been typedef'd to 'BOOL'. 5216 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 5217 S += '*'; 5218 return; 5219 } 5220 } else if (const RecordType *RTy = PointeeTy->getAs<RecordType>()) { 5221 // GCC binary compat: Need to convert "struct objc_class *" to "#". 5222 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 5223 S += '#'; 5224 return; 5225 } 5226 // GCC binary compat: Need to convert "struct objc_object *" to "@". 5227 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 5228 S += '@'; 5229 return; 5230 } 5231 // fall through... 5232 } 5233 S += '^'; 5234 getLegacyIntegralTypeEncoding(PointeeTy); 5235 5236 getObjCEncodingForTypeImpl(PointeeTy, S, false, ExpandPointedToStructures, 5237 NULL); 5238 return; 5239 } 5240 5241 case Type::ConstantArray: 5242 case Type::IncompleteArray: 5243 case Type::VariableArray: { 5244 const ArrayType *AT = cast<ArrayType>(CT); 5245 5246 if (isa<IncompleteArrayType>(AT) && !StructField) { 5247 // Incomplete arrays are encoded as a pointer to the array element. 5248 S += '^'; 5249 5250 getObjCEncodingForTypeImpl(AT->getElementType(), S, 5251 false, ExpandStructures, FD); 5252 } else { 5253 S += '['; 5254 5255 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) 5256 S += llvm::utostr(CAT->getSize().getZExtValue()); 5257 else { 5258 //Variable length arrays are encoded as a regular array with 0 elements. 5259 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 5260 "Unknown array type!"); 5261 S += '0'; 5262 } 5263 5264 getObjCEncodingForTypeImpl(AT->getElementType(), S, 5265 false, ExpandStructures, FD); 5266 S += ']'; 5267 } 5268 return; 5269 } 5270 5271 case Type::FunctionNoProto: 5272 case Type::FunctionProto: 5273 S += '?'; 5274 return; 5275 5276 case Type::Record: { 5277 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 5278 S += RDecl->isUnion() ? '(' : '{'; 5279 // Anonymous structures print as '?' 5280 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 5281 S += II->getName(); 5282 if (ClassTemplateSpecializationDecl *Spec 5283 = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 5284 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 5285 llvm::raw_string_ostream OS(S); 5286 TemplateSpecializationType::PrintTemplateArgumentList(OS, 5287 TemplateArgs.data(), 5288 TemplateArgs.size(), 5289 (*this).getPrintingPolicy()); 5290 } 5291 } else { 5292 S += '?'; 5293 } 5294 if (ExpandStructures) { 5295 S += '='; 5296 if (!RDecl->isUnion()) { 5297 getObjCEncodingForStructureImpl(RDecl, S, FD); 5298 } else { 5299 for (RecordDecl::field_iterator Field = RDecl->field_begin(), 5300 FieldEnd = RDecl->field_end(); 5301 Field != FieldEnd; ++Field) { 5302 if (FD) { 5303 S += '"'; 5304 S += Field->getNameAsString(); 5305 S += '"'; 5306 } 5307 5308 // Special case bit-fields. 5309 if (Field->isBitField()) { 5310 getObjCEncodingForTypeImpl(Field->getType(), S, false, true, 5311 *Field); 5312 } else { 5313 QualType qt = Field->getType(); 5314 getLegacyIntegralTypeEncoding(qt); 5315 getObjCEncodingForTypeImpl(qt, S, false, true, 5316 FD, /*OutermostType*/false, 5317 /*EncodingProperty*/false, 5318 /*StructField*/true); 5319 } 5320 } 5321 } 5322 } 5323 S += RDecl->isUnion() ? 
')' : '}'; 5324 return; 5325 } 5326 5327 case Type::BlockPointer: { 5328 const BlockPointerType *BT = T->castAs<BlockPointerType>(); 5329 S += "@?"; // Unlike a pointer-to-function, which is "^?". 5330 if (EncodeBlockParameters) { 5331 const FunctionType *FT = BT->getPointeeType()->castAs<FunctionType>(); 5332 5333 S += '<'; 5334 // Block return type 5335 getObjCEncodingForTypeImpl(FT->getResultType(), S, 5336 ExpandPointedToStructures, ExpandStructures, 5337 FD, 5338 false /* OutermostType */, 5339 EncodingProperty, 5340 false /* StructField */, 5341 EncodeBlockParameters, 5342 EncodeClassNames); 5343 // Block self 5344 S += "@?"; 5345 // Block parameters 5346 if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) { 5347 for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin(), 5348 E = FPT->arg_type_end(); I && (I != E); ++I) { 5349 getObjCEncodingForTypeImpl(*I, S, 5350 ExpandPointedToStructures, 5351 ExpandStructures, 5352 FD, 5353 false /* OutermostType */, 5354 EncodingProperty, 5355 false /* StructField */, 5356 EncodeBlockParameters, 5357 EncodeClassNames); 5358 } 5359 } 5360 S += '>'; 5361 } 5362 return; 5363 } 5364 5365 case Type::ObjCObject: 5366 case Type::ObjCInterface: { 5367 // Ignore protocol qualifiers when mangling at this level. 5368 T = T->castAs<ObjCObjectType>()->getBaseType(); 5369 5370 // The assumption seems to be that this assert will succeed 5371 // because nested levels will have filtered out 'id' and 'Class'. 5372 const ObjCInterfaceType *OIT = T->castAs<ObjCInterfaceType>(); 5373 // @encode(class_name) 5374 ObjCInterfaceDecl *OI = OIT->getDecl(); 5375 S += '{'; 5376 const IdentifierInfo *II = OI->getIdentifier(); 5377 S += II->getName(); 5378 S += '='; 5379 SmallVector<const ObjCIvarDecl*, 32> Ivars; 5380 DeepCollectObjCIvars(OI, true, Ivars); 5381 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 5382 const FieldDecl *Field = cast<FieldDecl>(Ivars[i]); 5383 if (Field->isBitField()) 5384 getObjCEncodingForTypeImpl(Field->getType(), S, false, true, Field); 5385 else 5386 getObjCEncodingForTypeImpl(Field->getType(), S, false, true, FD, 5387 false, false, false, false, false, 5388 EncodePointerToObjCTypedef); 5389 } 5390 S += '}'; 5391 return; 5392 } 5393 5394 case Type::ObjCObjectPointer: { 5395 const ObjCObjectPointerType *OPT = T->castAs<ObjCObjectPointerType>(); 5396 if (OPT->isObjCIdType()) { 5397 S += '@'; 5398 return; 5399 } 5400 5401 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 5402 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 5403 // Since this is a binary compatibility issue, need to consult with runtime 5404 // folks. Fortunately, this is a *very* obsure construct. 5405 S += '#'; 5406 return; 5407 } 5408 5409 if (OPT->isObjCQualifiedIdType()) { 5410 getObjCEncodingForTypeImpl(getObjCIdType(), S, 5411 ExpandPointedToStructures, 5412 ExpandStructures, FD); 5413 if (FD || EncodingProperty || EncodeClassNames) { 5414 // Note that we do extended encoding of protocol qualifer list 5415 // Only when doing ivar or property encoding. 5416 S += '"'; 5417 for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(), 5418 E = OPT->qual_end(); I != E; ++I) { 5419 S += '<'; 5420 S += (*I)->getNameAsString(); 5421 S += '>'; 5422 } 5423 S += '"'; 5424 } 5425 return; 5426 } 5427 5428 QualType PointeeTy = OPT->getPointeeType(); 5429 if (!EncodingProperty && 5430 isa<TypedefType>(PointeeTy.getTypePtr()) && 5431 !EncodePointerToObjCTypedef) { 5432 // Another historical/compatibility reason. 
5433 // We encode the underlying type which comes out as 5434 // {...}; 5435 S += '^'; 5436 if (FD && OPT->getInterfaceDecl()) { 5437 // Prevent recursive encoding of fields in some rare cases. 5438 ObjCInterfaceDecl *OI = OPT->getInterfaceDecl(); 5439 SmallVector<const ObjCIvarDecl*, 32> Ivars; 5440 DeepCollectObjCIvars(OI, true, Ivars); 5441 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 5442 if (cast<FieldDecl>(Ivars[i]) == FD) { 5443 S += '{'; 5444 S += OI->getIdentifier()->getName(); 5445 S += '}'; 5446 return; 5447 } 5448 } 5449 } 5450 getObjCEncodingForTypeImpl(PointeeTy, S, 5451 false, ExpandPointedToStructures, 5452 NULL, 5453 false, false, false, false, false, 5454 /*EncodePointerToObjCTypedef*/true); 5455 return; 5456 } 5457 5458 S += '@'; 5459 if (OPT->getInterfaceDecl() && 5460 (FD || EncodingProperty || EncodeClassNames)) { 5461 S += '"'; 5462 S += OPT->getInterfaceDecl()->getIdentifier()->getName(); 5463 for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(), 5464 E = OPT->qual_end(); I != E; ++I) { 5465 S += '<'; 5466 S += (*I)->getNameAsString(); 5467 S += '>'; 5468 } 5469 S += '"'; 5470 } 5471 return; 5472 } 5473 5474 // gcc just blithely ignores member pointers. 5475 // FIXME: we shoul do better than that. 'M' is available. 5476 case Type::MemberPointer: 5477 return; 5478 5479 case Type::Vector: 5480 case Type::ExtVector: 5481 // This matches gcc's encoding, even though technically it is 5482 // insufficient. 5483 // FIXME. We should do a better job than gcc. 5484 return; 5485 5486 case Type::Auto: 5487 // We could see an undeduced auto type here during error recovery. 5488 // Just ignore it. 5489 return; 5490 5491 #define ABSTRACT_TYPE(KIND, BASE) 5492 #define TYPE(KIND, BASE) 5493 #define DEPENDENT_TYPE(KIND, BASE) \ 5494 case Type::KIND: 5495 #define NON_CANONICAL_TYPE(KIND, BASE) \ 5496 case Type::KIND: 5497 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 5498 case Type::KIND: 5499 #include "clang/AST/TypeNodes.def" 5500 llvm_unreachable("@encode for dependent type!"); 5501 } 5502 llvm_unreachable("bad type kind!"); 5503 } 5504 5505 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 5506 std::string &S, 5507 const FieldDecl *FD, 5508 bool includeVBases) const { 5509 assert(RDecl && "Expected non-null RecordDecl"); 5510 assert(!RDecl->isUnion() && "Should not be called for unions"); 5511 if (!RDecl->getDefinition()) 5512 return; 5513 5514 CXXRecordDecl *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 5515 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 5516 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 5517 5518 if (CXXRec) { 5519 for (CXXRecordDecl::base_class_iterator 5520 BI = CXXRec->bases_begin(), 5521 BE = CXXRec->bases_end(); BI != BE; ++BI) { 5522 if (!BI->isVirtual()) { 5523 CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl(); 5524 if (base->isEmpty()) 5525 continue; 5526 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 5527 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 5528 std::make_pair(offs, base)); 5529 } 5530 } 5531 } 5532 5533 unsigned i = 0; 5534 for (RecordDecl::field_iterator Field = RDecl->field_begin(), 5535 FieldEnd = RDecl->field_end(); 5536 Field != FieldEnd; ++Field, ++i) { 5537 uint64_t offs = layout.getFieldOffset(i); 5538 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 5539 std::make_pair(offs, *Field)); 5540 } 5541 5542 if (CXXRec && includeVBases) { 5543 for (CXXRecordDecl::base_class_iterator 5544 BI = CXXRec->vbases_begin(), 5545 BE 
= CXXRec->vbases_end(); BI != BE; ++BI) { 5546 CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl(); 5547 if (base->isEmpty()) 5548 continue; 5549 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 5550 if (FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 5551 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 5552 std::make_pair(offs, base)); 5553 } 5554 } 5555 5556 CharUnits size; 5557 if (CXXRec) { 5558 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize(); 5559 } else { 5560 size = layout.getSize(); 5561 } 5562 5563 uint64_t CurOffs = 0; 5564 std::multimap<uint64_t, NamedDecl *>::iterator 5565 CurLayObj = FieldOrBaseOffsets.begin(); 5566 5567 if (CXXRec && CXXRec->isDynamicClass() && 5568 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { 5569 if (FD) { 5570 S += "\"_vptr$"; 5571 std::string recname = CXXRec->getNameAsString(); 5572 if (recname.empty()) recname = "?"; 5573 S += recname; 5574 S += '"'; 5575 } 5576 S += "^^?"; 5577 CurOffs += getTypeSize(VoidPtrTy); 5578 } 5579 5580 if (!RDecl->hasFlexibleArrayMember()) { 5581 // Mark the end of the structure. 5582 uint64_t offs = toBits(size); 5583 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 5584 std::make_pair(offs, (NamedDecl*)0)); 5585 } 5586 5587 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { 5588 assert(CurOffs <= CurLayObj->first); 5589 5590 if (CurOffs < CurLayObj->first) { 5591 uint64_t padding = CurLayObj->first - CurOffs; 5592 // FIXME: There doesn't seem to be a way to indicate in the encoding that 5593 // packing/alignment of members is different that normal, in which case 5594 // the encoding will be out-of-sync with the real layout. 5595 // If the runtime switches to just consider the size of types without 5596 // taking into account alignment, we could make padding explicit in the 5597 // encoding (e.g. using arrays of chars). The encoding strings would be 5598 // longer then though. 5599 CurOffs += padding; 5600 } 5601 5602 NamedDecl *dcl = CurLayObj->second; 5603 if (dcl == 0) 5604 break; // reached end of structure. 5605 5606 if (CXXRecordDecl *base = dyn_cast<CXXRecordDecl>(dcl)) { 5607 // We expand the bases without their virtual bases since those are going 5608 // in the initial structure. Note that this differs from gcc which 5609 // expands virtual bases each time one is encountered in the hierarchy, 5610 // making the encoding type bigger than it really is. 
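      // (For example, with 'struct D : B, virtual V { ... }', V's members
      //  appear once in D's encoding, at V's offset in the complete object,
      //  no matter how many of D's bases also inherit V.)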
5611 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false); 5612 assert(!base->isEmpty()); 5613 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 5614 } else { 5615 FieldDecl *field = cast<FieldDecl>(dcl); 5616 if (FD) { 5617 S += '"'; 5618 S += field->getNameAsString(); 5619 S += '"'; 5620 } 5621 5622 if (field->isBitField()) { 5623 EncodeBitField(this, S, field->getType(), field); 5624 CurOffs += field->getBitWidthValue(*this); 5625 } else { 5626 QualType qt = field->getType(); 5627 getLegacyIntegralTypeEncoding(qt); 5628 getObjCEncodingForTypeImpl(qt, S, false, true, FD, 5629 /*OutermostType*/false, 5630 /*EncodingProperty*/false, 5631 /*StructField*/true); 5632 CurOffs += getTypeSize(field->getType()); 5633 } 5634 } 5635 } 5636 } 5637 5638 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 5639 std::string& S) const { 5640 if (QT & Decl::OBJC_TQ_In) 5641 S += 'n'; 5642 if (QT & Decl::OBJC_TQ_Inout) 5643 S += 'N'; 5644 if (QT & Decl::OBJC_TQ_Out) 5645 S += 'o'; 5646 if (QT & Decl::OBJC_TQ_Bycopy) 5647 S += 'O'; 5648 if (QT & Decl::OBJC_TQ_Byref) 5649 S += 'R'; 5650 if (QT & Decl::OBJC_TQ_Oneway) 5651 S += 'V'; 5652 } 5653 5654 TypedefDecl *ASTContext::getObjCIdDecl() const { 5655 if (!ObjCIdDecl) { 5656 QualType T = getObjCObjectType(ObjCBuiltinIdTy, 0, 0); 5657 T = getObjCObjectPointerType(T); 5658 TypeSourceInfo *IdInfo = getTrivialTypeSourceInfo(T); 5659 ObjCIdDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this), 5660 getTranslationUnitDecl(), 5661 SourceLocation(), SourceLocation(), 5662 &Idents.get("id"), IdInfo); 5663 } 5664 5665 return ObjCIdDecl; 5666 } 5667 5668 TypedefDecl *ASTContext::getObjCSelDecl() const { 5669 if (!ObjCSelDecl) { 5670 QualType SelT = getPointerType(ObjCBuiltinSelTy); 5671 TypeSourceInfo *SelInfo = getTrivialTypeSourceInfo(SelT); 5672 ObjCSelDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this), 5673 getTranslationUnitDecl(), 5674 SourceLocation(), SourceLocation(), 5675 &Idents.get("SEL"), SelInfo); 5676 } 5677 return ObjCSelDecl; 5678 } 5679 5680 TypedefDecl *ASTContext::getObjCClassDecl() const { 5681 if (!ObjCClassDecl) { 5682 QualType T = getObjCObjectType(ObjCBuiltinClassTy, 0, 0); 5683 T = getObjCObjectPointerType(T); 5684 TypeSourceInfo *ClassInfo = getTrivialTypeSourceInfo(T); 5685 ObjCClassDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this), 5686 getTranslationUnitDecl(), 5687 SourceLocation(), SourceLocation(), 5688 &Idents.get("Class"), ClassInfo); 5689 } 5690 5691 return ObjCClassDecl; 5692 } 5693 5694 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 5695 if (!ObjCProtocolClassDecl) { 5696 ObjCProtocolClassDecl 5697 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 5698 SourceLocation(), 5699 &Idents.get("Protocol"), 5700 /*PrevDecl=*/0, 5701 SourceLocation(), true); 5702 } 5703 5704 return ObjCProtocolClassDecl; 5705 } 5706 5707 //===----------------------------------------------------------------------===// 5708 // __builtin_va_list Construction Functions 5709 //===----------------------------------------------------------------------===// 5710 5711 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 5712 // typedef char* __builtin_va_list; 5713 QualType CharPtrType = Context->getPointerType(Context->CharTy); 5714 TypeSourceInfo *TInfo 5715 = Context->getTrivialTypeSourceInfo(CharPtrType); 5716 5717 TypedefDecl *VaListTypeDecl 5718 = TypedefDecl::Create(const_cast<ASTContext &>(*Context), 5719 
Context->getTranslationUnitDecl(), 5720 SourceLocation(), SourceLocation(), 5721 &Context->Idents.get("__builtin_va_list"), 5722 TInfo); 5723 return VaListTypeDecl; 5724 } 5725 5726 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 5727 // typedef void* __builtin_va_list; 5728 QualType VoidPtrType = Context->getPointerType(Context->VoidTy); 5729 TypeSourceInfo *TInfo 5730 = Context->getTrivialTypeSourceInfo(VoidPtrType); 5731 5732 TypedefDecl *VaListTypeDecl 5733 = TypedefDecl::Create(const_cast<ASTContext &>(*Context), 5734 Context->getTranslationUnitDecl(), 5735 SourceLocation(), SourceLocation(), 5736 &Context->Idents.get("__builtin_va_list"), 5737 TInfo); 5738 return VaListTypeDecl; 5739 } 5740 5741 static TypedefDecl * 5742 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 5743 RecordDecl *VaListTagDecl; 5744 if (Context->getLangOpts().CPlusPlus) { 5745 // namespace std { struct __va_list { 5746 NamespaceDecl *NS; 5747 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 5748 Context->getTranslationUnitDecl(), 5749 /*Inline*/false, SourceLocation(), 5750 SourceLocation(), &Context->Idents.get("std"), 5751 /*PrevDecl*/0); 5752 5753 VaListTagDecl = CXXRecordDecl::Create(*Context, TTK_Struct, 5754 Context->getTranslationUnitDecl(), 5755 SourceLocation(), SourceLocation(), 5756 &Context->Idents.get("__va_list")); 5757 VaListTagDecl->setDeclContext(NS); 5758 } else { 5759 // struct __va_list 5760 VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct, 5761 Context->getTranslationUnitDecl(), 5762 &Context->Idents.get("__va_list")); 5763 } 5764 5765 VaListTagDecl->startDefinition(); 5766 5767 const size_t NumFields = 5; 5768 QualType FieldTypes[NumFields]; 5769 const char *FieldNames[NumFields]; 5770 5771 // void *__stack; 5772 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 5773 FieldNames[0] = "__stack"; 5774 5775 // void *__gr_top; 5776 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 5777 FieldNames[1] = "__gr_top"; 5778 5779 // void *__vr_top; 5780 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 5781 FieldNames[2] = "__vr_top"; 5782 5783 // int __gr_offs; 5784 FieldTypes[3] = Context->IntTy; 5785 FieldNames[3] = "__gr_offs"; 5786 5787 // int __vr_offs; 5788 FieldTypes[4] = Context->IntTy; 5789 FieldNames[4] = "__vr_offs"; 5790 5791 // Create fields 5792 for (unsigned i = 0; i < NumFields; ++i) { 5793 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 5794 VaListTagDecl, 5795 SourceLocation(), 5796 SourceLocation(), 5797 &Context->Idents.get(FieldNames[i]), 5798 FieldTypes[i], /*TInfo=*/0, 5799 /*BitWidth=*/0, 5800 /*Mutable=*/false, 5801 ICIS_NoInit); 5802 Field->setAccess(AS_public); 5803 VaListTagDecl->addDecl(Field); 5804 } 5805 VaListTagDecl->completeDefinition(); 5806 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 5807 Context->VaListTagTy = VaListTagType; 5808 5809 // } __builtin_va_list; 5810 TypedefDecl *VaListTypedefDecl 5811 = TypedefDecl::Create(const_cast<ASTContext &>(*Context), 5812 Context->getTranslationUnitDecl(), 5813 SourceLocation(), SourceLocation(), 5814 &Context->Idents.get("__builtin_va_list"), 5815 Context->getTrivialTypeSourceInfo(VaListTagType)); 5816 5817 return VaListTypedefDecl; 5818 } 5819 5820 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 5821 // typedef struct __va_list_tag { 5822 RecordDecl *VaListTagDecl; 5823 5824 VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct, 5825 
Context->getTranslationUnitDecl(), 5826 &Context->Idents.get("__va_list_tag")); 5827 VaListTagDecl->startDefinition(); 5828 5829 const size_t NumFields = 5; 5830 QualType FieldTypes[NumFields]; 5831 const char *FieldNames[NumFields]; 5832 5833 // unsigned char gpr; 5834 FieldTypes[0] = Context->UnsignedCharTy; 5835 FieldNames[0] = "gpr"; 5836 5837 // unsigned char fpr; 5838 FieldTypes[1] = Context->UnsignedCharTy; 5839 FieldNames[1] = "fpr"; 5840 5841 // unsigned short reserved; 5842 FieldTypes[2] = Context->UnsignedShortTy; 5843 FieldNames[2] = "reserved"; 5844 5845 // void* overflow_arg_area; 5846 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 5847 FieldNames[3] = "overflow_arg_area"; 5848 5849 // void* reg_save_area; 5850 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 5851 FieldNames[4] = "reg_save_area"; 5852 5853 // Create fields 5854 for (unsigned i = 0; i < NumFields; ++i) { 5855 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 5856 SourceLocation(), 5857 SourceLocation(), 5858 &Context->Idents.get(FieldNames[i]), 5859 FieldTypes[i], /*TInfo=*/0, 5860 /*BitWidth=*/0, 5861 /*Mutable=*/false, 5862 ICIS_NoInit); 5863 Field->setAccess(AS_public); 5864 VaListTagDecl->addDecl(Field); 5865 } 5866 VaListTagDecl->completeDefinition(); 5867 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 5868 Context->VaListTagTy = VaListTagType; 5869 5870 // } __va_list_tag; 5871 TypedefDecl *VaListTagTypedefDecl 5872 = TypedefDecl::Create(const_cast<ASTContext &>(*Context), 5873 Context->getTranslationUnitDecl(), 5874 SourceLocation(), SourceLocation(), 5875 &Context->Idents.get("__va_list_tag"), 5876 Context->getTrivialTypeSourceInfo(VaListTagType)); 5877 QualType VaListTagTypedefType = 5878 Context->getTypedefType(VaListTagTypedefDecl); 5879 5880 // typedef __va_list_tag __builtin_va_list[1]; 5881 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 5882 QualType VaListTagArrayType 5883 = Context->getConstantArrayType(VaListTagTypedefType, 5884 Size, ArrayType::Normal, 0); 5885 TypeSourceInfo *TInfo 5886 = Context->getTrivialTypeSourceInfo(VaListTagArrayType); 5887 TypedefDecl *VaListTypedefDecl 5888 = TypedefDecl::Create(const_cast<ASTContext &>(*Context), 5889 Context->getTranslationUnitDecl(), 5890 SourceLocation(), SourceLocation(), 5891 &Context->Idents.get("__builtin_va_list"), 5892 TInfo); 5893 5894 return VaListTypedefDecl; 5895 } 5896 5897 static TypedefDecl * 5898 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 5899 // typedef struct __va_list_tag { 5900 RecordDecl *VaListTagDecl; 5901 VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct, 5902 Context->getTranslationUnitDecl(), 5903 &Context->Idents.get("__va_list_tag")); 5904 VaListTagDecl->startDefinition(); 5905 5906 const size_t NumFields = 4; 5907 QualType FieldTypes[NumFields]; 5908 const char *FieldNames[NumFields]; 5909 5910 // unsigned gp_offset; 5911 FieldTypes[0] = Context->UnsignedIntTy; 5912 FieldNames[0] = "gp_offset"; 5913 5914 // unsigned fp_offset; 5915 FieldTypes[1] = Context->UnsignedIntTy; 5916 FieldNames[1] = "fp_offset"; 5917 5918 // void* overflow_arg_area; 5919 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 5920 FieldNames[2] = "overflow_arg_area"; 5921 5922 // void* reg_save_area; 5923 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 5924 FieldNames[3] = "reg_save_area"; 5925 5926 // Create fields 5927 for (unsigned i = 0; i < NumFields; ++i) { 5928 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 
5929 VaListTagDecl, 5930 SourceLocation(), 5931 SourceLocation(), 5932 &Context->Idents.get(FieldNames[i]), 5933 FieldTypes[i], /*TInfo=*/0, 5934 /*BitWidth=*/0, 5935 /*Mutable=*/false, 5936 ICIS_NoInit); 5937 Field->setAccess(AS_public); 5938 VaListTagDecl->addDecl(Field); 5939 } 5940 VaListTagDecl->completeDefinition(); 5941 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 5942 Context->VaListTagTy = VaListTagType; 5943 5944 // } __va_list_tag; 5945 TypedefDecl *VaListTagTypedefDecl 5946 = TypedefDecl::Create(const_cast<ASTContext &>(*Context), 5947 Context->getTranslationUnitDecl(), 5948 SourceLocation(), SourceLocation(), 5949 &Context->Idents.get("__va_list_tag"), 5950 Context->getTrivialTypeSourceInfo(VaListTagType)); 5951 QualType VaListTagTypedefType = 5952 Context->getTypedefType(VaListTagTypedefDecl); 5953 5954 // typedef __va_list_tag __builtin_va_list[1]; 5955 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 5956 QualType VaListTagArrayType 5957 = Context->getConstantArrayType(VaListTagTypedefType, 5958 Size, ArrayType::Normal,0); 5959 TypeSourceInfo *TInfo 5960 = Context->getTrivialTypeSourceInfo(VaListTagArrayType); 5961 TypedefDecl *VaListTypedefDecl 5962 = TypedefDecl::Create(const_cast<ASTContext &>(*Context), 5963 Context->getTranslationUnitDecl(), 5964 SourceLocation(), SourceLocation(), 5965 &Context->Idents.get("__builtin_va_list"), 5966 TInfo); 5967 5968 return VaListTypedefDecl; 5969 } 5970 5971 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 5972 // typedef int __builtin_va_list[4]; 5973 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 5974 QualType IntArrayType 5975 = Context->getConstantArrayType(Context->IntTy, 5976 Size, ArrayType::Normal, 0); 5977 TypedefDecl *VaListTypedefDecl 5978 = TypedefDecl::Create(const_cast<ASTContext &>(*Context), 5979 Context->getTranslationUnitDecl(), 5980 SourceLocation(), SourceLocation(), 5981 &Context->Idents.get("__builtin_va_list"), 5982 Context->getTrivialTypeSourceInfo(IntArrayType)); 5983 5984 return VaListTypedefDecl; 5985 } 5986 5987 static TypedefDecl * 5988 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 5989 RecordDecl *VaListDecl; 5990 if (Context->getLangOpts().CPlusPlus) { 5991 // namespace std { struct __va_list { 5992 NamespaceDecl *NS; 5993 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 5994 Context->getTranslationUnitDecl(), 5995 /*Inline*/false, SourceLocation(), 5996 SourceLocation(), &Context->Idents.get("std"), 5997 /*PrevDecl*/0); 5998 5999 VaListDecl = CXXRecordDecl::Create(*Context, TTK_Struct, 6000 Context->getTranslationUnitDecl(), 6001 SourceLocation(), SourceLocation(), 6002 &Context->Idents.get("__va_list")); 6003 6004 VaListDecl->setDeclContext(NS); 6005 6006 } else { 6007 // struct __va_list { 6008 VaListDecl = CreateRecordDecl(*Context, TTK_Struct, 6009 Context->getTranslationUnitDecl(), 6010 &Context->Idents.get("__va_list")); 6011 } 6012 6013 VaListDecl->startDefinition(); 6014 6015 // void * __ap; 6016 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 6017 VaListDecl, 6018 SourceLocation(), 6019 SourceLocation(), 6020 &Context->Idents.get("__ap"), 6021 Context->getPointerType(Context->VoidTy), 6022 /*TInfo=*/0, 6023 /*BitWidth=*/0, 6024 /*Mutable=*/false, 6025 ICIS_NoInit); 6026 Field->setAccess(AS_public); 6027 VaListDecl->addDecl(Field); 6028 6029 // }; 6030 VaListDecl->completeDefinition(); 6031 6032 // typedef struct __va_list __builtin_va_list; 6033 
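  // (Unlike the register-save-area va_list layouts above, the AAPCS form is
  //  a one-field aggregate, effectively 'struct __va_list { void *__ap; };',
  //  holding a single cursor into the caller's argument area.)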
TypeSourceInfo *TInfo 6034 = Context->getTrivialTypeSourceInfo(Context->getRecordType(VaListDecl)); 6035 6036 TypedefDecl *VaListTypeDecl 6037 = TypedefDecl::Create(const_cast<ASTContext &>(*Context), 6038 Context->getTranslationUnitDecl(), 6039 SourceLocation(), SourceLocation(), 6040 &Context->Idents.get("__builtin_va_list"), 6041 TInfo); 6042 6043 return VaListTypeDecl; 6044 } 6045 6046 static TypedefDecl * 6047 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 6048 // typedef struct __va_list_tag { 6049 RecordDecl *VaListTagDecl; 6050 VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct, 6051 Context->getTranslationUnitDecl(), 6052 &Context->Idents.get("__va_list_tag")); 6053 VaListTagDecl->startDefinition(); 6054 6055 const size_t NumFields = 4; 6056 QualType FieldTypes[NumFields]; 6057 const char *FieldNames[NumFields]; 6058 6059 // long __gpr; 6060 FieldTypes[0] = Context->LongTy; 6061 FieldNames[0] = "__gpr"; 6062 6063 // long __fpr; 6064 FieldTypes[1] = Context->LongTy; 6065 FieldNames[1] = "__fpr"; 6066 6067 // void *__overflow_arg_area; 6068 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 6069 FieldNames[2] = "__overflow_arg_area"; 6070 6071 // void *__reg_save_area; 6072 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 6073 FieldNames[3] = "__reg_save_area"; 6074 6075 // Create fields 6076 for (unsigned i = 0; i < NumFields; ++i) { 6077 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 6078 VaListTagDecl, 6079 SourceLocation(), 6080 SourceLocation(), 6081 &Context->Idents.get(FieldNames[i]), 6082 FieldTypes[i], /*TInfo=*/0, 6083 /*BitWidth=*/0, 6084 /*Mutable=*/false, 6085 ICIS_NoInit); 6086 Field->setAccess(AS_public); 6087 VaListTagDecl->addDecl(Field); 6088 } 6089 VaListTagDecl->completeDefinition(); 6090 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 6091 Context->VaListTagTy = VaListTagType; 6092 6093 // } __va_list_tag; 6094 TypedefDecl *VaListTagTypedefDecl 6095 = TypedefDecl::Create(const_cast<ASTContext &>(*Context), 6096 Context->getTranslationUnitDecl(), 6097 SourceLocation(), SourceLocation(), 6098 &Context->Idents.get("__va_list_tag"), 6099 Context->getTrivialTypeSourceInfo(VaListTagType)); 6100 QualType VaListTagTypedefType = 6101 Context->getTypedefType(VaListTagTypedefDecl); 6102 6103 // typedef __va_list_tag __builtin_va_list[1]; 6104 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 6105 QualType VaListTagArrayType 6106 = Context->getConstantArrayType(VaListTagTypedefType, 6107 Size, ArrayType::Normal,0); 6108 TypeSourceInfo *TInfo 6109 = Context->getTrivialTypeSourceInfo(VaListTagArrayType); 6110 TypedefDecl *VaListTypedefDecl 6111 = TypedefDecl::Create(const_cast<ASTContext &>(*Context), 6112 Context->getTranslationUnitDecl(), 6113 SourceLocation(), SourceLocation(), 6114 &Context->Idents.get("__builtin_va_list"), 6115 TInfo); 6116 6117 return VaListTypedefDecl; 6118 } 6119 6120 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 6121 TargetInfo::BuiltinVaListKind Kind) { 6122 switch (Kind) { 6123 case TargetInfo::CharPtrBuiltinVaList: 6124 return CreateCharPtrBuiltinVaListDecl(Context); 6125 case TargetInfo::VoidPtrBuiltinVaList: 6126 return CreateVoidPtrBuiltinVaListDecl(Context); 6127 case TargetInfo::AArch64ABIBuiltinVaList: 6128 return CreateAArch64ABIBuiltinVaListDecl(Context); 6129 case TargetInfo::PowerABIBuiltinVaList: 6130 return CreatePowerABIBuiltinVaListDecl(Context); 6131 case TargetInfo::X86_64ABIBuiltinVaList: 6132 return 
CreateX86_64ABIBuiltinVaListDecl(Context); 6133 case TargetInfo::PNaClABIBuiltinVaList: 6134 return CreatePNaClABIBuiltinVaListDecl(Context); 6135 case TargetInfo::AAPCSABIBuiltinVaList: 6136 return CreateAAPCSABIBuiltinVaListDecl(Context); 6137 case TargetInfo::SystemZBuiltinVaList: 6138 return CreateSystemZBuiltinVaListDecl(Context); 6139 } 6140 6141 llvm_unreachable("Unhandled __builtin_va_list type kind"); 6142 } 6143 6144 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 6145 if (!BuiltinVaListDecl) 6146 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 6147 6148 return BuiltinVaListDecl; 6149 } 6150 6151 QualType ASTContext::getVaListTagType() const { 6152 // Force the creation of VaListTagTy by building the __builtin_va_list 6153 // declaration. 6154 if (VaListTagTy.isNull()) 6155 (void) getBuiltinVaListDecl(); 6156 6157 return VaListTagTy; 6158 } 6159 6160 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 6161 assert(ObjCConstantStringType.isNull() && 6162 "'NSConstantString' type already set!"); 6163 6164 ObjCConstantStringType = getObjCInterfaceType(Decl); 6165 } 6166 6167 /// \brief Retrieve the template name that corresponds to a non-empty 6168 /// lookup. 6169 TemplateName 6170 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 6171 UnresolvedSetIterator End) const { 6172 unsigned size = End - Begin; 6173 assert(size > 1 && "set is not overloaded!"); 6174 6175 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 6176 size * sizeof(FunctionTemplateDecl*)); 6177 OverloadedTemplateStorage *OT = new(memory) OverloadedTemplateStorage(size); 6178 6179 NamedDecl **Storage = OT->getStorage(); 6180 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 6181 NamedDecl *D = *I; 6182 assert(isa<FunctionTemplateDecl>(D) || 6183 (isa<UsingShadowDecl>(D) && 6184 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 6185 *Storage++ = D; 6186 } 6187 6188 return TemplateName(OT); 6189 } 6190 6191 /// \brief Retrieve the template name that represents a qualified 6192 /// template name such as \c std::vector. 6193 TemplateName 6194 ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 6195 bool TemplateKeyword, 6196 TemplateDecl *Template) const { 6197 assert(NNS && "Missing nested-name-specifier in qualified template name"); 6198 6199 // FIXME: Canonicalization? 6200 llvm::FoldingSetNodeID ID; 6201 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 6202 6203 void *InsertPos = 0; 6204 QualifiedTemplateName *QTN = 6205 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 6206 if (!QTN) { 6207 QTN = new (*this, llvm::alignOf<QualifiedTemplateName>()) 6208 QualifiedTemplateName(NNS, TemplateKeyword, Template); 6209 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 6210 } 6211 6212 return TemplateName(QTN); 6213 } 6214 6215 /// \brief Retrieve the template name that represents a dependent 6216 /// template name such as \c MetaFun::template apply. 
6217 TemplateName 6218 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 6219 const IdentifierInfo *Name) const { 6220 assert((!NNS || NNS->isDependent()) && 6221 "Nested name specifier must be dependent"); 6222 6223 llvm::FoldingSetNodeID ID; 6224 DependentTemplateName::Profile(ID, NNS, Name); 6225 6226 void *InsertPos = 0; 6227 DependentTemplateName *QTN = 6228 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 6229 6230 if (QTN) 6231 return TemplateName(QTN); 6232 6233 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 6234 if (CanonNNS == NNS) { 6235 QTN = new (*this, llvm::alignOf<DependentTemplateName>()) 6236 DependentTemplateName(NNS, Name); 6237 } else { 6238 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 6239 QTN = new (*this, llvm::alignOf<DependentTemplateName>()) 6240 DependentTemplateName(NNS, Name, Canon); 6241 DependentTemplateName *CheckQTN = 6242 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 6243 assert(!CheckQTN && "Dependent type name canonicalization broken"); 6244 (void)CheckQTN; 6245 } 6246 6247 DependentTemplateNames.InsertNode(QTN, InsertPos); 6248 return TemplateName(QTN); 6249 } 6250 6251 /// \brief Retrieve the template name that represents a dependent 6252 /// template name such as \c MetaFun::template operator+. 6253 TemplateName 6254 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 6255 OverloadedOperatorKind Operator) const { 6256 assert((!NNS || NNS->isDependent()) && 6257 "Nested name specifier must be dependent"); 6258 6259 llvm::FoldingSetNodeID ID; 6260 DependentTemplateName::Profile(ID, NNS, Operator); 6261 6262 void *InsertPos = 0; 6263 DependentTemplateName *QTN 6264 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 6265 6266 if (QTN) 6267 return TemplateName(QTN); 6268 6269 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 6270 if (CanonNNS == NNS) { 6271 QTN = new (*this, llvm::alignOf<DependentTemplateName>()) 6272 DependentTemplateName(NNS, Operator); 6273 } else { 6274 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 6275 QTN = new (*this, llvm::alignOf<DependentTemplateName>()) 6276 DependentTemplateName(NNS, Operator, Canon); 6277 6278 DependentTemplateName *CheckQTN 6279 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 6280 assert(!CheckQTN && "Dependent template name canonicalization broken"); 6281 (void)CheckQTN; 6282 } 6283 6284 DependentTemplateNames.InsertNode(QTN, InsertPos); 6285 return TemplateName(QTN); 6286 } 6287 6288 TemplateName 6289 ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, 6290 TemplateName replacement) const { 6291 llvm::FoldingSetNodeID ID; 6292 SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); 6293 6294 void *insertPos = 0; 6295 SubstTemplateTemplateParmStorage *subst 6296 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 6297 6298 if (!subst) { 6299 subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); 6300 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 6301 } 6302 6303 return TemplateName(subst); 6304 } 6305 6306 TemplateName 6307 ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, 6308 const TemplateArgument &ArgPack) const { 6309 ASTContext &Self = const_cast<ASTContext &>(*this); 6310 llvm::FoldingSetNodeID ID; 6311 SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); 6312 6313 void *InsertPos = 0; 6314 SubstTemplateTemplateParmPackStorage 
*Subst 6315 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); 6316 6317 if (!Subst) { 6318 Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, 6319 ArgPack.pack_size(), 6320 ArgPack.pack_begin()); 6321 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); 6322 } 6323 6324 return TemplateName(Subst); 6325 } 6326 6327 /// getFromTargetType - Given one of the integer types provided by 6328 /// TargetInfo, produce the corresponding type. The unsigned @p Type 6329 /// is actually a value of type @c TargetInfo::IntType. 6330 CanQualType ASTContext::getFromTargetType(unsigned Type) const { 6331 switch (Type) { 6332 case TargetInfo::NoInt: return CanQualType(); 6333 case TargetInfo::SignedShort: return ShortTy; 6334 case TargetInfo::UnsignedShort: return UnsignedShortTy; 6335 case TargetInfo::SignedInt: return IntTy; 6336 case TargetInfo::UnsignedInt: return UnsignedIntTy; 6337 case TargetInfo::SignedLong: return LongTy; 6338 case TargetInfo::UnsignedLong: return UnsignedLongTy; 6339 case TargetInfo::SignedLongLong: return LongLongTy; 6340 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; 6341 } 6342 6343 llvm_unreachable("Unhandled TargetInfo::IntType value"); 6344 } 6345 6346 //===----------------------------------------------------------------------===// 6347 // Type Predicates. 6348 //===----------------------------------------------------------------------===// 6349 6350 /// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's 6351 /// garbage collection attribute. 6352 /// 6353 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { 6354 if (getLangOpts().getGC() == LangOptions::NonGC) 6355 return Qualifiers::GCNone; 6356 6357 assert(getLangOpts().ObjC1); 6358 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); 6359 6360 // Default behaviour under objective-C's gc is for ObjC pointers 6361 // (or pointers to them) be treated as though they were declared 6362 // as __strong. 6363 if (GCAttrs == Qualifiers::GCNone) { 6364 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) 6365 return Qualifiers::Strong; 6366 else if (Ty->isPointerType()) 6367 return getObjCGCAttrKind(Ty->getAs<PointerType>()->getPointeeType()); 6368 } else { 6369 // It's not valid to set GC attributes on anything that isn't a 6370 // pointer. 6371 #ifndef NDEBUG 6372 QualType CT = Ty->getCanonicalTypeInternal(); 6373 while (const ArrayType *AT = dyn_cast<ArrayType>(CT)) 6374 CT = AT->getElementType(); 6375 assert(CT->isAnyPointerType() || CT->isBlockPointerType()); 6376 #endif 6377 } 6378 return GCAttrs; 6379 } 6380 6381 //===----------------------------------------------------------------------===// 6382 // Type Compatibility Testing 6383 //===----------------------------------------------------------------------===// 6384 6385 /// areCompatVectorTypes - Return true if the two specified vector types are 6386 /// compatible. 
6387 static bool areCompatVectorTypes(const VectorType *LHS, 6388 const VectorType *RHS) { 6389 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 6390 return LHS->getElementType() == RHS->getElementType() && 6391 LHS->getNumElements() == RHS->getNumElements(); 6392 } 6393 6394 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 6395 QualType SecondVec) { 6396 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 6397 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 6398 6399 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 6400 return true; 6401 6402 // Treat Neon vector types and most AltiVec vector types as if they are the 6403 // equivalent GCC vector types. 6404 const VectorType *First = FirstVec->getAs<VectorType>(); 6405 const VectorType *Second = SecondVec->getAs<VectorType>(); 6406 if (First->getNumElements() == Second->getNumElements() && 6407 hasSameType(First->getElementType(), Second->getElementType()) && 6408 First->getVectorKind() != VectorType::AltiVecPixel && 6409 First->getVectorKind() != VectorType::AltiVecBool && 6410 Second->getVectorKind() != VectorType::AltiVecPixel && 6411 Second->getVectorKind() != VectorType::AltiVecBool) 6412 return true; 6413 6414 return false; 6415 } 6416 6417 //===----------------------------------------------------------------------===// 6418 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 6419 //===----------------------------------------------------------------------===// 6420 6421 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 6422 /// inheritance hierarchy of 'rProto'. 6423 bool 6424 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 6425 ObjCProtocolDecl *rProto) const { 6426 if (declaresSameEntity(lProto, rProto)) 6427 return true; 6428 for (ObjCProtocolDecl::protocol_iterator PI = rProto->protocol_begin(), 6429 E = rProto->protocol_end(); PI != E; ++PI) 6430 if (ProtocolCompatibleWithProtocol(lProto, *PI)) 6431 return true; 6432 return false; 6433 } 6434 6435 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 6436 /// Class<pr1, ...>. 6437 bool ASTContext::ObjCQualifiedClassTypesAreCompatible(QualType lhs, 6438 QualType rhs) { 6439 const ObjCObjectPointerType *lhsQID = lhs->getAs<ObjCObjectPointerType>(); 6440 const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 6441 assert ((lhsQID && rhsOPT) && "ObjCQualifiedClassTypesAreCompatible"); 6442 6443 for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(), 6444 E = lhsQID->qual_end(); I != E; ++I) { 6445 bool match = false; 6446 ObjCProtocolDecl *lhsProto = *I; 6447 for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(), 6448 E = rhsOPT->qual_end(); J != E; ++J) { 6449 ObjCProtocolDecl *rhsProto = *J; 6450 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 6451 match = true; 6452 break; 6453 } 6454 } 6455 if (!match) 6456 return false; 6457 } 6458 return true; 6459 } 6460 6461 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 6462 /// ObjCQualifiedIDType. 6463 bool ASTContext::ObjCQualifiedIdTypesAreCompatible(QualType lhs, QualType rhs, 6464 bool compare) { 6465 // Allow id<P..> and an 'id' or void* type in all cases. 
6466 if (lhs->isVoidPointerType() || 6467 lhs->isObjCIdType() || lhs->isObjCClassType()) 6468 return true; 6469 else if (rhs->isVoidPointerType() || 6470 rhs->isObjCIdType() || rhs->isObjCClassType()) 6471 return true; 6472 6473 if (const ObjCObjectPointerType *lhsQID = lhs->getAsObjCQualifiedIdType()) { 6474 const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 6475 6476 if (!rhsOPT) return false; 6477 6478 if (rhsOPT->qual_empty()) { 6479 // If the RHS is a unqualified interface pointer "NSString*", 6480 // make sure we check the class hierarchy. 6481 if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) { 6482 for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(), 6483 E = lhsQID->qual_end(); I != E; ++I) { 6484 // when comparing an id<P> on lhs with a static type on rhs, 6485 // see if static class implements all of id's protocols, directly or 6486 // through its super class and categories. 6487 if (!rhsID->ClassImplementsProtocol(*I, true)) 6488 return false; 6489 } 6490 } 6491 // If there are no qualifiers and no interface, we have an 'id'. 6492 return true; 6493 } 6494 // Both the right and left sides have qualifiers. 6495 for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(), 6496 E = lhsQID->qual_end(); I != E; ++I) { 6497 ObjCProtocolDecl *lhsProto = *I; 6498 bool match = false; 6499 6500 // when comparing an id<P> on lhs with a static type on rhs, 6501 // see if static class implements all of id's protocols, directly or 6502 // through its super class and categories. 6503 for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(), 6504 E = rhsOPT->qual_end(); J != E; ++J) { 6505 ObjCProtocolDecl *rhsProto = *J; 6506 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 6507 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 6508 match = true; 6509 break; 6510 } 6511 } 6512 // If the RHS is a qualified interface pointer "NSString<P>*", 6513 // make sure we check the class hierarchy. 6514 if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) { 6515 for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(), 6516 E = lhsQID->qual_end(); I != E; ++I) { 6517 // when comparing an id<P> on lhs with a static type on rhs, 6518 // see if static class implements all of id's protocols, directly or 6519 // through its super class and categories. 6520 if (rhsID->ClassImplementsProtocol(*I, true)) { 6521 match = true; 6522 break; 6523 } 6524 } 6525 } 6526 if (!match) 6527 return false; 6528 } 6529 6530 return true; 6531 } 6532 6533 const ObjCObjectPointerType *rhsQID = rhs->getAsObjCQualifiedIdType(); 6534 assert(rhsQID && "One of the LHS/RHS should be id<x>"); 6535 6536 if (const ObjCObjectPointerType *lhsOPT = 6537 lhs->getAsObjCInterfacePointerType()) { 6538 // If both the right and left sides have qualifiers. 6539 for (ObjCObjectPointerType::qual_iterator I = lhsOPT->qual_begin(), 6540 E = lhsOPT->qual_end(); I != E; ++I) { 6541 ObjCProtocolDecl *lhsProto = *I; 6542 bool match = false; 6543 6544 // when comparing an id<P> on rhs with a static type on lhs, 6545 // see if static class implements all of id's protocols, directly or 6546 // through its super class and categories. 6547 // First, lhs protocols in the qualifier list must be found, direct 6548 // or indirect in rhs's qualifier list or it is a mismatch. 
6549 for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(), 6550 E = rhsQID->qual_end(); J != E; ++J) { 6551 ObjCProtocolDecl *rhsProto = *J; 6552 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 6553 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 6554 match = true; 6555 break; 6556 } 6557 } 6558 if (!match) 6559 return false; 6560 } 6561 6562 // Static class's protocols, or its super class or category protocols 6563 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 6564 if (ObjCInterfaceDecl *lhsID = lhsOPT->getInterfaceDecl()) { 6565 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 6566 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 6567 // This is rather dubious but matches gcc's behavior. If lhs has 6568 // no type qualifier and its class has no static protocol(s) 6569 // assume that it is mismatch. 6570 if (LHSInheritedProtocols.empty() && lhsOPT->qual_empty()) 6571 return false; 6572 for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I = 6573 LHSInheritedProtocols.begin(), 6574 E = LHSInheritedProtocols.end(); I != E; ++I) { 6575 bool match = false; 6576 ObjCProtocolDecl *lhsProto = (*I); 6577 for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(), 6578 E = rhsQID->qual_end(); J != E; ++J) { 6579 ObjCProtocolDecl *rhsProto = *J; 6580 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 6581 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 6582 match = true; 6583 break; 6584 } 6585 } 6586 if (!match) 6587 return false; 6588 } 6589 } 6590 return true; 6591 } 6592 return false; 6593 } 6594 6595 /// canAssignObjCInterfaces - Return true if the two interface types are 6596 /// compatible for assignment from RHS to LHS. This handles validation of any 6597 /// protocol qualifiers on the LHS or RHS. 6598 /// 6599 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 6600 const ObjCObjectPointerType *RHSOPT) { 6601 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 6602 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 6603 6604 // If either type represents the built-in 'id' or 'Class' types, return true. 6605 if (LHS->isObjCUnqualifiedIdOrClass() || 6606 RHS->isObjCUnqualifiedIdOrClass()) 6607 return true; 6608 6609 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) 6610 return ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0), 6611 QualType(RHSOPT,0), 6612 false); 6613 6614 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) 6615 return ObjCQualifiedClassTypesAreCompatible(QualType(LHSOPT,0), 6616 QualType(RHSOPT,0)); 6617 6618 // If we have 2 user-defined types, fall into that path. 6619 if (LHS->getInterface() && RHS->getInterface()) 6620 return canAssignObjCInterfaces(LHS, RHS); 6621 6622 return false; 6623 } 6624 6625 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written 6626 /// for providing type-safety for objective-c pointers used to pass/return 6627 /// arguments in block literals. When passed as arguments, passing 'A*' where 6628 /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is 6629 /// not OK. For the return type, the opposite is not OK. 
6630 bool ASTContext::canAssignObjCInterfacesInBlockPointer( 6631 const ObjCObjectPointerType *LHSOPT, 6632 const ObjCObjectPointerType *RHSOPT, 6633 bool BlockReturnType) { 6634 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) 6635 return true; 6636 6637 if (LHSOPT->isObjCBuiltinType()) { 6638 return RHSOPT->isObjCBuiltinType() || RHSOPT->isObjCQualifiedIdType(); 6639 } 6640 6641 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) 6642 return ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0), 6643 QualType(RHSOPT,0), 6644 false); 6645 6646 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); 6647 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); 6648 if (LHS && RHS) { // We have 2 user-defined types. 6649 if (LHS != RHS) { 6650 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) 6651 return BlockReturnType; 6652 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) 6653 return !BlockReturnType; 6654 } 6655 else 6656 return true; 6657 } 6658 return false; 6659 } 6660 6661 /// getIntersectionOfProtocols - This routine finds the intersection of set 6662 /// of protocols inherited from two distinct objective-c pointer objects. 6663 /// It is used to build composite qualifier list of the composite type of 6664 /// the conditional expression involving two objective-c pointer objects. 6665 static 6666 void getIntersectionOfProtocols(ASTContext &Context, 6667 const ObjCObjectPointerType *LHSOPT, 6668 const ObjCObjectPointerType *RHSOPT, 6669 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionOfProtocols) { 6670 6671 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 6672 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 6673 assert(LHS->getInterface() && "LHS must have an interface base"); 6674 assert(RHS->getInterface() && "RHS must have an interface base"); 6675 6676 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocolSet; 6677 unsigned LHSNumProtocols = LHS->getNumProtocols(); 6678 if (LHSNumProtocols > 0) 6679 InheritedProtocolSet.insert(LHS->qual_begin(), LHS->qual_end()); 6680 else { 6681 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 6682 Context.CollectInheritedProtocols(LHS->getInterface(), 6683 LHSInheritedProtocols); 6684 InheritedProtocolSet.insert(LHSInheritedProtocols.begin(), 6685 LHSInheritedProtocols.end()); 6686 } 6687 6688 unsigned RHSNumProtocols = RHS->getNumProtocols(); 6689 if (RHSNumProtocols > 0) { 6690 ObjCProtocolDecl **RHSProtocols = 6691 const_cast<ObjCProtocolDecl **>(RHS->qual_begin()); 6692 for (unsigned i = 0; i < RHSNumProtocols; ++i) 6693 if (InheritedProtocolSet.count(RHSProtocols[i])) 6694 IntersectionOfProtocols.push_back(RHSProtocols[i]); 6695 } else { 6696 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSInheritedProtocols; 6697 Context.CollectInheritedProtocols(RHS->getInterface(), 6698 RHSInheritedProtocols); 6699 for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I = 6700 RHSInheritedProtocols.begin(), 6701 E = RHSInheritedProtocols.end(); I != E; ++I) 6702 if (InheritedProtocolSet.count((*I))) 6703 IntersectionOfProtocols.push_back((*I)); 6704 } 6705 } 6706 6707 /// areCommonBaseCompatible - Returns common base class of the two classes if 6708 /// one found. Note that this is O'2 algorithm. But it will be called as the 6709 /// last type comparison in a ?-exp of ObjC pointer types before a 6710 /// warning is issued. So, its invokation is extremely rare. 
6711 QualType ASTContext::areCommonBaseCompatible( 6712 const ObjCObjectPointerType *Lptr, 6713 const ObjCObjectPointerType *Rptr) { 6714 const ObjCObjectType *LHS = Lptr->getObjectType(); 6715 const ObjCObjectType *RHS = Rptr->getObjectType(); 6716 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 6717 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 6718 if (!LDecl || !RDecl || (declaresSameEntity(LDecl, RDecl))) 6719 return QualType(); 6720 6721 do { 6722 LHS = cast<ObjCInterfaceType>(getObjCInterfaceType(LDecl)); 6723 if (canAssignObjCInterfaces(LHS, RHS)) { 6724 SmallVector<ObjCProtocolDecl *, 8> Protocols; 6725 getIntersectionOfProtocols(*this, Lptr, Rptr, Protocols); 6726 6727 QualType Result = QualType(LHS, 0); 6728 if (!Protocols.empty()) 6729 Result = getObjCObjectType(Result, Protocols.data(), Protocols.size()); 6730 Result = getObjCObjectPointerType(Result); 6731 return Result; 6732 } 6733 } while ((LDecl = LDecl->getSuperClass())); 6734 6735 return QualType(); 6736 } 6737 6738 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 6739 const ObjCObjectType *RHS) { 6740 assert(LHS->getInterface() && "LHS is not an interface type"); 6741 assert(RHS->getInterface() && "RHS is not an interface type"); 6742 6743 // Verify that the base decls are compatible: the RHS must be a subclass of 6744 // the LHS. 6745 if (!LHS->getInterface()->isSuperClassOf(RHS->getInterface())) 6746 return false; 6747 6748 // RHS must have a superset of the protocols in the LHS. If the LHS is not 6749 // protocol qualified at all, then we are good. 6750 if (LHS->getNumProtocols() == 0) 6751 return true; 6752 6753 // Okay, we know the LHS has protocol qualifiers. If the RHS doesn't, 6754 // more detailed analysis is required. 6755 if (RHS->getNumProtocols() == 0) { 6756 // OK, if LHS is a superclass of RHS *and* 6757 // this superclass is assignment compatible with LHS. 6758 // false otherwise. 6759 bool IsSuperClass = 6760 LHS->getInterface()->isSuperClassOf(RHS->getInterface()); 6761 if (IsSuperClass) { 6762 // OK if conversion of LHS to SuperClass results in narrowing of types 6763 // ; i.e., SuperClass may implement at least one of the protocols 6764 // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok. 6765 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>. 6766 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols; 6767 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols); 6768 // If super class has no protocols, it is not a match. 
6769 if (SuperClassInheritedProtocols.empty()) 6770 return false; 6771 6772 for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(), 6773 LHSPE = LHS->qual_end(); 6774 LHSPI != LHSPE; LHSPI++) { 6775 bool SuperImplementsProtocol = false; 6776 ObjCProtocolDecl *LHSProto = (*LHSPI); 6777 6778 for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I = 6779 SuperClassInheritedProtocols.begin(), 6780 E = SuperClassInheritedProtocols.end(); I != E; ++I) { 6781 ObjCProtocolDecl *SuperClassProto = (*I); 6782 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) { 6783 SuperImplementsProtocol = true; 6784 break; 6785 } 6786 } 6787 if (!SuperImplementsProtocol) 6788 return false; 6789 } 6790 return true; 6791 } 6792 return false; 6793 } 6794 6795 for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(), 6796 LHSPE = LHS->qual_end(); 6797 LHSPI != LHSPE; LHSPI++) { 6798 bool RHSImplementsProtocol = false; 6799 6800 // If the RHS doesn't implement the protocol on the left, the types 6801 // are incompatible. 6802 for (ObjCObjectType::qual_iterator RHSPI = RHS->qual_begin(), 6803 RHSPE = RHS->qual_end(); 6804 RHSPI != RHSPE; RHSPI++) { 6805 if ((*RHSPI)->lookupProtocolNamed((*LHSPI)->getIdentifier())) { 6806 RHSImplementsProtocol = true; 6807 break; 6808 } 6809 } 6810 // FIXME: For better diagnostics, consider passing back the protocol name. 6811 if (!RHSImplementsProtocol) 6812 return false; 6813 } 6814 // The RHS implements all protocols listed on the LHS. 6815 return true; 6816 } 6817 6818 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) { 6819 // get the "pointed to" types 6820 const ObjCObjectPointerType *LHSOPT = LHS->getAs<ObjCObjectPointerType>(); 6821 const ObjCObjectPointerType *RHSOPT = RHS->getAs<ObjCObjectPointerType>(); 6822 6823 if (!LHSOPT || !RHSOPT) 6824 return false; 6825 6826 return canAssignObjCInterfaces(LHSOPT, RHSOPT) || 6827 canAssignObjCInterfaces(RHSOPT, LHSOPT); 6828 } 6829 6830 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) { 6831 return canAssignObjCInterfaces( 6832 getObjCObjectPointerType(To)->getAs<ObjCObjectPointerType>(), 6833 getObjCObjectPointerType(From)->getAs<ObjCObjectPointerType>()); 6834 } 6835 6836 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible, 6837 /// both shall have the identically qualified version of a compatible type. 6838 /// C99 6.2.7p1: Two types have compatible types if their types are the 6839 /// same. See 6.7.[2,3,5] for additional rules. 
6840 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 6841 bool CompareUnqualified) { 6842 if (getLangOpts().CPlusPlus) 6843 return hasSameType(LHS, RHS); 6844 6845 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 6846 } 6847 6848 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 6849 return typesAreCompatible(LHS, RHS); 6850 } 6851 6852 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 6853 return !mergeTypes(LHS, RHS, true).isNull(); 6854 } 6855 6856 /// mergeTransparentUnionType - if T is a transparent union type and a member 6857 /// of T is compatible with SubType, return the merged type, else return 6858 /// QualType() 6859 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 6860 bool OfBlockPointer, 6861 bool Unqualified) { 6862 if (const RecordType *UT = T->getAsUnionType()) { 6863 RecordDecl *UD = UT->getDecl(); 6864 if (UD->hasAttr<TransparentUnionAttr>()) { 6865 for (RecordDecl::field_iterator it = UD->field_begin(), 6866 itend = UD->field_end(); it != itend; ++it) { 6867 QualType ET = it->getType().getUnqualifiedType(); 6868 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 6869 if (!MT.isNull()) 6870 return MT; 6871 } 6872 } 6873 } 6874 6875 return QualType(); 6876 } 6877 6878 /// mergeFunctionArgumentTypes - merge two types which appear as function 6879 /// argument types 6880 QualType ASTContext::mergeFunctionArgumentTypes(QualType lhs, QualType rhs, 6881 bool OfBlockPointer, 6882 bool Unqualified) { 6883 // GNU extension: two types are compatible if they appear as a function 6884 // argument, one of the types is a transparent union type and the other 6885 // type is compatible with a union member 6886 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 6887 Unqualified); 6888 if (!lmerge.isNull()) 6889 return lmerge; 6890 6891 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 6892 Unqualified); 6893 if (!rmerge.isNull()) 6894 return rmerge; 6895 6896 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 6897 } 6898 6899 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 6900 bool OfBlockPointer, 6901 bool Unqualified) { 6902 const FunctionType *lbase = lhs->getAs<FunctionType>(); 6903 const FunctionType *rbase = rhs->getAs<FunctionType>(); 6904 const FunctionProtoType *lproto = dyn_cast<FunctionProtoType>(lbase); 6905 const FunctionProtoType *rproto = dyn_cast<FunctionProtoType>(rbase); 6906 bool allLTypes = true; 6907 bool allRTypes = true; 6908 6909 // Check return type 6910 QualType retType; 6911 if (OfBlockPointer) { 6912 QualType RHS = rbase->getResultType(); 6913 QualType LHS = lbase->getResultType(); 6914 bool UnqualifiedResult = Unqualified; 6915 if (!UnqualifiedResult) 6916 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 6917 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 6918 } 6919 else 6920 retType = mergeTypes(lbase->getResultType(), rbase->getResultType(), false, 6921 Unqualified); 6922 if (retType.isNull()) return QualType(); 6923 6924 if (Unqualified) 6925 retType = retType.getUnqualifiedType(); 6926 6927 CanQualType LRetType = getCanonicalType(lbase->getResultType()); 6928 CanQualType RRetType = getCanonicalType(rbase->getResultType()); 6929 if (Unqualified) { 6930 LRetType = LRetType.getUnqualifiedType(); 6931 RRetType = RRetType.getUnqualifiedType(); 6932 } 6933 6934 if (getCanonicalType(retType) != LRetType) 6935 allLTypes = false; 6936 
if (getCanonicalType(retType) != RRetType) 6937 allRTypes = false; 6938 6939 // FIXME: double check this 6940 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 6941 // rbase->getRegParmAttr() != 0 && 6942 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 6943 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 6944 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 6945 6946 // Compatible functions must have compatible calling conventions 6947 if (!isSameCallConv(lbaseInfo.getCC(), rbaseInfo.getCC())) 6948 return QualType(); 6949 6950 // Regparm is part of the calling convention. 6951 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 6952 return QualType(); 6953 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 6954 return QualType(); 6955 6956 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 6957 return QualType(); 6958 6959 // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'. 6960 bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 6961 6962 if (lbaseInfo.getNoReturn() != NoReturn) 6963 allLTypes = false; 6964 if (rbaseInfo.getNoReturn() != NoReturn) 6965 allRTypes = false; 6966 6967 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 6968 6969 if (lproto && rproto) { // two C99 style function prototypes 6970 assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() && 6971 "C++ shouldn't be here"); 6972 unsigned lproto_nargs = lproto->getNumArgs(); 6973 unsigned rproto_nargs = rproto->getNumArgs(); 6974 6975 // Compatible functions must have the same number of arguments 6976 if (lproto_nargs != rproto_nargs) 6977 return QualType(); 6978 6979 // Variadic and non-variadic functions aren't compatible 6980 if (lproto->isVariadic() != rproto->isVariadic()) 6981 return QualType(); 6982 6983 if (lproto->getTypeQuals() != rproto->getTypeQuals()) 6984 return QualType(); 6985 6986 if (LangOpts.ObjCAutoRefCount && 6987 !FunctionTypesMatchOnNSConsumedAttrs(rproto, lproto)) 6988 return QualType(); 6989 6990 // Check argument compatibility 6991 SmallVector<QualType, 10> types; 6992 for (unsigned i = 0; i < lproto_nargs; i++) { 6993 QualType largtype = lproto->getArgType(i).getUnqualifiedType(); 6994 QualType rargtype = rproto->getArgType(i).getUnqualifiedType(); 6995 QualType argtype = mergeFunctionArgumentTypes(largtype, rargtype, 6996 OfBlockPointer, 6997 Unqualified); 6998 if (argtype.isNull()) return QualType(); 6999 7000 if (Unqualified) 7001 argtype = argtype.getUnqualifiedType(); 7002 7003 types.push_back(argtype); 7004 if (Unqualified) { 7005 largtype = largtype.getUnqualifiedType(); 7006 rargtype = rargtype.getUnqualifiedType(); 7007 } 7008 7009 if (getCanonicalType(argtype) != getCanonicalType(largtype)) 7010 allLTypes = false; 7011 if (getCanonicalType(argtype) != getCanonicalType(rargtype)) 7012 allRTypes = false; 7013 } 7014 7015 if (allLTypes) return lhs; 7016 if (allRTypes) return rhs; 7017 7018 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 7019 EPI.ExtInfo = einfo; 7020 return getFunctionType(retType, types, EPI); 7021 } 7022 7023 if (lproto) allRTypes = false; 7024 if (rproto) allLTypes = false; 7025 7026 const FunctionProtoType *proto = lproto ? lproto : rproto; 7027 if (proto) { 7028 assert(!proto->hasExceptionSpec() && "C++ shouldn't be here"); 7029 if (proto->isVariadic()) return QualType(); 7030 // Check that the types are compatible with the types that 7031 // would result from default argument promotions (C99 6.7.5.3p15). 
7032 // The only types actually affected are promotable integer 7033 // types and floats, which would be passed as a different 7034 // type depending on whether the prototype is visible. 7035 unsigned proto_nargs = proto->getNumArgs(); 7036 for (unsigned i = 0; i < proto_nargs; ++i) { 7037 QualType argTy = proto->getArgType(i); 7038 7039 // Look at the converted type of enum types, since that is the type used 7040 // to pass enum values. 7041 if (const EnumType *Enum = argTy->getAs<EnumType>()) { 7042 argTy = Enum->getDecl()->getIntegerType(); 7043 if (argTy.isNull()) 7044 return QualType(); 7045 } 7046 7047 if (argTy->isPromotableIntegerType() || 7048 getCanonicalType(argTy).getUnqualifiedType() == FloatTy) 7049 return QualType(); 7050 } 7051 7052 if (allLTypes) return lhs; 7053 if (allRTypes) return rhs; 7054 7055 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 7056 EPI.ExtInfo = einfo; 7057 return getFunctionType(retType, proto->getArgTypes(), EPI); 7058 } 7059 7060 if (allLTypes) return lhs; 7061 if (allRTypes) return rhs; 7062 return getFunctionNoProtoType(retType, einfo); 7063 } 7064 7065 /// Given that we have an enum type and a non-enum type, try to merge them. 7066 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 7067 QualType other, bool isBlockReturnType) { 7068 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 7069 // a signed integer type, or an unsigned integer type. 7070 // Compatibility is based on the underlying type, not the promotion 7071 // type. 7072 QualType underlyingType = ET->getDecl()->getIntegerType(); 7073 if (underlyingType.isNull()) return QualType(); 7074 if (Context.hasSameType(underlyingType, other)) 7075 return other; 7076 7077 // In block return types, we're more permissive and accept any 7078 // integral type of the same size. 7079 if (isBlockReturnType && other->isIntegerType() && 7080 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 7081 return other; 7082 7083 return QualType(); 7084 } 7085 7086 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, 7087 bool OfBlockPointer, 7088 bool Unqualified, bool BlockReturnType) { 7089 // C++ [expr]: If an expression initially has the type "reference to T", the 7090 // type is adjusted to "T" prior to any further analysis, the expression 7091 // designates the object or function denoted by the reference, and the 7092 // expression is an lvalue unless the reference is an rvalue reference and 7093 // the expression is a function call (possibly inside parentheses). 7094 assert(!LHS->getAs<ReferenceType>() && "LHS is a reference type?"); 7095 assert(!RHS->getAs<ReferenceType>() && "RHS is a reference type?"); 7096 7097 if (Unqualified) { 7098 LHS = LHS.getUnqualifiedType(); 7099 RHS = RHS.getUnqualifiedType(); 7100 } 7101 7102 QualType LHSCan = getCanonicalType(LHS), 7103 RHSCan = getCanonicalType(RHS); 7104 7105 // If two types are identical, they are compatible. 7106 if (LHSCan == RHSCan) 7107 return LHS; 7108 7109 // If the qualifiers are different, the types aren't compatible... mostly. 7110 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 7111 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 7112 if (LQuals != RQuals) { 7113 // If any of these qualifiers are different, we have a type 7114 // mismatch. 
7115 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 7116 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 7117 LQuals.getObjCLifetime() != RQuals.getObjCLifetime()) 7118 return QualType(); 7119 7120 // Exactly one GC qualifier difference is allowed: __strong is 7121 // okay if the other type has no GC qualifier but is an Objective 7122 // C object pointer (i.e. implicitly strong by default). We fix 7123 // this by pretending that the unqualified type was actually 7124 // qualified __strong. 7125 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 7126 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 7127 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 7128 7129 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 7130 return QualType(); 7131 7132 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 7133 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 7134 } 7135 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 7136 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 7137 } 7138 return QualType(); 7139 } 7140 7141 // Okay, qualifiers are equal. 7142 7143 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 7144 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 7145 7146 // We want to consider the two function types to be the same for these 7147 // comparisons, just force one to the other. 7148 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 7149 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 7150 7151 // Same as above for arrays 7152 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 7153 LHSClass = Type::ConstantArray; 7154 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 7155 RHSClass = Type::ConstantArray; 7156 7157 // ObjCInterfaces are just specialized ObjCObjects. 7158 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 7159 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 7160 7161 // Canonicalize ExtVector -> Vector. 7162 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 7163 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 7164 7165 // If the canonical type classes don't match. 7166 if (LHSClass != RHSClass) { 7167 // Note that we only have special rules for turning block enum 7168 // returns into block int returns, not vice-versa. 7169 if (const EnumType* ETy = LHS->getAs<EnumType>()) { 7170 return mergeEnumWithInteger(*this, ETy, RHS, false); 7171 } 7172 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 7173 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 7174 } 7175 // allow block pointer type to match an 'id' type. 7176 if (OfBlockPointer && !BlockReturnType) { 7177 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 7178 return LHS; 7179 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 7180 return RHS; 7181 } 7182 7183 return QualType(); 7184 } 7185 7186 // The canonical type classes match. 
7187 switch (LHSClass) { 7188 #define TYPE(Class, Base) 7189 #define ABSTRACT_TYPE(Class, Base) 7190 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 7191 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 7192 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 7193 #include "clang/AST/TypeNodes.def" 7194 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 7195 7196 case Type::Auto: 7197 case Type::LValueReference: 7198 case Type::RValueReference: 7199 case Type::MemberPointer: 7200 llvm_unreachable("C++ should never be in mergeTypes"); 7201 7202 case Type::ObjCInterface: 7203 case Type::IncompleteArray: 7204 case Type::VariableArray: 7205 case Type::FunctionProto: 7206 case Type::ExtVector: 7207 llvm_unreachable("Types are eliminated above"); 7208 7209 case Type::Pointer: 7210 { 7211 // Merge two pointer types, while trying to preserve typedef info 7212 QualType LHSPointee = LHS->getAs<PointerType>()->getPointeeType(); 7213 QualType RHSPointee = RHS->getAs<PointerType>()->getPointeeType(); 7214 if (Unqualified) { 7215 LHSPointee = LHSPointee.getUnqualifiedType(); 7216 RHSPointee = RHSPointee.getUnqualifiedType(); 7217 } 7218 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 7219 Unqualified); 7220 if (ResultType.isNull()) return QualType(); 7221 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 7222 return LHS; 7223 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 7224 return RHS; 7225 return getPointerType(ResultType); 7226 } 7227 case Type::BlockPointer: 7228 { 7229 // Merge two block pointer types, while trying to preserve typedef info 7230 QualType LHSPointee = LHS->getAs<BlockPointerType>()->getPointeeType(); 7231 QualType RHSPointee = RHS->getAs<BlockPointerType>()->getPointeeType(); 7232 if (Unqualified) { 7233 LHSPointee = LHSPointee.getUnqualifiedType(); 7234 RHSPointee = RHSPointee.getUnqualifiedType(); 7235 } 7236 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 7237 Unqualified); 7238 if (ResultType.isNull()) return QualType(); 7239 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 7240 return LHS; 7241 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 7242 return RHS; 7243 return getBlockPointerType(ResultType); 7244 } 7245 case Type::Atomic: 7246 { 7247 // Merge two pointer types, while trying to preserve typedef info 7248 QualType LHSValue = LHS->getAs<AtomicType>()->getValueType(); 7249 QualType RHSValue = RHS->getAs<AtomicType>()->getValueType(); 7250 if (Unqualified) { 7251 LHSValue = LHSValue.getUnqualifiedType(); 7252 RHSValue = RHSValue.getUnqualifiedType(); 7253 } 7254 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 7255 Unqualified); 7256 if (ResultType.isNull()) return QualType(); 7257 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 7258 return LHS; 7259 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 7260 return RHS; 7261 return getAtomicType(ResultType); 7262 } 7263 case Type::ConstantArray: 7264 { 7265 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 7266 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 7267 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 7268 return QualType(); 7269 7270 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 7271 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 7272 if (Unqualified) { 7273 LHSElem = LHSElem.getUnqualifiedType(); 7274 RHSElem = RHSElem.getUnqualifiedType(); 7275 } 
7276 7277 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 7278 if (ResultType.isNull()) return QualType(); 7279 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 7280 return LHS; 7281 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 7282 return RHS; 7283 if (LCAT) return getConstantArrayType(ResultType, LCAT->getSize(), 7284 ArrayType::ArraySizeModifier(), 0); 7285 if (RCAT) return getConstantArrayType(ResultType, RCAT->getSize(), 7286 ArrayType::ArraySizeModifier(), 0); 7287 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 7288 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 7289 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 7290 return LHS; 7291 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 7292 return RHS; 7293 if (LVAT) { 7294 // FIXME: This isn't correct! But tricky to implement because 7295 // the array's size has to be the size of LHS, but the type 7296 // has to be different. 7297 return LHS; 7298 } 7299 if (RVAT) { 7300 // FIXME: This isn't correct! But tricky to implement because 7301 // the array's size has to be the size of RHS, but the type 7302 // has to be different. 7303 return RHS; 7304 } 7305 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 7306 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 7307 return getIncompleteArrayType(ResultType, 7308 ArrayType::ArraySizeModifier(), 0); 7309 } 7310 case Type::FunctionNoProto: 7311 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); 7312 case Type::Record: 7313 case Type::Enum: 7314 return QualType(); 7315 case Type::Builtin: 7316 // Only exactly equal builtin types are compatible, which is tested above. 7317 return QualType(); 7318 case Type::Complex: 7319 // Distinct complex types are incompatible. 7320 return QualType(); 7321 case Type::Vector: 7322 // FIXME: The merged type should be an ExtVector! 7323 if (areCompatVectorTypes(LHSCan->getAs<VectorType>(), 7324 RHSCan->getAs<VectorType>())) 7325 return LHS; 7326 return QualType(); 7327 case Type::ObjCObject: { 7328 // Check if the types are assignment compatible. 7329 // FIXME: This should be type compatibility, e.g. whether 7330 // "LHS x; RHS x;" at global scope is legal. 
7331 const ObjCObjectType* LHSIface = LHS->getAs<ObjCObjectType>(); 7332 const ObjCObjectType* RHSIface = RHS->getAs<ObjCObjectType>(); 7333 if (canAssignObjCInterfaces(LHSIface, RHSIface)) 7334 return LHS; 7335 7336 return QualType(); 7337 } 7338 case Type::ObjCObjectPointer: { 7339 if (OfBlockPointer) { 7340 if (canAssignObjCInterfacesInBlockPointer( 7341 LHS->getAs<ObjCObjectPointerType>(), 7342 RHS->getAs<ObjCObjectPointerType>(), 7343 BlockReturnType)) 7344 return LHS; 7345 return QualType(); 7346 } 7347 if (canAssignObjCInterfaces(LHS->getAs<ObjCObjectPointerType>(), 7348 RHS->getAs<ObjCObjectPointerType>())) 7349 return LHS; 7350 7351 return QualType(); 7352 } 7353 } 7354 7355 llvm_unreachable("Invalid Type::Class!"); 7356 } 7357 7358 bool ASTContext::FunctionTypesMatchOnNSConsumedAttrs( 7359 const FunctionProtoType *FromFunctionType, 7360 const FunctionProtoType *ToFunctionType) { 7361 if (FromFunctionType->hasAnyConsumedArgs() != 7362 ToFunctionType->hasAnyConsumedArgs()) 7363 return false; 7364 FunctionProtoType::ExtProtoInfo FromEPI = 7365 FromFunctionType->getExtProtoInfo(); 7366 FunctionProtoType::ExtProtoInfo ToEPI = 7367 ToFunctionType->getExtProtoInfo(); 7368 if (FromEPI.ConsumedArguments && ToEPI.ConsumedArguments) 7369 for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs(); 7370 ArgIdx != NumArgs; ++ArgIdx) { 7371 if (FromEPI.ConsumedArguments[ArgIdx] != 7372 ToEPI.ConsumedArguments[ArgIdx]) 7373 return false; 7374 } 7375 return true; 7376 } 7377 7378 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 7379 /// 'RHS' attributes and returns the merged version; including for function 7380 /// return types. 7381 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 7382 QualType LHSCan = getCanonicalType(LHS), 7383 RHSCan = getCanonicalType(RHS); 7384 // If two types are identical, they are compatible. 7385 if (LHSCan == RHSCan) 7386 return LHS; 7387 if (RHSCan->isFunctionType()) { 7388 if (!LHSCan->isFunctionType()) 7389 return QualType(); 7390 QualType OldReturnType = 7391 cast<FunctionType>(RHSCan.getTypePtr())->getResultType(); 7392 QualType NewReturnType = 7393 cast<FunctionType>(LHSCan.getTypePtr())->getResultType(); 7394 QualType ResReturnType = 7395 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 7396 if (ResReturnType.isNull()) 7397 return QualType(); 7398 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 7399 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 7400 // In either case, use OldReturnType to build the new function type. 7401 const FunctionType *F = LHS->getAs<FunctionType>(); 7402 if (const FunctionProtoType *FPT = cast<FunctionProtoType>(F)) { 7403 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 7404 EPI.ExtInfo = getFunctionExtInfo(LHS); 7405 QualType ResultType = 7406 getFunctionType(OldReturnType, FPT->getArgTypes(), EPI); 7407 return ResultType; 7408 } 7409 } 7410 return QualType(); 7411 } 7412 7413 // If the qualifiers are different, the types can still be merged. 7414 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 7415 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 7416 if (LQuals != RQuals) { 7417 // If any of these qualifiers are different, we have a type mismatch. 
7418 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 7419 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 7420 return QualType(); 7421 7422 // Exactly one GC qualifier difference is allowed: __strong is 7423 // okay if the other type has no GC qualifier but is an Objective 7424 // C object pointer (i.e. implicitly strong by default). We fix 7425 // this by pretending that the unqualified type was actually 7426 // qualified __strong. 7427 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 7428 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 7429 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 7430 7431 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 7432 return QualType(); 7433 7434 if (GC_L == Qualifiers::Strong) 7435 return LHS; 7436 if (GC_R == Qualifiers::Strong) 7437 return RHS; 7438 return QualType(); 7439 } 7440 7441 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { 7442 QualType LHSBaseQT = LHS->getAs<ObjCObjectPointerType>()->getPointeeType(); 7443 QualType RHSBaseQT = RHS->getAs<ObjCObjectPointerType>()->getPointeeType(); 7444 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); 7445 if (ResQT == LHSBaseQT) 7446 return LHS; 7447 if (ResQT == RHSBaseQT) 7448 return RHS; 7449 } 7450 return QualType(); 7451 } 7452 7453 //===----------------------------------------------------------------------===// 7454 // Integer Predicates 7455 //===----------------------------------------------------------------------===// 7456 7457 unsigned ASTContext::getIntWidth(QualType T) const { 7458 if (const EnumType *ET = dyn_cast<EnumType>(T)) 7459 T = ET->getDecl()->getIntegerType(); 7460 if (T->isBooleanType()) 7461 return 1; 7462 // For builtin types, just use the standard type sizing method 7463 return (unsigned)getTypeSize(T); 7464 } 7465 7466 QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { 7467 assert(T->hasSignedIntegerRepresentation() && "Unexpected type"); 7468 7469 // Turn <4 x signed int> -> <4 x unsigned int> 7470 if (const VectorType *VTy = T->getAs<VectorType>()) 7471 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), 7472 VTy->getNumElements(), VTy->getVectorKind()); 7473 7474 // For enums, we return the unsigned version of the base type. 7475 if (const EnumType *ETy = T->getAs<EnumType>()) 7476 T = ETy->getDecl()->getIntegerType(); 7477 7478 const BuiltinType *BTy = T->getAs<BuiltinType>(); 7479 assert(BTy && "Unexpected signed integer type"); 7480 switch (BTy->getKind()) { 7481 case BuiltinType::Char_S: 7482 case BuiltinType::SChar: 7483 return UnsignedCharTy; 7484 case BuiltinType::Short: 7485 return UnsignedShortTy; 7486 case BuiltinType::Int: 7487 return UnsignedIntTy; 7488 case BuiltinType::Long: 7489 return UnsignedLongTy; 7490 case BuiltinType::LongLong: 7491 return UnsignedLongLongTy; 7492 case BuiltinType::Int128: 7493 return UnsignedInt128Ty; 7494 default: 7495 llvm_unreachable("Unexpected signed integer type"); 7496 } 7497 } 7498 7499 ASTMutationListener::~ASTMutationListener() { } 7500 7501 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 7502 QualType ReturnType) {} 7503 7504 //===----------------------------------------------------------------------===// 7505 // Builtin Type Computation 7506 //===----------------------------------------------------------------------===// 7507 7508 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 7509 /// pointer over the consumed characters. 
This returns the resultant type. If 7510 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 7511 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 7512 /// a vector of "i*". 7513 /// 7514 /// RequiresICE is filled in on return to indicate whether the value is required 7515 /// to be an Integer Constant Expression. 7516 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 7517 ASTContext::GetBuiltinTypeError &Error, 7518 bool &RequiresICE, 7519 bool AllowTypeModifiers) { 7520 // Modifiers. 7521 int HowLong = 0; 7522 bool Signed = false, Unsigned = false; 7523 RequiresICE = false; 7524 7525 // Read the prefixed modifiers first. 7526 bool Done = false; 7527 while (!Done) { 7528 switch (*Str++) { 7529 default: Done = true; --Str; break; 7530 case 'I': 7531 RequiresICE = true; 7532 break; 7533 case 'S': 7534 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 7535 assert(!Signed && "Can't use 'S' modifier multiple times!"); 7536 Signed = true; 7537 break; 7538 case 'U': 7539 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 7540 assert(!Unsigned && "Can't use 'S' modifier multiple times!"); 7541 Unsigned = true; 7542 break; 7543 case 'L': 7544 assert(HowLong <= 2 && "Can't have LLLL modifier"); 7545 ++HowLong; 7546 break; 7547 } 7548 } 7549 7550 QualType Type; 7551 7552 // Read the base type. 7553 switch (*Str++) { 7554 default: llvm_unreachable("Unknown builtin type letter!"); 7555 case 'v': 7556 assert(HowLong == 0 && !Signed && !Unsigned && 7557 "Bad modifiers used with 'v'!"); 7558 Type = Context.VoidTy; 7559 break; 7560 case 'f': 7561 assert(HowLong == 0 && !Signed && !Unsigned && 7562 "Bad modifiers used with 'f'!"); 7563 Type = Context.FloatTy; 7564 break; 7565 case 'd': 7566 assert(HowLong < 2 && !Signed && !Unsigned && 7567 "Bad modifiers used with 'd'!"); 7568 if (HowLong) 7569 Type = Context.LongDoubleTy; 7570 else 7571 Type = Context.DoubleTy; 7572 break; 7573 case 's': 7574 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 7575 if (Unsigned) 7576 Type = Context.UnsignedShortTy; 7577 else 7578 Type = Context.ShortTy; 7579 break; 7580 case 'i': 7581 if (HowLong == 3) 7582 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 7583 else if (HowLong == 2) 7584 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 7585 else if (HowLong == 1) 7586 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 7587 else 7588 Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy; 7589 break; 7590 case 'c': 7591 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 7592 if (Signed) 7593 Type = Context.SignedCharTy; 7594 else if (Unsigned) 7595 Type = Context.UnsignedCharTy; 7596 else 7597 Type = Context.CharTy; 7598 break; 7599 case 'b': // boolean 7600 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 7601 Type = Context.BoolTy; 7602 break; 7603 case 'z': // size_t. 
7604 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 7605 Type = Context.getSizeType(); 7606 break; 7607 case 'F': 7608 Type = Context.getCFConstantStringType(); 7609 break; 7610 case 'G': 7611 Type = Context.getObjCIdType(); 7612 break; 7613 case 'H': 7614 Type = Context.getObjCSelType(); 7615 break; 7616 case 'M': 7617 Type = Context.getObjCSuperType(); 7618 break; 7619 case 'a': 7620 Type = Context.getBuiltinVaListType(); 7621 assert(!Type.isNull() && "builtin va list type not initialized!"); 7622 break; 7623 case 'A': 7624 // This is a "reference" to a va_list; however, what exactly 7625 // this means depends on how va_list is defined. There are two 7626 // different kinds of va_list: ones passed by value, and ones 7627 // passed by reference. An example of a by-value va_list is 7628 // x86, where va_list is a char*. An example of by-ref va_list 7629 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 7630 // we want this argument to be a char*&; for x86-64, we want 7631 // it to be a __va_list_tag*. 7632 Type = Context.getBuiltinVaListType(); 7633 assert(!Type.isNull() && "builtin va list type not initialized!"); 7634 if (Type->isArrayType()) 7635 Type = Context.getArrayDecayedType(Type); 7636 else 7637 Type = Context.getLValueReferenceType(Type); 7638 break; 7639 case 'V': { 7640 char *End; 7641 unsigned NumElements = strtoul(Str, &End, 10); 7642 assert(End != Str && "Missing vector size"); 7643 Str = End; 7644 7645 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 7646 RequiresICE, false); 7647 assert(!RequiresICE && "Can't require vector ICE"); 7648 7649 // TODO: No way to make AltiVec vectors in builtins yet. 7650 Type = Context.getVectorType(ElementType, NumElements, 7651 VectorType::GenericVector); 7652 break; 7653 } 7654 case 'E': { 7655 char *End; 7656 7657 unsigned NumElements = strtoul(Str, &End, 10); 7658 assert(End != Str && "Missing vector size"); 7659 7660 Str = End; 7661 7662 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 7663 false); 7664 Type = Context.getExtVectorType(ElementType, NumElements); 7665 break; 7666 } 7667 case 'X': { 7668 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 7669 false); 7670 assert(!RequiresICE && "Can't require complex ICE"); 7671 Type = Context.getComplexType(ElementType); 7672 break; 7673 } 7674 case 'Y' : { 7675 Type = Context.getPointerDiffType(); 7676 break; 7677 } 7678 case 'P': 7679 Type = Context.getFILEType(); 7680 if (Type.isNull()) { 7681 Error = ASTContext::GE_Missing_stdio; 7682 return QualType(); 7683 } 7684 break; 7685 case 'J': 7686 if (Signed) 7687 Type = Context.getsigjmp_bufType(); 7688 else 7689 Type = Context.getjmp_bufType(); 7690 7691 if (Type.isNull()) { 7692 Error = ASTContext::GE_Missing_setjmp; 7693 return QualType(); 7694 } 7695 break; 7696 case 'K': 7697 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 7698 Type = Context.getucontext_tType(); 7699 7700 if (Type.isNull()) { 7701 Error = ASTContext::GE_Missing_ucontext; 7702 return QualType(); 7703 } 7704 break; 7705 case 'p': 7706 Type = Context.getProcessIDType(); 7707 break; 7708 } 7709 7710 // If there are modifiers and if we're allowed to parse them, go for it. 7711 Done = !AllowTypeModifiers; 7712 while (!Done) { 7713 switch (char c = *Str++) { 7714 default: Done = true; --Str; break; 7715 case '*': 7716 case '&': { 7717 // Both pointers and references can have their pointee types 7718 // qualified with an address space. 
7719 char *End; 7720 unsigned AddrSpace = strtoul(Str, &End, 10); 7721 if (End != Str && AddrSpace != 0) { 7722 Type = Context.getAddrSpaceQualType(Type, AddrSpace); 7723 Str = End; 7724 } 7725 if (c == '*') 7726 Type = Context.getPointerType(Type); 7727 else 7728 Type = Context.getLValueReferenceType(Type); 7729 break; 7730 } 7731 // FIXME: There's no way to have a built-in with an rvalue ref arg. 7732 case 'C': 7733 Type = Type.withConst(); 7734 break; 7735 case 'D': 7736 Type = Context.getVolatileType(Type); 7737 break; 7738 case 'R': 7739 Type = Type.withRestrict(); 7740 break; 7741 } 7742 } 7743 7744 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 7745 "Integer constant 'I' type must be an integer"); 7746 7747 return Type; 7748 } 7749 7750 /// GetBuiltinType - Return the type for the specified builtin. 7751 QualType ASTContext::GetBuiltinType(unsigned Id, 7752 GetBuiltinTypeError &Error, 7753 unsigned *IntegerConstantArgs) const { 7754 const char *TypeStr = BuiltinInfo.GetTypeString(Id); 7755 7756 SmallVector<QualType, 8> ArgTypes; 7757 7758 bool RequiresICE = false; 7759 Error = GE_None; 7760 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 7761 RequiresICE, true); 7762 if (Error != GE_None) 7763 return QualType(); 7764 7765 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 7766 7767 while (TypeStr[0] && TypeStr[0] != '.') { 7768 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 7769 if (Error != GE_None) 7770 return QualType(); 7771 7772 // If this argument is required to be an IntegerConstantExpression and the 7773 // caller cares, fill in the bitmask we return. 7774 if (RequiresICE && IntegerConstantArgs) 7775 *IntegerConstantArgs |= 1 << ArgTypes.size(); 7776 7777 // Do array -> pointer decay. The builtin should use the decayed type. 7778 if (Ty->isArrayType()) 7779 Ty = getArrayDecayedType(Ty); 7780 7781 ArgTypes.push_back(Ty); 7782 } 7783 7784 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 7785 "'.' should only occur at end of builtin type list!"); 7786 7787 FunctionType::ExtInfo EI; 7788 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 7789 7790 bool Variadic = (TypeStr[0] == '.'); 7791 7792 // We really shouldn't be making a no-proto type here, especially in C++. 7793 if (ArgTypes.empty() && Variadic) 7794 return getFunctionNoProtoType(ResType, EI); 7795 7796 FunctionProtoType::ExtProtoInfo EPI; 7797 EPI.ExtInfo = EI; 7798 EPI.Variadic = Variadic; 7799 7800 return getFunctionType(ResType, ArgTypes, EPI); 7801 } 7802 7803 GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) { 7804 if (!FD->isExternallyVisible()) 7805 return GVA_Internal; 7806 7807 GVALinkage External = GVA_StrongExternal; 7808 switch (FD->getTemplateSpecializationKind()) { 7809 case TSK_Undeclared: 7810 case TSK_ExplicitSpecialization: 7811 External = GVA_StrongExternal; 7812 break; 7813 7814 case TSK_ExplicitInstantiationDefinition: 7815 return GVA_ExplicitTemplateInstantiation; 7816 7817 case TSK_ExplicitInstantiationDeclaration: 7818 case TSK_ImplicitInstantiation: 7819 External = GVA_TemplateInstantiation; 7820 break; 7821 } 7822 7823 if (!FD->isInlined()) 7824 return External; 7825 7826 if ((!getLangOpts().CPlusPlus && !getLangOpts().MicrosoftMode) || 7827 FD->hasAttr<GNUInlineAttr>()) { 7828 // GNU or C99 inline semantics. Determine whether this symbol should be 7829 // externally visible. 
    if (FD->isInlineDefinitionExternallyVisible())
      return External;

    // C99 inline semantics, where the symbol is not externally visible.
    return GVA_C99Inline;
  }

  // C++0x [temp.explicit]p9:
  //   [ Note: The intent is that an inline function that is the subject of
  //   an explicit instantiation declaration will still be implicitly
  //   instantiated when used so that the body can be considered for
  //   inlining, but that no out-of-line copy of the inline function would be
  //   generated in the translation unit. -- end note ]
  if (FD->getTemplateSpecializationKind() ==
      TSK_ExplicitInstantiationDeclaration)
    return GVA_C99Inline;

  return GVA_CXXInline;
}

GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
  if (!VD->isExternallyVisible())
    return GVA_Internal;

  // If this is a static data member, compute the kind of template
  // specialization. Otherwise, this variable is not part of a
  // template.
  TemplateSpecializationKind TSK = TSK_Undeclared;
  if (VD->isStaticDataMember())
    TSK = VD->getTemplateSpecializationKind();

  switch (TSK) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
    return GVA_StrongExternal;

  case TSK_ExplicitInstantiationDeclaration:
    llvm_unreachable("Variable should not be instantiated");
    // Fall through to treat this like any other instantiation.

  case TSK_ExplicitInstantiationDefinition:
    return GVA_ExplicitTemplateInstantiation;

  case TSK_ImplicitInstantiation:
    return GVA_TemplateInstantiation;
  }

  llvm_unreachable("Invalid Linkage!");
}

bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (!VD->isFileVarDecl())
      return false;
  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else
    return false;

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required.  This rule only comes
    // into play when inline functions can be key functions, though.
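    // (In the Itanium C++ ABI the key function is, roughly, the first
    // non-pure virtual member function that is not defined inline; the
    // class's vtable is emitted in the TU that defines the key function.)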
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred.  Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    if (Linkage == GVA_Internal  || Linkage == GVA_C99Inline ||
        Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
      return false;
    return true;
  }

  const VarDecl *VD = cast<VarDecl>(D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly)
    return false;

  // Variables that can be needed in other TUs are required.
  GVALinkage L = GetGVALinkageForVariable(VD);
  if (L != GVA_Internal && L != GVA_TemplateInstantiation)
    return true;

  // Variables that have destruction with side-effects are required.
  if (VD->getType().isDestructedType())
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->getInit() && VD->getInit()->HasSideEffects(*this))
    return true;

  return false;
}

CallingConv ASTContext::getDefaultCXXMethodCallConv(bool isVariadic) {
  // Pass through to the C++ ABI object
  return ABI->getDefaultMethodCallConv(isVariadic);
}

CallingConv ASTContext::getCanonicalCallConv(CallingConv CC) const {
  if (CC == CC_C && !LangOpts.MRTD &&
      getTargetInfo().getCXXABI().isMemberFunctionCCDefault())
    return CC_Default;
  return CC;
}

bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}

MangleContext *ASTContext::createMangleContext() {
  switch (Target->getCXXABI().getKind()) {
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
    return createItaniumMangleContext(*this, getDiagnostics());
  case TargetCXXABI::Microsoft:
    return createMicrosoftMangleContext(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}

CXXABI::~CXXABI() {}

size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(ObjCLayouts) +
         llvm::capacity_in_bytes(KeyFunctions) +
         llvm::capacity_in_bytes(ObjCImpls) +
         llvm::capacity_in_bytes(BlockVarCopyInits) +
         llvm::capacity_in_bytes(DeclAttrs) +
         llvm::capacity_in_bytes(TemplateOrInstantiation) +
         llvm::capacity_in_bytes(InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(OverriddenMethods) +
         llvm::capacity_in_bytes(Types) +
         llvm::capacity_in_bytes(VariableArrayTypes) +
         llvm::capacity_in_bytes(ClassScopeSpecializationPattern);
}

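// Only mangling numbers greater than 1 are stored; getManglingNumber below
// treats a missing entry as 1, which keeps the common case out of the table.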
void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
  if (Number > 1)
    MangleNumbers[ND] = Number;
}

unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const {
  llvm::DenseMap<const NamedDecl *, unsigned>::const_iterator I =
    MangleNumbers.find(ND);
  return I != MangleNumbers.end() ? I->second : 1;
}

MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
  return MangleNumberingContexts[DC];
}

void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}

unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
  ParameterIndexTable::const_iterator I = ParamIndices.find(D);
  assert(I != ParamIndices.end() &&
         "ParmIndices lacks entry set by ParmVarDecl");
  return I->second;
}

APValue *
ASTContext::getMaterializedTemporaryValue(const MaterializeTemporaryExpr *E,
                                          bool MayCreate) {
  assert(E && E->getStorageDuration() == SD_Static &&
         "don't need to cache the computed value for this temporary");
  if (MayCreate)
    return &MaterializedTemporaryValues[E];

  llvm::DenseMap<const MaterializeTemporaryExpr *, APValue>::iterator I =
      MaterializedTemporaryValues.find(E);
  return I == MaterializedTemporaryValues.end() ? 0 : &I->second;
}

bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}

namespace {

/// \brief A \c RecursiveASTVisitor that builds a map from nodes to their
/// parents as defined by the \c RecursiveASTVisitor.
///
/// Note that the relationship described here is purely in terms of AST
/// traversal - there are other relationships (for example declaration context)
/// in the AST that are better modeled by special matchers.
///
/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
class ParentMapASTVisitor : public RecursiveASTVisitor<ParentMapASTVisitor> {

public:
  /// \brief Builds and returns the translation unit's parent map.
  ///
  /// The caller takes ownership of the returned \c ParentMap.
  static ASTContext::ParentMap *buildMap(TranslationUnitDecl &TU) {
    ParentMapASTVisitor Visitor(new ASTContext::ParentMap);
    Visitor.TraverseDecl(&TU);
    return Visitor.Parents;
  }

private:
  typedef RecursiveASTVisitor<ParentMapASTVisitor> VisitorBase;

  ParentMapASTVisitor(ASTContext::ParentMap *Parents) : Parents(Parents) {
  }

  bool shouldVisitTemplateInstantiations() const {
    return true;
  }
  bool shouldVisitImplicitCode() const {
    return true;
  }
  // Disables data recursion. We intercept Traverse* methods in the RAV, which
  // are not triggered during data recursion.
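  // (If data recursion stayed enabled, child statements visited through the
  // iterative fast path would bypass our TraverseStmt override and would
  // never be recorded in the parent map.)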
  bool shouldUseDataRecursionFor(clang::Stmt *S) const {
    return false;
  }

  template <typename T>
  bool TraverseNode(T *Node, bool (VisitorBase::*traverse)(T *)) {
    if (Node == NULL)
      return true;
    if (ParentStack.size() > 0)
      // FIXME: Currently we add the same parent multiple times, for example
      // when we visit all subexpressions of template instantiations; this is
      // suboptimal, but benign: the only way to visit those is with
      // hasAncestor / hasParent, and those do not create new matches.
      // The plan is to enable DynTypedNode to be storable in a map or hash
      // map. The main problem there is to implement hash functions /
      // comparison operators for all types that DynTypedNode supports that
      // do not have pointer identity.
      (*Parents)[Node].push_back(ParentStack.back());
    ParentStack.push_back(ast_type_traits::DynTypedNode::create(*Node));
    bool Result = (this->*traverse)(Node);
    ParentStack.pop_back();
    return Result;
  }

  bool TraverseDecl(Decl *DeclNode) {
    return TraverseNode(DeclNode, &VisitorBase::TraverseDecl);
  }

  bool TraverseStmt(Stmt *StmtNode) {
    return TraverseNode(StmtNode, &VisitorBase::TraverseStmt);
  }

  ASTContext::ParentMap *Parents;
  llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack;

  friend class RecursiveASTVisitor<ParentMapASTVisitor>;
};

} // end namespace

ASTContext::ParentVector
ASTContext::getParents(const ast_type_traits::DynTypedNode &Node) {
  assert(Node.getMemoizationData() &&
         "Invariant broken: only nodes that support memoization may be "
         "used in the parent map.");
  if (!AllParents) {
    // We always need to run over the whole translation unit, as
    // hasAncestor can escape any subtree.
    AllParents.reset(
        ParentMapASTVisitor::buildMap(*getTranslationUnitDecl()));
  }
  ParentMap::const_iterator I = AllParents->find(Node.getMemoizationData());
  if (I == AllParents->end()) {
    return ParentVector();
  }
  return I->second;
}

bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
  if (MethodDecl->hasAttr<UnavailableAttr>()
      || MethodDecl->hasAttr<DeprecatedAttr>())
    return false;
  if (MethodDecl->getObjCDeclQualifier() !=
      MethodImpl->getObjCDeclQualifier())
    return false;
  if (!hasSameType(MethodDecl->getResultType(),
                   MethodImpl->getResultType()))
    return false;

  if (MethodDecl->param_size() != MethodImpl->param_size())
    return false;

  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
       EF = MethodDecl->param_end();
       IM != EM && IF != EF; ++IM, ++IF) {
    const ParmVarDecl *DeclVar = (*IF);
    const ParmVarDecl *ImplVar = (*IM);
    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
      return false;
    if (!hasSameType(DeclVar->getType(), ImplVar->getType()))
      return false;
  }
  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
}