//===--- ASTContext.cpp - Context to hold long-lived AST nodes ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <map>

using namespace clang;

// Out-of-line definitions of the static statistics counters declared in
// ASTContext.  They track how many implicit special member functions were
// declared versus actually created; the totals are reported by PrintStats().
unsigned ASTContext::NumImplicitDefaultConstructors;
unsigned ASTContext::NumImplicitDefaultConstructorsDeclared;
unsigned ASTContext::NumImplicitCopyConstructors;
unsigned ASTContext::NumImplicitCopyConstructorsDeclared;
unsigned ASTContext::NumImplicitMoveConstructors;
unsigned ASTContext::NumImplicitMoveConstructorsDeclared;
unsigned ASTContext::NumImplicitCopyAssignmentOperators;
unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
unsigned ASTContext::NumImplicitMoveAssignmentOperators;
unsigned ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;
unsigned ASTContext::NumImplicitDestructors;
unsigned ASTContext::NumImplicitDestructorsDeclared;

// Ranks of the builtin floating-point types, ordered from lowest to highest
// conversion rank; used by the floating-point promotion/conversion helpers.
enum FloatingRank {
  HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
};

/// Look up the raw (unparsed) documentation comment attached to \p D,
/// bypassing the per-declaration comment cache.  Returns nullptr when the
/// declaration cannot carry a comment (implicit declarations, implicit
/// instantiations, parameters, template parameters) or when no suitable
/// comment is found next to the declaration.
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  // Lazily pull in comments from an external AST source (e.g. a PCH or
  // module file) the first time any comment is requested.
  if (!CommentsLoaded && ExternalSource) {
    ExternalSource->ReadComments();

#ifndef NDEBUG
    // The comment list must be sorted by source position: the binary search
    // performed below depends on it.
    ArrayRef<RawComment *> RawComments = Comments.getComments();
    assert(std::is_sorted(RawComments.begin(), RawComments.end(),
                          BeforeThanCompare<RawComment>(SourceMgr)));
#endif

    CommentsLoaded = true;
  }

  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return nullptr;

  // User can not attach documentation to implicit instantiations.
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return nullptr;
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return nullptr;
  }

  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return nullptr;
  }

  if (const ClassTemplateSpecializationDecl *CTSD =
          dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return nullptr;
  }

  if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return nullptr;
  }
  if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
    // When tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get comment
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return nullptr;
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return nullptr;

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return nullptr;

  ArrayRef<RawComment *> RawComments = Comments.getComments();

  // If there are no comments anywhere, we won't find anything.
  if (RawComments.empty())
    return nullptr;

  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation DeclLoc;
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) ||
      isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D))
    DeclLoc = D->getLocStart();
  else {
    DeclLoc = D->getLocation();
    if (DeclLoc.isMacroID()) {
      if (isa<TypedefDecl>(D)) {
        // If location of the typedef name is in a macro, it is because being
        // declared via a macro. Try using declaration's starting location as
        // the "declaration location".
        DeclLoc = D->getLocStart();
      } else if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
        // If location of the tag decl is inside a macro, but the spelling of
        // the tag name comes from a macro argument, it looks like a special
        // macro like NS_ENUM is being used to define the tag decl. In that
        // case, adjust the source location to the expansion loc so that we can
        // attach the comment to the tag decl.
        if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
            TD->isCompleteDefinition())
          DeclLoc = SourceMgr.getExpansionLoc(DeclLoc);
      }
    }
  }

  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
    return nullptr;

  // Find the comment that occurs just after this declaration.
  ArrayRef<RawComment *>::iterator Comment;
  {
    // When searching for comments during parsing, the comment we are looking
    // for is usually among the last two comments we parsed -- check them
    // first.
    RawComment CommentAtDeclLoc(
        SourceMgr, SourceRange(DeclLoc), false,
        LangOpts.CommentOpts.ParseAllComments);
    BeforeThanCompare<RawComment> Compare(SourceMgr);
    ArrayRef<RawComment *>::iterator MaybeBeforeDecl = RawComments.end() - 1;
    bool Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
    if (!Found && RawComments.size() >= 2) {
      MaybeBeforeDecl--;
      Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
    }

    if (Found) {
      // Fast path hit: the comment directly after the declaration is one
      // past the candidate.  The assert cross-checks against the slow path.
      Comment = MaybeBeforeDecl + 1;
      assert(Comment == std::lower_bound(RawComments.begin(), RawComments.end(),
                                         &CommentAtDeclLoc, Compare));
    } else {
      // Slow path.
      Comment = std::lower_bound(RawComments.begin(), RawComments.end(),
                                 &CommentAtDeclLoc, Compare);
    }
  }

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  std::pair<FileID, unsigned> DeclLocDecomp = SourceMgr.getDecomposedLoc(DeclLoc);

  // First check whether we have a trailing comment.
  if (Comment != RawComments.end() &&
      (*Comment)->isDocumentation() && (*Comment)->isTrailingComment() &&
      (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
       isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
    std::pair<FileID, unsigned> CommentBeginDecomp
      = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getBegin());
    // Check that Doxygen trailing comment comes after the declaration, starts
    // on the same line and in the same file as the declaration.
    if (DeclLocDecomp.first == CommentBeginDecomp.first &&
        SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second)
          == SourceMgr.getLineNumber(CommentBeginDecomp.first,
                                     CommentBeginDecomp.second)) {
      return *Comment;
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (Comment == RawComments.begin())
    return nullptr;
  --Comment;

  // Check that we actually have a non-member Doxygen comment.
  if (!(*Comment)->isDocumentation() || (*Comment)->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  std::pair<FileID, unsigned> CommentEndDecomp
    = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getEnd());

  // If the comment and the declaration aren't in the same file, then they
  // aren't related.
  if (DeclLocDecomp.first != CommentEndDecomp.first)
    return nullptr;

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndDecomp.second,
                 DeclLocDecomp.second - CommentEndDecomp.second);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_first_of(";{}#@") != StringRef::npos)
    return nullptr;

  return *Comment;
}

namespace {
/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
const Decl *adjustDeclToTemplate(const Decl *D) {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return MemberDecl;

    return D;
  }
  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return MemberDecl;

    return D;
  }
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const ClassTemplateSpecializationDecl *CTSD =
            dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      // The instantiation pattern is either the primary class template or a
      // partial specialization; unwrap the PointerUnion to whichever it is.
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl*>() ?
          static_cast<const Decl*>(PU.get<ClassTemplateDecl *>()) :
          static_cast<const Decl*>(
              PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return Info->getInstantiatedFrom();

    return D;
  }
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}
} // anonymous namespace

/// Return the documentation comment attached to \p D or to any other
/// declaration in its redeclaration chain, consulting and filling the
/// per-redeclaration comment cache.  If \p OriginalDecl is non-null it
/// receives the redeclaration the comment was actually attached to.
const RawComment *ASTContext::getRawCommentForAnyRedecl(
                                                const Decl *D,
                                                const Decl **OriginalDecl) const {
  D = adjustDeclToTemplate(D);

  // Check whether we have cached a comment for this declaration already.
  {
    llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
        RedeclComments.find(D);
    if (Pos != RedeclComments.end()) {
      const RawCommentAndCacheFlags &Raw = Pos->second;
      if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
        if (OriginalDecl)
          *OriginalDecl = Raw.getOriginalDecl();
        return Raw.getRaw();
      }
    }
  }

  // Search for comments attached to declarations in the redeclaration chain.
  const RawComment *RC = nullptr;
  const Decl *OriginalDeclForRC = nullptr;
  for (auto I : D->redecls()) {
    llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
        RedeclComments.find(I);
    if (Pos != RedeclComments.end()) {
      const RawCommentAndCacheFlags &Raw = Pos->second;
      if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
        RC = Raw.getRaw();
        OriginalDeclForRC = Raw.getOriginalDecl();
        break;
      }
    } else {
      // Not cached yet: do the uncached lookup for this redeclaration and
      // record the result (even a negative one) in the cache.
      RC = getRawCommentForDeclNoCache(I);
      OriginalDeclForRC = I;
      RawCommentAndCacheFlags Raw;
      if (RC) {
        // Call order swapped to work around ICE in VS2015 RTM (Release Win32)
        // https://connect.microsoft.com/VisualStudio/feedback/details/1741530
        Raw.setKind(RawCommentAndCacheFlags::FromDecl);
        Raw.setRaw(RC);
      } else
        Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl);
      Raw.setOriginalDecl(I);
      RedeclComments[I] = Raw;
      if (RC)
        break;
    }
  }

  // If we found a comment, it should be a documentation comment.
  assert(!RC || RC->isDocumentation());

  if (OriginalDecl)
    *OriginalDecl = OriginalDeclForRC;

  // Update cache for every declaration in the redeclaration chain.
  RawCommentAndCacheFlags Raw;
  Raw.setRaw(RC);
  Raw.setKind(RawCommentAndCacheFlags::FromRedecl);
  Raw.setOriginalDecl(OriginalDeclForRC);

  // Only overwrite entries that recorded "no comment"; entries that already
  // carry a comment of their own are left untouched.
  for (auto I : D->redecls()) {
    RawCommentAndCacheFlags &R = RedeclComments[I];
    if (R.getKind() == RawCommentAndCacheFlags::NoCommentInDecl)
      R = Raw;
  }

  return RC;
}

/// Collect redeclarations of \p ObjCMethod declared in the known class
/// extensions of the interface whose implementation contains the method.
static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                   SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
            Ext->getMethod(ObjCMethod->getSelector(),
                                  ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

/// Re-attribute the parsed comment \p FC to declaration \p D, reusing the
/// comment's block content.  Used when \p D inherits documentation from a
/// related declaration (redeclaration, base class, overridden method, ...).
comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  comments::DeclInfo *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  // After filling from D, point CommentDecl back at the comment's original
  // declaration so parameter references resolve against it.
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
    new (*this) comments::FullComment(FC->getBlocks(),
                                      ThisDeclInfo);
  return CFC;
}

/// Parse and return the comment attached directly to \p D (no redecl search,
/// no caching), or nullptr if there is none.
comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ?
      RC->parse(*this, nullptr, D) : nullptr;
}

/// Return the parsed documentation comment for \p D, searching the
/// redeclaration chain and, failing that, inheriting documentation from
/// overridden methods, a typedef'd tag, an Objective-C super class or
/// category interface, or public C++ base classes.  Results are cached per
/// canonical declaration.
comments::FullComment *ASTContext::getCommentForDecl(
                                              const Decl *D,
                                              const Preprocessor *PP) const {
  if (D->isInvalidDecl())
    return nullptr;
  D = adjustDeclToTemplate(D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    // Cache hit.  If the query was for a non-canonical redeclaration,
    // re-attribute the cached comment to it.
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D);
      // A property accessor inherits the property's documentation.
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const TagType *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const ObjCInterfaceDecl *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      // Walk up the super-class chain until a documented interface is found.
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        // Only public, non-virtual bases may donate documentation here.
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase= VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl.
  // This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

/// Profile a template template parameter for the canonical-parameter
/// folding set: depth, position, pack-ness, and a recursive structural
/// description of its own template parameter list (tagged 0 = type
/// parameter, 1 = non-type parameter, 2 = template template parameter).
void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      continue;
    }

    if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    // Nested template template parameters are profiled recursively.
    TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, TTP);
  }
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
                                          TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    // Each parameter is re-created with no name, no source locations and
    // canonical types, so structurally identical parameters unify.
    if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
      CanonParams.push_back(
                  TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
                                               SourceLocation(),
                                               SourceLocation(),
                                               TTP->getDepth(),
                                               TTP->getIndex(), nullptr, false,
                                               TTP->isParameterPack()));
    else if (NonTypeTemplateParmDecl *NTTP
             = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getCanonicalType(NTTP->getType());
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        // Canonicalize each expansion type of an expanded parameter pack.
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
                                getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);

    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
                                           cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP
    = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                       SourceLocation(), TTP->getDepth(),
                                       TTP->getPosition(),
                                       TTP->isParameterPack(),
                                       nullptr,
                         TemplateParameterList::Create(*this, SourceLocation(),
                                                       SourceLocation(),
                                                       CanonParams,
                                                       SourceLocation()));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

/// Create the CXXABI object matching the target's C++ ABI kind, or nullptr
/// when not compiling C++.
CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::iOS64:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

/// Select the language-to-target address space map: either the target's own
/// map or, under -ffake-address-space-map, a synthetic one with distinct
/// entries for testing.
static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
                                             const LangOptions &LOpts) {
  if (LOpts.FakeAddressSpaceMap) {
    // The fake address space map must have a distinct entry for each
    // language-specific address space.
    static const unsigned FakeAddrSpaceMap[] = {
      1, // opencl_global
      2, // opencl_local
      3, // opencl_constant
      4, // opencl_generic
      5, // cuda_device
      6, // cuda_constant
      7  // cuda_shared
    };
    return &FakeAddrSpaceMap;
  } else {
    return &T.getAddressSpaceMap();
  }
}

/// Decide whether address-space qualifiers participate in mangling,
/// honoring the explicit on/off override in LangOptions and otherwise
/// deferring to the target.
static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

// Note: target-dependent state (Target, AuxTarget, AddrSpaceMap, ABI, the
// builtin types) is NOT set up here; it is initialized later by
// InitBuiltinTypes() once the target is known.
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins)
  : FunctionProtoTypes(this_()), TemplateSpecializationTypes(this_()),
    DependentTemplateSpecializationTypes(this_()),
    SubstTemplateTemplateParmPacks(this_()),
    GlobalNestedNameSpecifier(nullptr), Int128Decl(nullptr),
    UInt128Decl(nullptr), BuiltinVaListDecl(nullptr),
    BuiltinMSVaListDecl(nullptr), ObjCIdDecl(nullptr), ObjCSelDecl(nullptr),
    ObjCClassDecl(nullptr), ObjCProtocolClassDecl(nullptr), BOOLDecl(nullptr),
    CFConstantStringTagDecl(nullptr), CFConstantStringTypeDecl(nullptr),
    ObjCInstanceTypeDecl(nullptr), FILEDecl(nullptr), jmp_bufDecl(nullptr),
    sigjmp_bufDecl(nullptr), ucontext_tDecl(nullptr),
    BlockDescriptorType(nullptr), BlockDescriptorExtendedType(nullptr),
    cudaConfigureCallDecl(nullptr), FirstLocalImport(), LastLocalImport(),
    ExternCContext(nullptr), MakeIntegerSeqDecl(nullptr),
    TypePackElementDecl(nullptr), SourceMgr(SM), LangOpts(LOpts),
    SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
    AddrSpaceMap(nullptr), Target(nullptr), AuxTarget(nullptr),
    PrintingPolicy(LOpts), Idents(idents),
    Selectors(sels),
    BuiltinInfo(builtins), DeclarationNames(*this), ExternalSource(nullptr),
    Listener(nullptr), Comments(SM), CommentsLoaded(false),
    CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), LastSDM(nullptr, 0) {
  TUDecl = TranslationUnitDecl::Create(*this);
}

ASTContext::~ASTContext() {
  ReleaseParentMapEntries();

  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
       const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
      R->Destroy(*this);

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
      R->Destroy(*this);
  }

  // Attribute vectors are allocated in the context's BumpPtrAllocator, so
  // only their destructors need to run here.
  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
       AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();

  for (std::pair<const MaterializeTemporaryExpr *, APValue *> &MTVPair :
       MaterializedTemporaryValues)
    MTVPair.second->~APValue();

  llvm::DeleteContainerSeconds(MangleNumberingContexts);
}

// Free the heap-allocated entries of the two AST parent maps; each entry is
// either a single DynTypedNode or a ParentVector.
void ASTContext::ReleaseParentMapEntries() {
  if (!PointerParents) return;
  for (const auto &Entry : *PointerParents) {
    if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
      delete Entry.second.get<ast_type_traits::DynTypedNode *>();
    } else if (Entry.second.is<ParentVector *>()) {
      delete Entry.second.get<ParentVector *>();
    }
  }
  for (const auto &Entry : *OtherParents) {
    if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
      delete Entry.second.get<ast_type_traits::DynTypedNode *>();
    } else if (Entry.second.is<ParentVector *>()) {
      delete Entry.second.get<ParentVector *>();
    }
  }
}

// Register a callback to be invoked with Data when the context is destroyed.
void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

// Dump per-type-class counts, byte totals and the implicit special-member
// statistics to stderr.
void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.def"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types\n";                                         \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.def"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

// Record that the definition of ND was merged into module M; with local
// module visibility the merge is tracked per declaration, otherwise the
// declaration is simply unhidden.
void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  if (getLangOpts().ModulesLocalVisibility)
    MergedDefModules[ND].push_back(M);
  else
    ND->setHidden(false);
}

// Remove duplicate modules from ND's merged-definition list, keeping the
// first occurrence of each (order-preserving).
void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(ND);
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
llvm::DenseSet<Module*> Found; 899 for (Module *&M : Merged) 900 if (!Found.insert(M).second) 901 M = nullptr; 902 Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end()); 903 } 904 905 ExternCContextDecl *ASTContext::getExternCContextDecl() const { 906 if (!ExternCContext) 907 ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl()); 908 909 return ExternCContext; 910 } 911 912 BuiltinTemplateDecl * 913 ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK, 914 const IdentifierInfo *II) const { 915 auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK); 916 BuiltinTemplate->setImplicit(); 917 TUDecl->addDecl(BuiltinTemplate); 918 919 return BuiltinTemplate; 920 } 921 922 BuiltinTemplateDecl * 923 ASTContext::getMakeIntegerSeqDecl() const { 924 if (!MakeIntegerSeqDecl) 925 MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq, 926 getMakeIntegerSeqName()); 927 return MakeIntegerSeqDecl; 928 } 929 930 BuiltinTemplateDecl * 931 ASTContext::getTypePackElementDecl() const { 932 if (!TypePackElementDecl) 933 TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element, 934 getTypePackElementName()); 935 return TypePackElementDecl; 936 } 937 938 RecordDecl *ASTContext::buildImplicitRecord(StringRef Name, 939 RecordDecl::TagKind TK) const { 940 SourceLocation Loc; 941 RecordDecl *NewDecl; 942 if (getLangOpts().CPlusPlus) 943 NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, 944 Loc, &Idents.get(Name)); 945 else 946 NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc, 947 &Idents.get(Name)); 948 NewDecl->setImplicit(); 949 NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit( 950 const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default)); 951 return NewDecl; 952 } 953 954 TypedefDecl *ASTContext::buildImplicitTypedef(QualType T, 955 StringRef Name) const { 956 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); 957 TypedefDecl 
*NewDecl = TypedefDecl::Create( 958 const_cast<ASTContext &>(*this), getTranslationUnitDecl(), 959 SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo); 960 NewDecl->setImplicit(); 961 return NewDecl; 962 } 963 964 TypedefDecl *ASTContext::getInt128Decl() const { 965 if (!Int128Decl) 966 Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t"); 967 return Int128Decl; 968 } 969 970 TypedefDecl *ASTContext::getUInt128Decl() const { 971 if (!UInt128Decl) 972 UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t"); 973 return UInt128Decl; 974 } 975 976 void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) { 977 BuiltinType *Ty = new (*this, TypeAlignment) BuiltinType(K); 978 R = CanQualType::CreateUnsafe(QualType(Ty, 0)); 979 Types.push_back(Ty); 980 } 981 982 void ASTContext::InitBuiltinTypes(const TargetInfo &Target, 983 const TargetInfo *AuxTarget) { 984 assert((!this->Target || this->Target == &Target) && 985 "Incorrect target reinitialization"); 986 assert(VoidTy.isNull() && "Context reinitialized?"); 987 988 this->Target = &Target; 989 this->AuxTarget = AuxTarget; 990 991 ABI.reset(createCXXABI(Target)); 992 AddrSpaceMap = getAddressSpaceMap(Target, LangOpts); 993 AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts); 994 995 // C99 6.2.5p19. 996 InitBuiltinType(VoidTy, BuiltinType::Void); 997 998 // C99 6.2.5p2. 999 InitBuiltinType(BoolTy, BuiltinType::Bool); 1000 // C99 6.2.5p3. 1001 if (LangOpts.CharIsSigned) 1002 InitBuiltinType(CharTy, BuiltinType::Char_S); 1003 else 1004 InitBuiltinType(CharTy, BuiltinType::Char_U); 1005 // C99 6.2.5p4. 1006 InitBuiltinType(SignedCharTy, BuiltinType::SChar); 1007 InitBuiltinType(ShortTy, BuiltinType::Short); 1008 InitBuiltinType(IntTy, BuiltinType::Int); 1009 InitBuiltinType(LongTy, BuiltinType::Long); 1010 InitBuiltinType(LongLongTy, BuiltinType::LongLong); 1011 1012 // C99 6.2.5p6. 
1013 InitBuiltinType(UnsignedCharTy, BuiltinType::UChar); 1014 InitBuiltinType(UnsignedShortTy, BuiltinType::UShort); 1015 InitBuiltinType(UnsignedIntTy, BuiltinType::UInt); 1016 InitBuiltinType(UnsignedLongTy, BuiltinType::ULong); 1017 InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong); 1018 1019 // C99 6.2.5p10. 1020 InitBuiltinType(FloatTy, BuiltinType::Float); 1021 InitBuiltinType(DoubleTy, BuiltinType::Double); 1022 InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble); 1023 1024 // GNU extension, __float128 for IEEE quadruple precision 1025 InitBuiltinType(Float128Ty, BuiltinType::Float128); 1026 1027 // GNU extension, 128-bit integers. 1028 InitBuiltinType(Int128Ty, BuiltinType::Int128); 1029 InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128); 1030 1031 // C++ 3.9.1p5 1032 if (TargetInfo::isTypeSigned(Target.getWCharType())) 1033 InitBuiltinType(WCharTy, BuiltinType::WChar_S); 1034 else // -fshort-wchar makes wchar_t be unsigned. 1035 InitBuiltinType(WCharTy, BuiltinType::WChar_U); 1036 if (LangOpts.CPlusPlus && LangOpts.WChar) 1037 WideCharTy = WCharTy; 1038 else { 1039 // C99 (or C++ using -fno-wchar). 1040 WideCharTy = getFromTargetType(Target.getWCharType()); 1041 } 1042 1043 WIntTy = getFromTargetType(Target.getWIntType()); 1044 1045 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ 1046 InitBuiltinType(Char16Ty, BuiltinType::Char16); 1047 else // C99 1048 Char16Ty = getFromTargetType(Target.getChar16Type()); 1049 1050 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ 1051 InitBuiltinType(Char32Ty, BuiltinType::Char32); 1052 else // C99 1053 Char32Ty = getFromTargetType(Target.getChar32Type()); 1054 1055 // Placeholder type for type-dependent expressions whose type is 1056 // completely unknown. No code should ever check a type against 1057 // DependentTy and users should never see it; however, it is here to 1058 // help diagnose failures to properly check for type-dependent 1059 // expressions. 
1060 InitBuiltinType(DependentTy, BuiltinType::Dependent); 1061 1062 // Placeholder type for functions. 1063 InitBuiltinType(OverloadTy, BuiltinType::Overload); 1064 1065 // Placeholder type for bound members. 1066 InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember); 1067 1068 // Placeholder type for pseudo-objects. 1069 InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject); 1070 1071 // "any" type; useful for debugger-like clients. 1072 InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny); 1073 1074 // Placeholder type for unbridged ARC casts. 1075 InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast); 1076 1077 // Placeholder type for builtin functions. 1078 InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn); 1079 1080 // Placeholder type for OMP array sections. 1081 if (LangOpts.OpenMP) 1082 InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection); 1083 1084 // C99 6.2.5p11. 1085 FloatComplexTy = getComplexType(FloatTy); 1086 DoubleComplexTy = getComplexType(DoubleTy); 1087 LongDoubleComplexTy = getComplexType(LongDoubleTy); 1088 Float128ComplexTy = getComplexType(Float128Ty); 1089 1090 // Builtin types for 'id', 'Class', and 'SEL'. 
1091 InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId); 1092 InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass); 1093 InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel); 1094 1095 if (LangOpts.OpenCL) { 1096 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 1097 InitBuiltinType(SingletonId, BuiltinType::Id); 1098 #include "clang/Basic/OpenCLImageTypes.def" 1099 1100 InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler); 1101 InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent); 1102 InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent); 1103 InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue); 1104 InitBuiltinType(OCLNDRangeTy, BuiltinType::OCLNDRange); 1105 InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID); 1106 } 1107 1108 // Builtin type for __objc_yes and __objc_no 1109 ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ? 1110 SignedCharTy : BoolTy); 1111 1112 ObjCConstantStringType = QualType(); 1113 1114 ObjCSuperType = QualType(); 1115 1116 // void * type 1117 VoidPtrTy = getPointerType(VoidTy); 1118 1119 // nullptr type (C++0x 2.14.7) 1120 InitBuiltinType(NullPtrTy, BuiltinType::NullPtr); 1121 1122 // half type (OpenCL 6.1.1.1) / ARM NEON __fp16 1123 InitBuiltinType(HalfTy, BuiltinType::Half); 1124 1125 // Builtin type used to help define __builtin_va_list. 1126 VaListTagDecl = nullptr; 1127 } 1128 1129 DiagnosticsEngine &ASTContext::getDiagnostics() const { 1130 return SourceMgr.getDiagnostics(); 1131 } 1132 1133 AttrVec& ASTContext::getDeclAttrs(const Decl *D) { 1134 AttrVec *&Result = DeclAttrs[D]; 1135 if (!Result) { 1136 void *Mem = Allocate(sizeof(AttrVec)); 1137 Result = new (Mem) AttrVec; 1138 } 1139 1140 return *Result; 1141 } 1142 1143 /// \brief Erase the attributes corresponding to the given declaration. 
1144 void ASTContext::eraseDeclAttrs(const Decl *D) { 1145 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D); 1146 if (Pos != DeclAttrs.end()) { 1147 Pos->second->~AttrVec(); 1148 DeclAttrs.erase(Pos); 1149 } 1150 } 1151 1152 // FIXME: Remove ? 1153 MemberSpecializationInfo * 1154 ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) { 1155 assert(Var->isStaticDataMember() && "Not a static data member"); 1156 return getTemplateOrSpecializationInfo(Var) 1157 .dyn_cast<MemberSpecializationInfo *>(); 1158 } 1159 1160 ASTContext::TemplateOrSpecializationInfo 1161 ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) { 1162 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos = 1163 TemplateOrInstantiation.find(Var); 1164 if (Pos == TemplateOrInstantiation.end()) 1165 return TemplateOrSpecializationInfo(); 1166 1167 return Pos->second; 1168 } 1169 1170 void 1171 ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl, 1172 TemplateSpecializationKind TSK, 1173 SourceLocation PointOfInstantiation) { 1174 assert(Inst->isStaticDataMember() && "Not a static data member"); 1175 assert(Tmpl->isStaticDataMember() && "Not a static data member"); 1176 setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo( 1177 Tmpl, TSK, PointOfInstantiation)); 1178 } 1179 1180 void 1181 ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst, 1182 TemplateOrSpecializationInfo TSI) { 1183 assert(!TemplateOrInstantiation[Inst] && 1184 "Already noted what the variable was instantiated from"); 1185 TemplateOrInstantiation[Inst] = TSI; 1186 } 1187 1188 FunctionDecl *ASTContext::getClassScopeSpecializationPattern( 1189 const FunctionDecl *FD){ 1190 assert(FD && "Specialization is 0"); 1191 llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos 1192 = ClassScopeSpecializationPattern.find(FD); 1193 if (Pos == ClassScopeSpecializationPattern.end()) 1194 return nullptr; 1195 
1196 return Pos->second; 1197 } 1198 1199 void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD, 1200 FunctionDecl *Pattern) { 1201 assert(FD && "Specialization is 0"); 1202 assert(Pattern && "Class scope specialization pattern is 0"); 1203 ClassScopeSpecializationPattern[FD] = Pattern; 1204 } 1205 1206 NamedDecl * 1207 ASTContext::getInstantiatedFromUsingDecl(UsingDecl *UUD) { 1208 llvm::DenseMap<UsingDecl *, NamedDecl *>::const_iterator Pos 1209 = InstantiatedFromUsingDecl.find(UUD); 1210 if (Pos == InstantiatedFromUsingDecl.end()) 1211 return nullptr; 1212 1213 return Pos->second; 1214 } 1215 1216 void 1217 ASTContext::setInstantiatedFromUsingDecl(UsingDecl *Inst, NamedDecl *Pattern) { 1218 assert((isa<UsingDecl>(Pattern) || 1219 isa<UnresolvedUsingValueDecl>(Pattern) || 1220 isa<UnresolvedUsingTypenameDecl>(Pattern)) && 1221 "pattern decl is not a using decl"); 1222 assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists"); 1223 InstantiatedFromUsingDecl[Inst] = Pattern; 1224 } 1225 1226 UsingShadowDecl * 1227 ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) { 1228 llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos 1229 = InstantiatedFromUsingShadowDecl.find(Inst); 1230 if (Pos == InstantiatedFromUsingShadowDecl.end()) 1231 return nullptr; 1232 1233 return Pos->second; 1234 } 1235 1236 void 1237 ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst, 1238 UsingShadowDecl *Pattern) { 1239 assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists"); 1240 InstantiatedFromUsingShadowDecl[Inst] = Pattern; 1241 } 1242 1243 FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) { 1244 llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos 1245 = InstantiatedFromUnnamedFieldDecl.find(Field); 1246 if (Pos == InstantiatedFromUnnamedFieldDecl.end()) 1247 return nullptr; 1248 1249 return Pos->second; 1250 } 1251 1252 void 
ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, 1253 FieldDecl *Tmpl) { 1254 assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed"); 1255 assert(!Tmpl->getDeclName() && "Template field decl is not unnamed"); 1256 assert(!InstantiatedFromUnnamedFieldDecl[Inst] && 1257 "Already noted what unnamed field was instantiated from"); 1258 1259 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; 1260 } 1261 1262 ASTContext::overridden_cxx_method_iterator 1263 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const { 1264 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos = 1265 OverriddenMethods.find(Method->getCanonicalDecl()); 1266 if (Pos == OverriddenMethods.end()) 1267 return nullptr; 1268 return Pos->second.begin(); 1269 } 1270 1271 ASTContext::overridden_cxx_method_iterator 1272 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { 1273 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos = 1274 OverriddenMethods.find(Method->getCanonicalDecl()); 1275 if (Pos == OverriddenMethods.end()) 1276 return nullptr; 1277 return Pos->second.end(); 1278 } 1279 1280 unsigned 1281 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { 1282 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos = 1283 OverriddenMethods.find(Method->getCanonicalDecl()); 1284 if (Pos == OverriddenMethods.end()) 1285 return 0; 1286 return Pos->second.size(); 1287 } 1288 1289 ASTContext::overridden_method_range 1290 ASTContext::overridden_methods(const CXXMethodDecl *Method) const { 1291 return overridden_method_range(overridden_methods_begin(Method), 1292 overridden_methods_end(Method)); 1293 } 1294 1295 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method, 1296 const CXXMethodDecl *Overridden) { 1297 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl()); 1298 OverriddenMethods[Method].push_back(Overridden); 1299 } 1300 1301 void 
ASTContext::getOverriddenMethods( 1302 const NamedDecl *D, 1303 SmallVectorImpl<const NamedDecl *> &Overridden) const { 1304 assert(D); 1305 1306 if (const CXXMethodDecl *CXXMethod = dyn_cast<CXXMethodDecl>(D)) { 1307 Overridden.append(overridden_methods_begin(CXXMethod), 1308 overridden_methods_end(CXXMethod)); 1309 return; 1310 } 1311 1312 const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(D); 1313 if (!Method) 1314 return; 1315 1316 SmallVector<const ObjCMethodDecl *, 8> OverDecls; 1317 Method->getOverriddenMethods(OverDecls); 1318 Overridden.append(OverDecls.begin(), OverDecls.end()); 1319 } 1320 1321 void ASTContext::addedLocalImportDecl(ImportDecl *Import) { 1322 assert(!Import->NextLocalImport && "Import declaration already in the chain"); 1323 assert(!Import->isFromASTFile() && "Non-local import declaration"); 1324 if (!FirstLocalImport) { 1325 FirstLocalImport = Import; 1326 LastLocalImport = Import; 1327 return; 1328 } 1329 1330 LastLocalImport->NextLocalImport = Import; 1331 LastLocalImport = Import; 1332 } 1333 1334 //===----------------------------------------------------------------------===// 1335 // Type Sizing and Analysis 1336 //===----------------------------------------------------------------------===// 1337 1338 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified 1339 /// scalar floating point type. 
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  const BuiltinType *BT = T->getAs<BuiltinType>();
  assert(BT && "Not a floating point type!");
  switch (BT->getKind()) {
  default: llvm_unreachable("Not a floating point type!");
  case BuiltinType::Half:       return Target->getHalfFormat();
  case BuiltinType::Float:      return Target->getFloatFormat();
  case BuiltinType::Double:     return Target->getDoubleFormat();
  case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
  case BuiltinType::Float128:   return Target->getFloat128Format();
  }
}

// Compute the alignment (in CharUnits) to use for declaration \p D,
// combining any alignment attribute, the type's preferred alignment,
// target large-array rules, and (for fields) the layout-derived alignment.
// \p ForAlignof selects C++ alignof semantics (e.g. references report the
// alignment of the referenced type, not of the pointer representation).
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  bool UseAlignAttrOnly = false;
  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
    Align = AlignFromAttr;

    // __attribute__((aligned)) can increase or decrease alignment
    // *except* on a struct or struct member, where it only increases
    // alignment unless 'packed' is also specified.
    //
    // It is an error for alignas to decrease alignment, so we can
    // ignore that possibility; Sema should diagnose it.
    if (isa<FieldDecl>(D)) {
      UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
    } else {
      UseAlignAttrOnly = true;
    }
  }
  else if (isa<FieldDecl>(D))
    UseAlignAttrOnly =
      D->hasAttr<PackedAttr>() ||
      cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();

  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing

  } else if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (!BaseT->isIncompleteType() && !T->isFunctionType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
        if (VD->hasGlobalStorage() && !ForAlignof)
          Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
      }
    }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const FieldDecl *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  return toCharUnitsFromBits(Align);
}

// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
// using a trivial copy/move assignment operator.
std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
  std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);

  // In C++, objects can sometimes be allocated into the tail padding
  // of a base-class subobject. We decide whether that's possible
  // during class layout, so here we can just trust the layout results.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = T->getAs<RecordType>()) {
      const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
      sizeAndAlign.first = layout.getDataSize();
    }
  }

  return sizeAndAlign;
}

/// getConstantArrayInfoInChars - Performing the computation in CharUnits
/// instead of in bits prevents overflowing the uint64_t for some large arrays.
1467 std::pair<CharUnits, CharUnits> 1468 static getConstantArrayInfoInChars(const ASTContext &Context, 1469 const ConstantArrayType *CAT) { 1470 std::pair<CharUnits, CharUnits> EltInfo = 1471 Context.getTypeInfoInChars(CAT->getElementType()); 1472 uint64_t Size = CAT->getSize().getZExtValue(); 1473 assert((Size == 0 || static_cast<uint64_t>(EltInfo.first.getQuantity()) <= 1474 (uint64_t)(-1)/Size) && 1475 "Overflow in array type char size evaluation"); 1476 uint64_t Width = EltInfo.first.getQuantity() * Size; 1477 unsigned Align = EltInfo.second.getQuantity(); 1478 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1479 Context.getTargetInfo().getPointerWidth(0) == 64) 1480 Width = llvm::alignTo(Width, Align); 1481 return std::make_pair(CharUnits::fromQuantity(Width), 1482 CharUnits::fromQuantity(Align)); 1483 } 1484 1485 std::pair<CharUnits, CharUnits> 1486 ASTContext::getTypeInfoInChars(const Type *T) const { 1487 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(T)) 1488 return getConstantArrayInfoInChars(*this, CAT); 1489 TypeInfo Info = getTypeInfo(T); 1490 return std::make_pair(toCharUnitsFromBits(Info.Width), 1491 toCharUnitsFromBits(Info.Align)); 1492 } 1493 1494 std::pair<CharUnits, CharUnits> 1495 ASTContext::getTypeInfoInChars(QualType T) const { 1496 return getTypeInfoInChars(T.getTypePtr()); 1497 } 1498 1499 bool ASTContext::isAlignmentRequired(const Type *T) const { 1500 return getTypeInfo(T).AlignIsRequired; 1501 } 1502 1503 bool ASTContext::isAlignmentRequired(QualType T) const { 1504 return isAlignmentRequired(T.getTypePtr()); 1505 } 1506 1507 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1508 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1509 if (I != MemoizedTypeInfo.end()) 1510 return I->second; 1511 1512 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 
1513 TypeInfo TI = getTypeInfoImpl(T); 1514 MemoizedTypeInfo[T] = TI; 1515 return TI; 1516 } 1517 1518 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1519 /// method does not work on incomplete types. 1520 /// 1521 /// FIXME: Pointers into different addr spaces could have different sizes and 1522 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1523 /// should take a QualType, &c. 1524 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1525 uint64_t Width = 0; 1526 unsigned Align = 8; 1527 bool AlignIsRequired = false; 1528 switch (T->getTypeClass()) { 1529 #define TYPE(Class, Base) 1530 #define ABSTRACT_TYPE(Class, Base) 1531 #define NON_CANONICAL_TYPE(Class, Base) 1532 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1533 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1534 case Type::Class: \ 1535 assert(!T->isDependentType() && "should not see dependent types here"); \ 1536 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1537 #include "clang/AST/TypeNodes.def" 1538 llvm_unreachable("Should not see dependent types"); 1539 1540 case Type::FunctionNoProto: 1541 case Type::FunctionProto: 1542 // GCC extension: alignof(function) = 32 bits 1543 Width = 0; 1544 Align = 32; 1545 break; 1546 1547 case Type::IncompleteArray: 1548 case Type::VariableArray: 1549 Width = 0; 1550 Align = getTypeAlign(cast<ArrayType>(T)->getElementType()); 1551 break; 1552 1553 case Type::ConstantArray: { 1554 const ConstantArrayType *CAT = cast<ConstantArrayType>(T); 1555 1556 TypeInfo EltInfo = getTypeInfo(CAT->getElementType()); 1557 uint64_t Size = CAT->getSize().getZExtValue(); 1558 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1559 "Overflow in array type bit size evaluation"); 1560 Width = EltInfo.Width * Size; 1561 Align = EltInfo.Align; 1562 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1563 getTargetInfo().getPointerWidth(0) == 64) 1564 Width = llvm::alignTo(Width, 
Align); 1565 break; 1566 } 1567 case Type::ExtVector: 1568 case Type::Vector: { 1569 const VectorType *VT = cast<VectorType>(T); 1570 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1571 Width = EltInfo.Width * VT->getNumElements(); 1572 Align = Width; 1573 // If the alignment is not a power of 2, round up to the next power of 2. 1574 // This happens for non-power-of-2 length vectors. 1575 if (Align & (Align-1)) { 1576 Align = llvm::NextPowerOf2(Align); 1577 Width = llvm::alignTo(Width, Align); 1578 } 1579 // Adjust the alignment based on the target max. 1580 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 1581 if (TargetVectorAlign && TargetVectorAlign < Align) 1582 Align = TargetVectorAlign; 1583 break; 1584 } 1585 1586 case Type::Builtin: 1587 switch (cast<BuiltinType>(T)->getKind()) { 1588 default: llvm_unreachable("Unknown builtin type!"); 1589 case BuiltinType::Void: 1590 // GCC extension: alignof(void) = 8 bits. 1591 Width = 0; 1592 Align = 8; 1593 break; 1594 1595 case BuiltinType::Bool: 1596 Width = Target->getBoolWidth(); 1597 Align = Target->getBoolAlign(); 1598 break; 1599 case BuiltinType::Char_S: 1600 case BuiltinType::Char_U: 1601 case BuiltinType::UChar: 1602 case BuiltinType::SChar: 1603 Width = Target->getCharWidth(); 1604 Align = Target->getCharAlign(); 1605 break; 1606 case BuiltinType::WChar_S: 1607 case BuiltinType::WChar_U: 1608 Width = Target->getWCharWidth(); 1609 Align = Target->getWCharAlign(); 1610 break; 1611 case BuiltinType::Char16: 1612 Width = Target->getChar16Width(); 1613 Align = Target->getChar16Align(); 1614 break; 1615 case BuiltinType::Char32: 1616 Width = Target->getChar32Width(); 1617 Align = Target->getChar32Align(); 1618 break; 1619 case BuiltinType::UShort: 1620 case BuiltinType::Short: 1621 Width = Target->getShortWidth(); 1622 Align = Target->getShortAlign(); 1623 break; 1624 case BuiltinType::UInt: 1625 case BuiltinType::Int: 1626 Width = Target->getIntWidth(); 1627 Align = Target->getIntAlign(); 1628 
break; 1629 case BuiltinType::ULong: 1630 case BuiltinType::Long: 1631 Width = Target->getLongWidth(); 1632 Align = Target->getLongAlign(); 1633 break; 1634 case BuiltinType::ULongLong: 1635 case BuiltinType::LongLong: 1636 Width = Target->getLongLongWidth(); 1637 Align = Target->getLongLongAlign(); 1638 break; 1639 case BuiltinType::Int128: 1640 case BuiltinType::UInt128: 1641 Width = 128; 1642 Align = 128; // int128_t is 128-bit aligned on all targets. 1643 break; 1644 case BuiltinType::Half: 1645 Width = Target->getHalfWidth(); 1646 Align = Target->getHalfAlign(); 1647 break; 1648 case BuiltinType::Float: 1649 Width = Target->getFloatWidth(); 1650 Align = Target->getFloatAlign(); 1651 break; 1652 case BuiltinType::Double: 1653 Width = Target->getDoubleWidth(); 1654 Align = Target->getDoubleAlign(); 1655 break; 1656 case BuiltinType::LongDouble: 1657 Width = Target->getLongDoubleWidth(); 1658 Align = Target->getLongDoubleAlign(); 1659 break; 1660 case BuiltinType::Float128: 1661 Width = Target->getFloat128Width(); 1662 Align = Target->getFloat128Align(); 1663 break; 1664 case BuiltinType::NullPtr: 1665 Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t) 1666 Align = Target->getPointerAlign(0); // == sizeof(void*) 1667 break; 1668 case BuiltinType::ObjCId: 1669 case BuiltinType::ObjCClass: 1670 case BuiltinType::ObjCSel: 1671 Width = Target->getPointerWidth(0); 1672 Align = Target->getPointerAlign(0); 1673 break; 1674 case BuiltinType::OCLSampler: 1675 // Samplers are modeled as integers. 
1676 Width = Target->getIntWidth(); 1677 Align = Target->getIntAlign(); 1678 break; 1679 case BuiltinType::OCLEvent: 1680 case BuiltinType::OCLClkEvent: 1681 case BuiltinType::OCLQueue: 1682 case BuiltinType::OCLNDRange: 1683 case BuiltinType::OCLReserveID: 1684 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 1685 case BuiltinType::Id: 1686 #include "clang/Basic/OpenCLImageTypes.def" 1687 1688 // Currently these types are pointers to opaque types. 1689 Width = Target->getPointerWidth(0); 1690 Align = Target->getPointerAlign(0); 1691 break; 1692 } 1693 break; 1694 case Type::ObjCObjectPointer: 1695 Width = Target->getPointerWidth(0); 1696 Align = Target->getPointerAlign(0); 1697 break; 1698 case Type::BlockPointer: { 1699 unsigned AS = getTargetAddressSpace( 1700 cast<BlockPointerType>(T)->getPointeeType()); 1701 Width = Target->getPointerWidth(AS); 1702 Align = Target->getPointerAlign(AS); 1703 break; 1704 } 1705 case Type::LValueReference: 1706 case Type::RValueReference: { 1707 // alignof and sizeof should never enter this code path here, so we go 1708 // the pointer route. 1709 unsigned AS = getTargetAddressSpace( 1710 cast<ReferenceType>(T)->getPointeeType()); 1711 Width = Target->getPointerWidth(AS); 1712 Align = Target->getPointerAlign(AS); 1713 break; 1714 } 1715 case Type::Pointer: { 1716 unsigned AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType()); 1717 Width = Target->getPointerWidth(AS); 1718 Align = Target->getPointerAlign(AS); 1719 break; 1720 } 1721 case Type::MemberPointer: { 1722 const MemberPointerType *MPT = cast<MemberPointerType>(T); 1723 std::tie(Width, Align) = ABI->getMemberPointerWidthAndAlign(MPT); 1724 break; 1725 } 1726 case Type::Complex: { 1727 // Complex types have the same alignment as their elements, but twice the 1728 // size. 
1729 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 1730 Width = EltInfo.Width * 2; 1731 Align = EltInfo.Align; 1732 break; 1733 } 1734 case Type::ObjCObject: 1735 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 1736 case Type::Adjusted: 1737 case Type::Decayed: 1738 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 1739 case Type::ObjCInterface: { 1740 const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T); 1741 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 1742 Width = toBits(Layout.getSize()); 1743 Align = toBits(Layout.getAlignment()); 1744 break; 1745 } 1746 case Type::Record: 1747 case Type::Enum: { 1748 const TagType *TT = cast<TagType>(T); 1749 1750 if (TT->getDecl()->isInvalidDecl()) { 1751 Width = 8; 1752 Align = 8; 1753 break; 1754 } 1755 1756 if (const EnumType *ET = dyn_cast<EnumType>(TT)) { 1757 const EnumDecl *ED = ET->getDecl(); 1758 TypeInfo Info = 1759 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 1760 if (unsigned AttrAlign = ED->getMaxAlignment()) { 1761 Info.Align = AttrAlign; 1762 Info.AlignIsRequired = true; 1763 } 1764 return Info; 1765 } 1766 1767 const RecordType *RT = cast<RecordType>(TT); 1768 const RecordDecl *RD = RT->getDecl(); 1769 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 1770 Width = toBits(Layout.getSize()); 1771 Align = toBits(Layout.getAlignment()); 1772 AlignIsRequired = RD->hasAttr<AlignedAttr>(); 1773 break; 1774 } 1775 1776 case Type::SubstTemplateTypeParm: 1777 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 1778 getReplacementType().getTypePtr()); 1779 1780 case Type::Auto: { 1781 const AutoType *A = cast<AutoType>(T); 1782 assert(!A->getDeducedType().isNull() && 1783 "cannot request the size of an undeduced or dependent auto type"); 1784 return getTypeInfo(A->getDeducedType().getTypePtr()); 1785 } 1786 1787 case Type::Paren: 1788 return 
getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 1789 1790 case Type::Typedef: { 1791 const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl(); 1792 TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr()); 1793 // If the typedef has an aligned attribute on it, it overrides any computed 1794 // alignment we have. This violates the GCC documentation (which says that 1795 // attribute(aligned) can only round up) but matches its implementation. 1796 if (unsigned AttrAlign = Typedef->getMaxAlignment()) { 1797 Align = AttrAlign; 1798 AlignIsRequired = true; 1799 } else { 1800 Align = Info.Align; 1801 AlignIsRequired = Info.AlignIsRequired; 1802 } 1803 Width = Info.Width; 1804 break; 1805 } 1806 1807 case Type::Elaborated: 1808 return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr()); 1809 1810 case Type::Attributed: 1811 return getTypeInfo( 1812 cast<AttributedType>(T)->getEquivalentType().getTypePtr()); 1813 1814 case Type::Atomic: { 1815 // Start with the base type information. 1816 TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType()); 1817 Width = Info.Width; 1818 Align = Info.Align; 1819 1820 // If the size of the type doesn't exceed the platform's max 1821 // atomic promotion width, make the size and alignment more 1822 // favorable to atomic operations: 1823 if (Width != 0 && Width <= Target->getMaxAtomicPromoteWidth()) { 1824 // Round the size up to a power of 2. 1825 if (!llvm::isPowerOf2_64(Width)) 1826 Width = llvm::NextPowerOf2(Width); 1827 1828 // Set the alignment equal to the size. 
      Align = static_cast<unsigned>(Width);
    }
  }
  break;

  case Type::Pipe: {
    // OpenCL pipe: take the width/alignment of its element type. This is the
    // last case, so control simply falls out of the switch.
    TypeInfo Info = getTypeInfo(cast<PipeType>(T)->getElementType());
    Width = Info.Width;
    Align = Info.Align;
  }

  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignIsRequired);
}

/// getOpenMPDefaultSimdAlign - Return the default alignment, in bits, to use
/// for OpenMP 'simd' constructs on the current target.
unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
  // Target ppc64 with QPX: simd default alignment for pointer to double is 32.
  if ((getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64 ||
       getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64le) &&
      getTargetInfo().getABI() == "elfv1-qpx" &&
      T->isSpecificBuiltinType(BuiltinType::Double))
    SimdAlign = 256;
  return SimdAlign;
}

/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
/// Note: integer division, so a BitSize that is not a multiple of the char
/// width is rounded down.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}

/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}

/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
  return getTypeInfoInChars(T).first;
}
CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
  return getTypeInfoInChars(T).first;
}

/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}
CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}

/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance to overalign
/// a data type.
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  // Look through arrays to the base element type.
  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(getPointerDiffType().getTypePtr());

  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  // Double and long long should be naturally aligned if possible.
  if (const ComplexType *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const EnumType *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
      T->isSpecificBuiltinType(BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(BuiltinType::ULongLong))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
    if (!TI.AlignIsRequired)
      return std::max(ABIAlign, (unsigned)getTypeSize(T));

  return ABIAlign;
}

/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
/// for __attribute__((aligned)) on this target, to be used if no alignment
/// value is specified.
unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
  return getTargetInfo().getDefaultAlignForAttributeAligned();
}

/// getAlignOfGlobalVar - Return the alignment in bits that should be given
/// to a global variable of the specified type.
unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
  // Never drop below the target's minimum alignment for global variables.
  return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign());
}

/// getAlignOfGlobalVarInChars - Return the alignment in characters that
/// should be given to a global variable of the specified type.
CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
  return toCharUnitsFromBits(getAlignOfGlobalVar(T));
}

/// Return the total offset, from the start of \p RD, accumulated along the
/// chain of base classes that share a vbptr with \p RD.
CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
  CharUnits Offset = CharUnits::Zero();
  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
  // Walk down the chain of vbptr-sharing bases, summing each base's offset
  // within its immediate container.
  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
    Offset += Layout->getBaseClassOffset(Base);
    Layout = &getASTRecordLayout(Base);
  }
  return Offset;
}

/// DeepCollectObjCIvars -
/// This routine first collects all declared, but not synthesized, ivars in
/// super class and then collects all ivars, including those synthesized for
/// current class. This routine is used for implementation of current class
/// when all ivars, declared and synthesized are known.
///
void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
                                      bool leafClass,
                            SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
  // Recurse into the superclass chain first, so superclass ivars are
  // collected before this class's.
  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
    DeepCollectObjCIvars(SuperClass, false, Ivars);
  if (!leafClass) {
    // Non-leaf classes contribute only their explicitly declared ivars.
    for (const auto *I : OI->ivars())
      Ivars.push_back(I);
  } else {
    // For the leaf class, walk the full list of declared ivars (which also
    // covers synthesized ones) via the all_declared_ivar chain.
    ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
    for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
         Iv = Iv->getNextIvar())
      Ivars.push_back(Iv);
  }
}

/// CollectInheritedProtocols - Collect all protocols in current class and
/// those inherited by it.
void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
                          llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
  if (const ObjCInterfaceDecl *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
    // We can use protocol_iterator here instead of
    // all_referenced_protocol_iterator since we are walking all categories.
    for (auto *Proto : OI->all_referenced_protocols()) {
      CollectInheritedProtocols(Proto, Protocols);
    }

    // Categories of this Interface.
    for (const auto *Cat : OI->visible_categories())
      CollectInheritedProtocols(Cat, Protocols);

    // Protocols from the entire superclass chain.
    if (ObjCInterfaceDecl *SD = OI->getSuperClass())
      while (SD) {
        CollectInheritedProtocols(SD, Protocols);
        SD = SD->getSuperClass();
      }
  } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
    for (auto *Proto : OC->protocols()) {
      CollectInheritedProtocols(Proto, Protocols);
    }
  } else if (const ObjCProtocolDecl *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
    // Insert the protocol. If it was already in the set, stop: this both
    // avoids redundant work and prevents infinite recursion on cyclic
    // protocol references.
    if (!Protocols.insert(
          const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
      return;

    for (auto *Proto : OP->protocols())
      CollectInheritedProtocols(Proto, Protocols);
  }
}

/// Count the ivars associated with \p OI that are not declared in the
/// interface itself: those in class extensions and in the implementation
/// (which includes synthesized ivars).
unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
  unsigned count = 0;
  // Count ivars declared in class extension.
  for (const auto *Ext : OI->known_extensions())
    count += Ext->ivar_size();

  // Count ivar defined in this class's implementation. This
  // includes synthesized ivars.
  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
    count += ImplDecl->ivar_size();

  return count;
}

/// Return true if \p E denotes a null value: an expression of nullptr_t
/// type, a null pointer constant of pointer type, or GNU __null.
bool ASTContext::isSentinelNullExpr(const Expr *E) {
  if (!E)
    return false;

  // nullptr_t is always treated as null.
  if (E->getType()->isNullPtrType()) return true;

  // A null pointer constant of any pointer type is also treated as null.
  if (E->getType()->isAnyPointerType() &&
      E->IgnoreParenCasts()->isNullPointerConstant(*this,
                                            Expr::NPC_ValueDependentIsNull))
    return true;

  // Unfortunately, __null has type 'int'.
  if (isa<GNUNullExpr>(E)) return true;

  return false;
}

/// \brief Get the implementation of ObjCInterfaceDecl, or NULL if none exists.
ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
    I = ObjCImpls.find(D);
  if (I != ObjCImpls.end())
    return cast<ObjCImplementationDecl>(I->second);
  return nullptr;
}
/// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
    I = ObjCImpls.find(D);
  if (I != ObjCImpls.end())
    return cast<ObjCCategoryImplDecl>(I->second);
  return nullptr;
}

/// \brief Set the implementation of ObjCInterfaceDecl.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
                           ObjCImplementationDecl *ImplD) {
  assert(IFaceD && ImplD && "Passed null params");
  ObjCImpls[IFaceD] = ImplD;
}
/// \brief Set the implementation of ObjCCategoryDecl.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
                           ObjCCategoryImplDecl *ImplD) {
  assert(CatD && ImplD && "Passed null params");
  ObjCImpls[CatD] = ImplD;
}

/// Get the recorded redeclaration of \p MD, or null if none was recorded.
const ObjCMethodDecl *
ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
  return ObjCMethodRedecls.lookup(MD);
}

/// Record \p Redecl as the redeclaration of \p MD; at most one redeclaration
/// may be recorded per method.
void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
                                            const ObjCMethodDecl *Redecl) {
  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
  ObjCMethodRedecls[MD] = Redecl;
}

/// Return the ObjC interface whose context contains \p ND: the interface
/// itself, or the interface of the enclosing category or implementation.
/// Returns null if \p ND is not declared in any of those contexts.
const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
                                              const NamedDecl *ND) const {
  if (const ObjCInterfaceDecl *ID =
          dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
    return ID;
  if (const ObjCCategoryDecl *CD =
          dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
    return CD->getClassInterface();
  if (const ObjCImplDecl *IMD =
          dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
    return IMD->getClassInterface();

  return nullptr;
}

/// \brief Get the copy initialization expression of VarDecl, or NULL if
/// none exists.
Expr *ASTContext::getBlockVarCopyInits(const VarDecl*VD) {
  assert(VD && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "getBlockVarCopyInits - not __block var");
  llvm::DenseMap<const VarDecl*, Expr*>::iterator
    I = BlockVarCopyInits.find(VD);
  return (I != BlockVarCopyInits.end()) ? cast<Expr>(I->second) : nullptr;
}

/// \brief Set the copy initialization expression of a block var decl.
void ASTContext::setBlockVarCopyInits(VarDecl*VD, Expr* Init) {
  assert(VD && Init && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "setBlockVarCopyInits - not __block var");
  BlockVarCopyInits[VD] = Init;
}

/// Allocate a TypeSourceInfo for type \p T with room for \p DataSize bytes
/// of TypeLoc data (computed from \p T when \p DataSize is 0).
TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
                                                 unsigned DataSize) const {
  if (!DataSize)
    DataSize = TypeLoc::getFullDataSizeForType(T);
  else
    assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
           "incorrect data size provided to CreateTypeSourceInfo!");

  // The TypeLoc data lives in trailing storage allocated immediately after
  // the TypeSourceInfo object itself.
  TypeSourceInfo *TInfo =
    (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
  new (TInfo) TypeSourceInfo(T);
  return TInfo;
}

/// Create a TypeSourceInfo for \p T whose locations are all initialized to
/// the single location \p L.
TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
                                                     SourceLocation L) const {
  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
  return DI;
}

const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  return getObjCLayout(D, nullptr);
}

const ASTRecordLayout &
ASTContext::getASTObjCImplementationLayout(
                                        const ObjCImplementationDecl *D) const {
  return getObjCLayout(D->getClassInterface(), D);
}

//===----------------------------------------------------------------------===//
// Type creation/memoization methods
//===----------------------------------------------------------------------===//

/// Return the uniqued ExtQuals-wrapped type for \p baseType with the extended
/// qualifiers in \p quals applied.
QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  // Fast qualifiers are carried on the QualType itself (second constructor
  // argument below), not in the ExtQuals node, so split them off first.
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, baseType, quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(quals);
    canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);

    // Re-find the insert position: the recursive call above may have
    // modified the folding set.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
  }

  ExtQuals *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(eq, insertPos);
  return QualType(eq, fastQuals);
}

/// Return \p T with the given address-space qualifier applied.
QualType
ASTContext::getAddrSpaceQualType(QualType T, unsigned AddressSpace) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getAddressSpace() == AddressSpace)
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an address space specified, it cannot get
  // another one.
  assert(!Quals.hasAddressSpace() &&
         "Type cannot be in multiple addr spaces!");
  Quals.addAddressSpace(AddressSpace);

  return getExtQualType(TypeNode, Quals);
}

/// Return \p T with the given Objective-C GC attribute applied. For pointers
/// to pointers, the attribute is applied to the innermost pointee.
QualType ASTContext::getObjCGCQualType(QualType T,
                                       Qualifiers::GC GCAttr) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getObjCGCAttr() == GCAttr)
    return T;

  if (const PointerType *ptr = T->getAs<PointerType>()) {
    QualType Pointee = ptr->getPointeeType();
    if (Pointee->isAnyPointerType()) {
      // Recurse through pointer-to-pointer and rebuild the outer pointer.
      QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
      return getPointerType(ResultType);
    }
  }

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an ObjCGC specified, it cannot get
  // another one.
  assert(!Quals.hasObjCGCAttr() &&
         "Type cannot have multiple ObjCGCs!");
  Quals.addObjCGCAttr(GCAttr);

  return getExtQualType(TypeNode, Quals);
}

/// Rebuild function type \p T with its ExtInfo replaced by \p Info,
/// preserving the rest of the prototype (or lack thereof).
const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
                                                   FunctionType::ExtInfo Info) {
  if (T->getExtInfo() == Info)
    return T;

  QualType Result;
  if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
    Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
  } else {
    const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    EPI.ExtInfo = Info;
    Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
  }

  return cast<FunctionType>(Result.getTypePtr());
}

/// Rewrite the return type of \p FD (and every one of its redeclarations)
/// to \p ResultType, then notify the AST mutation listener, if any.
void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
                                                 QualType ResultType) {
  // Walk from the most recent declaration back through all previous ones.
  FD = FD->getMostRecentDecl();
  while (true) {
    const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
    if (FunctionDecl *Next = FD->getPreviousDecl())
      FD = Next;
    else
      break;
  }
  if (ASTMutationListener *L = getASTMutationListener())
    L->DeducedReturnType(FD, ResultType);
}

/// Get a function type and produce the equivalent function type with the
/// specified exception specification. Type sugar that can be present on a
/// declaration of a function with an exception specification is permitted
/// and preserved. Other type sugar (for instance, typedefs) is not.
static QualType getFunctionTypeWithExceptionSpec(
    ASTContext &Context, QualType Orig,
    const FunctionProtoType::ExceptionSpecInfo &ESI) {
  // Might have some parens.
  if (auto *PT = dyn_cast<ParenType>(Orig))
    return Context.getParenType(
        getFunctionTypeWithExceptionSpec(Context, PT->getInnerType(), ESI));

  // Might have a calling-convention attribute.
  if (auto *AT = dyn_cast<AttributedType>(Orig))
    return Context.getAttributedType(
        AT->getAttrKind(),
        getFunctionTypeWithExceptionSpec(Context, AT->getModifiedType(), ESI),
        getFunctionTypeWithExceptionSpec(Context, AT->getEquivalentType(),
                                         ESI));

  // Anything else must be a function type. Rebuild it with the new exception
  // specification.
  const FunctionProtoType *Proto = cast<FunctionProtoType>(Orig);
  return Context.getFunctionType(
      Proto->getReturnType(), Proto->getParamTypes(),
      Proto->getExtProtoInfo().withExceptionSpec(ESI));
}

/// Replace the exception specification on \p FD's type (and, when
/// \p AsWritten is set, on its type-as-written) with \p ESI.
void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(*this, FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(*this, TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo;
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
           TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(Updated);
  }
}

/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType ASTContext::getComplexType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(getCanonicalType(T));

    // Get the new insert position for the node we care about; the recursive
    // call above may have changed the folding set.
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  ComplexType *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
  Types.push_back(New);
  ComplexTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
QualType ASTContext::getPointerType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  PointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  PointerType *New = new (*this, TypeAlignment) PointerType(T, Canonical);
  Types.push_back(New);
  PointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// Return the uniqued AdjustedType node recording that \p Orig was adjusted
/// to \p New.
QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(New);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, TypeAlignment)
      AdjustedType(Type::Adjusted, Orig, New, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}

/// Return the uniqued DecayedType node for array/function parameter type
/// \p T after the C array-to-pointer or function-to-pointer adjustment.
QualType ASTContext::getDecayedType(QualType T) const {
  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");

  QualType Decayed;

  // C99 6.7.5.3p7:
  //   A declaration of a parameter as "array of type" shall be
  //   adjusted to "qualified pointer to type", where the type
  //   qualifiers (if any) are those specified within the [ and ] of
  //   the array type derivation.
  if (T->isArrayType())
    Decayed = getArrayDecayedType(T);

  // C99 6.7.5.3p8:
  //   A declaration of a parameter as "function returning type"
  //   shall be adjusted to "pointer to function returning type", as
  //   in 6.3.2.1.
  if (T->isFunctionType())
    Decayed = getPointerType(T);

  // DecayedType is an AdjustedType, so it is uniqued in the same folding set.
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, T, Decayed);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(Decayed);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}

/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    BlockPointerType *NewIP =
      BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  BlockPointerType *New
    = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
  Types.push_back(New);
  BlockPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert(getCanonicalType(T) != OverloadTy &&
         "Unresolved overloaded function type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, SpelledAsLValue);

  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const ReferenceType *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. A reference-to-reference
  // likewise canonicalizes to a reference to the innermost pointee.
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about.
    LValueReferenceType *NewIP =
      LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  LValueReferenceType *New
    = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
                                                     SpelledAsLValue);
  Types.push_back(New);
  LValueReferenceTypes.InsertNode(New, InsertPos);

  return QualType(New, 0);
}

/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const ReferenceType *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about.
    RValueReferenceType *NewIP =
      RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  RValueReferenceType *New
    = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
  Types.push_back(New);
  RValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type, in the specified class.
QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, T, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
    Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls));

    // Get the new insert position for the node we care about.
    MemberPointerType *NewIP =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  MemberPointerType *New
    = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
  Types.push_back(New);
  MemberPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          ArrayType::ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // Convert the array size into a canonical width matching the pointer size
  // for the target, so equal sizes of different bit widths unique to the
  // same node.
  llvm::APInt ArySize(ArySizeIn);
  ArySize =
    ArySize.zextOrTrunc(Target->getPointerWidth(getTargetAddressSpace(EltTy)));

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, this won't
  // be a canonical type either, so fill in the canonical type field.
  QualType Canon;
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    // Canonicalize with qualifiers hoisted off the element type, then
    // reapply them to the array as a whole.
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(Canon, canonSplit.Quals);

    // Get the new insert position for the node we care about.
    ConstantArrayType *NewIP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  ConstantArrayType *New = new(*this,TypeAlignment)
    ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
  ConstantArrayTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::DependentTemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::Auto:
  case Type::PackExpansion:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified. All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    // Rebuild the pointer around the decayed pointee.
    result = getPointerType(getVariableArrayDecayedType(
                              cast<PointerType>(ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    // Rebuild the reference, preserving whether it was spelled as an
    // lvalue reference.
    const LValueReferenceType *lv = cast<LValueReferenceType>(ty);
    result = getLValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()),
                                    lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const RValueReferenceType *lv = cast<RValueReferenceType>(ty);
    result = getRValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const AtomicType *at = cast<AtomicType>(ty);
    result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
    break;
  }

  case Type::ConstantArray: {
    // Constant arrays keep their size; only the element type may need
    // decaying (e.g. int[3][n] -> int[3][*]).
    const ConstantArrayType *cat = cast<ConstantArrayType>(ty);
    result = getConstantArrayType(
                 getVariableArrayDecayedType(cat->getElementType()),
                                  cat->getSize(),
                                  cat->getSizeModifier(),
                                  cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    // Dependently-sized arrays likewise keep their (dependent) size
    // expression and only decay the element type.
    const DependentSizedArrayType *dat = cast<DependentSizedArrayType>(ty);
    result = getDependentSizedArrayType(
                 getVariableArrayDecayedType(dat->getElementType()),
                                        dat->getSizeExpr(),
                                        dat->getSizeModifier(),
                                        dat->getIndexTypeCVRQualifiers(),
                                        dat->getBracketsRange());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const IncompleteArrayType *iat = cast<IncompleteArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(iat->getElementType()),
                                  /*size*/ nullptr,
                                  ArrayType::Normal,
                                  iat->getIndexTypeCVRQualifiers(),
                                  SourceRange());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    // The size expression is dropped (nullptr) and the modifier becomes
    // Star, i.e. the '[*]' form; qualifiers and brackets are preserved.
    const VariableArrayType *vat = cast<VariableArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(vat->getElementType()),
                                  /*size*/ nullptr,
                                  ArrayType::Star,
                                  vat->getIndexTypeCVRQualifiers(),
                                  vat->getBracketsRange());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(result, split.Quals);
}

/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
///
/// \param EltTy the array's element type.
/// \param NumElts the (runtime) size expression; may be null for '[*]'.
/// \param ASM the array size modifier written in the source.
/// \param IndexTypeQuals qualifiers spelled inside the brackets.
/// \param Brackets the source range of the '[...]'.
QualType ASTContext::getVariableArrayType(QualType EltTy,
                                          Expr *NumElts,
                                          ArrayType::ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals,
                                          SourceRange Brackets) const {
  // Since we don't unique expressions, it isn't possible to unique VLA's
  // that have an expression provided for their size.
  QualType Canon;

  // Be sure to pull qualifiers off the element type. When the element type
  // is already canonical and unqualified, Canon stays null.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    // Canonicalize via the bare element type, then re-apply its qualifiers
    // on top of the resulting array type.
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
                                 IndexTypeQuals, Brackets);
    Canon = getQualifiedType(Canon, canonSplit.Quals);
  }

  VariableArrayType *New = new(*this, TypeAlignment)
    VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);

  // VLAs go in a side list (not a FoldingSet) precisely because they are
  // not uniqued; every call creates a fresh node.
  VariableArrayTypes.push_back(New);
  Types.push_back(New);
  return QualType(New, 0);
}

/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
2779 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 2780 Expr *numElements, 2781 ArrayType::ArraySizeModifier ASM, 2782 unsigned elementTypeQuals, 2783 SourceRange brackets) const { 2784 assert((!numElements || numElements->isTypeDependent() || 2785 numElements->isValueDependent()) && 2786 "Size must be type- or value-dependent!"); 2787 2788 // Dependently-sized array types that do not have a specified number 2789 // of elements will have their sizes deduced from a dependent 2790 // initializer. We do no canonicalization here at all, which is okay 2791 // because they can't be used in most locations. 2792 if (!numElements) { 2793 DependentSizedArrayType *newType 2794 = new (*this, TypeAlignment) 2795 DependentSizedArrayType(*this, elementType, QualType(), 2796 numElements, ASM, elementTypeQuals, 2797 brackets); 2798 Types.push_back(newType); 2799 return QualType(newType, 0); 2800 } 2801 2802 // Otherwise, we actually build a new type every time, but we 2803 // also build a canonical type. 2804 2805 SplitQualType canonElementType = getCanonicalType(elementType).split(); 2806 2807 void *insertPos =