1 //===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 10 #define DEBUG_TYPE "assembler" 11 #include "llvm/MC/MCAssembler.h" 12 #include "llvm/ADT/Statistic.h" 13 #include "llvm/ADT/StringExtras.h" 14 #include "llvm/ADT/Twine.h" 15 #include "llvm/MC/MCAsmBackend.h" 16 #include "llvm/MC/MCAsmLayout.h" 17 #include "llvm/MC/MCCodeEmitter.h" 18 #include "llvm/MC/MCContext.h" 19 #include "llvm/MC/MCDwarf.h" 20 #include "llvm/MC/MCExpr.h" 21 #include "llvm/MC/MCFixupKindInfo.h" 22 #include "llvm/MC/MCObjectWriter.h" 23 #include "llvm/MC/MCSection.h" 24 #include "llvm/MC/MCSymbol.h" 25 #include "llvm/MC/MCValue.h" 26 #include "llvm/Support/Debug.h" 27 #include "llvm/Support/ErrorHandling.h" 28 #include "llvm/Support/LEB128.h" 29 #include "llvm/Support/TargetRegistry.h" 30 #include "llvm/Support/raw_ostream.h" 31 32 using namespace llvm; 33 34 namespace { 35 namespace stats { 36 STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total"); 37 STATISTIC(EmittedRelaxableFragments, 38 "Number of emitted assembler fragments - relaxable"); 39 STATISTIC(EmittedDataFragments, 40 "Number of emitted assembler fragments - data"); 41 STATISTIC(EmittedCompactEncodedInstFragments, 42 "Number of emitted assembler fragments - compact encoded inst"); 43 STATISTIC(EmittedAlignFragments, 44 "Number of emitted assembler fragments - align"); 45 STATISTIC(EmittedFillFragments, 46 "Number of emitted assembler fragments - fill"); 47 STATISTIC(EmittedOrgFragments, 48 "Number of emitted assembler fragments - org"); 49 STATISTIC(evaluateFixup, "Number of evaluated fixups"); 50 STATISTIC(FragmentLayouts, "Number of fragment layouts"); 51 STATISTIC(ObjectBytes, "Number of emitted object file bytes"); 52 STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps"); 53 STATISTIC(RelaxedInstructions, "Number of relaxed instructions"); 54 } 55 } 56 57 // FIXME FIXME FIXME: There are number of places in this file where we convert 58 // what is a 64-bit assembler value used for computation into a value in the 59 // object file, which may truncate it. We should detect that truncation where 60 // invalid and report errors back. 61 62 /* *** */ 63 64 MCAsmLayout::MCAsmLayout(MCAssembler &Asm) 65 : Assembler(Asm), LastValidFragment() 66 { 67 // Compute the section layout order. Virtual sections must go last. 68 for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it) 69 if (!it->getSection().isVirtualSection()) 70 SectionOrder.push_back(&*it); 71 for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it) 72 if (it->getSection().isVirtualSection()) 73 SectionOrder.push_back(&*it); 74 } 75 76 bool MCAsmLayout::isFragmentValid(const MCFragment *F) const { 77 const MCSectionData &SD = *F->getParent(); 78 const MCFragment *LastValid = LastValidFragment.lookup(&SD); 79 if (!LastValid) 80 return false; 81 assert(LastValid->getParent() == F->getParent()); 82 return F->getLayoutOrder() <= LastValid->getLayoutOrder(); 83 } 84 85 void MCAsmLayout::invalidateFragmentsFrom(MCFragment *F) { 86 // If this fragment wasn't already valid, we don't need to do anything. 87 if (!isFragmentValid(F)) 88 return; 89 90 // Otherwise, reset the last valid fragment to the previous fragment 91 // (if this is the first fragment, it will be NULL). 92 const MCSectionData &SD = *F->getParent(); 93 LastValidFragment[&SD] = F->getPrevNode(); 94 } 95 96 void MCAsmLayout::ensureValid(const MCFragment *F) const { 97 MCSectionData &SD = *F->getParent(); 98 99 MCFragment *Cur = LastValidFragment[&SD]; 100 if (!Cur) 101 Cur = &*SD.begin(); 102 else 103 Cur = Cur->getNextNode(); 104 105 // Advance the layout position until the fragment is valid. 106 while (!isFragmentValid(F)) { 107 assert(Cur && "Layout bookkeeping error"); 108 const_cast<MCAsmLayout*>(this)->layoutFragment(Cur); 109 Cur = Cur->getNextNode(); 110 } 111 } 112 113 uint64_t MCAsmLayout::getFragmentOffset(const MCFragment *F) const { 114 ensureValid(F); 115 assert(F->Offset != ~UINT64_C(0) && "Address not set!"); 116 return F->Offset; 117 } 118 119 uint64_t MCAsmLayout::getSymbolOffset(const MCSymbolData *SD) const { 120 const MCSymbol &S = SD->getSymbol(); 121 122 // If this is a variable, then recursively evaluate now. 123 if (S.isVariable()) { 124 MCValue Target; 125 if (!S.getVariableValue()->EvaluateAsRelocatable(Target, *this)) 126 report_fatal_error("unable to evaluate offset for variable '" + 127 S.getName() + "'"); 128 129 // Verify that any used symbols are defined. 130 if (Target.getSymA() && Target.getSymA()->getSymbol().isUndefined()) 131 report_fatal_error("unable to evaluate offset to undefined symbol '" + 132 Target.getSymA()->getSymbol().getName() + "'"); 133 if (Target.getSymB() && Target.getSymB()->getSymbol().isUndefined()) 134 report_fatal_error("unable to evaluate offset to undefined symbol '" + 135 Target.getSymB()->getSymbol().getName() + "'"); 136 137 uint64_t Offset = Target.getConstant(); 138 if (Target.getSymA()) 139 Offset += getSymbolOffset(&Assembler.getSymbolData( 140 Target.getSymA()->getSymbol())); 141 if (Target.getSymB()) 142 Offset -= getSymbolOffset(&Assembler.getSymbolData( 143 Target.getSymB()->getSymbol())); 144 return Offset; 145 } 146 147 assert(SD->getFragment() && "Invalid getOffset() on undefined symbol!"); 148 return getFragmentOffset(SD->getFragment()) + SD->getOffset(); 149 } 150 151 uint64_t MCAsmLayout::getSectionAddressSize(const MCSectionData *SD) const { 152 // The size is the last fragment's end offset. 153 const MCFragment &F = SD->getFragmentList().back(); 154 return getFragmentOffset(&F) + getAssembler().computeFragmentSize(*this, F); 155 } 156 157 uint64_t MCAsmLayout::getSectionFileSize(const MCSectionData *SD) const { 158 // Virtual sections have no file size. 159 if (SD->getSection().isVirtualSection()) 160 return 0; 161 162 // Otherwise, the file size is the same as the address space size. 163 return getSectionAddressSize(SD); 164 } 165 166 uint64_t MCAsmLayout::computeBundlePadding(const MCFragment *F, 167 uint64_t FOffset, uint64_t FSize) { 168 uint64_t BundleSize = Assembler.getBundleAlignSize(); 169 assert(BundleSize > 0 && 170 "computeBundlePadding should only be called if bundling is enabled"); 171 uint64_t BundleMask = BundleSize - 1; 172 uint64_t OffsetInBundle = FOffset & BundleMask; 173 uint64_t EndOfFragment = OffsetInBundle + FSize; 174 175 // There are two kinds of bundling restrictions: 176 // 177 // 1) For alignToBundleEnd(), add padding to ensure that the fragment will 178 // *end* on a bundle boundary. 179 // 2) Otherwise, check if the fragment would cross a bundle boundary. If it 180 // would, add padding until the end of the bundle so that the fragment 181 // will start in a new one. 182 if (F->alignToBundleEnd()) { 183 // Three possibilities here: 184 // 185 // A) The fragment just happens to end at a bundle boundary, so we're good. 186 // B) The fragment ends before the current bundle boundary: pad it just 187 // enough to reach the boundary. 188 // C) The fragment ends after the current bundle boundary: pad it until it 189 // reaches the end of the next bundle boundary. 190 // 191 // Note: this code could be made shorter with some modulo trickery, but it's 192 // intentionally kept in its more explicit form for simplicity. 193 if (EndOfFragment == BundleSize) 194 return 0; 195 else if (EndOfFragment < BundleSize) 196 return BundleSize - EndOfFragment; 197 else { // EndOfFragment > BundleSize 198 return 2 * BundleSize - EndOfFragment; 199 } 200 } else if (EndOfFragment > BundleSize) 201 return BundleSize - OffsetInBundle; 202 else 203 return 0; 204 } 205 206 /* *** */ 207 208 MCFragment::MCFragment() : Kind(FragmentType(~0)) { 209 } 210 211 MCFragment::~MCFragment() { 212 } 213 214 MCFragment::MCFragment(FragmentType _Kind, MCSectionData *_Parent) 215 : Kind(_Kind), Parent(_Parent), Atom(0), Offset(~UINT64_C(0)), 216 LayoutOrder(~(0U)) 217 { 218 if (Parent) 219 Parent->getFragmentList().push_back(this); 220 } 221 222 /* *** */ 223 224 MCEncodedFragment::~MCEncodedFragment() { 225 } 226 227 /* *** */ 228 229 MCEncodedFragmentWithFixups::~MCEncodedFragmentWithFixups() { 230 } 231 232 /* *** */ 233 234 MCSectionData::MCSectionData() : Section(0) {} 235 236 MCSectionData::MCSectionData(const MCSection &_Section, MCAssembler *A) 237 : Section(&_Section), 238 Ordinal(~UINT32_C(0)), 239 Alignment(1), 240 BundleLockState(NotBundleLocked), BundleGroupBeforeFirstInst(false), 241 HasInstructions(false) 242 { 243 if (A) 244 A->getSectionList().push_back(this); 245 } 246 247 /* *** */ 248 249 MCSymbolData::MCSymbolData() : Symbol(0) {} 250 251 MCSymbolData::MCSymbolData(const MCSymbol &_Symbol, MCFragment *_Fragment, 252 uint64_t _Offset, MCAssembler *A) 253 : Symbol(&_Symbol), Fragment(_Fragment), Offset(_Offset), 254 IsExternal(false), IsPrivateExtern(false), 255 CommonSize(0), SymbolSize(0), CommonAlign(0), 256 Flags(0), Index(0) 257 { 258 if (A) 259 A->getSymbolList().push_back(this); 260 } 261 262 /* *** */ 263 264 MCAssembler::MCAssembler(MCContext &Context_, MCAsmBackend &Backend_, 265 MCCodeEmitter &Emitter_, MCObjectWriter &Writer_, 266 raw_ostream &OS_) 267 : Context(Context_), Backend(Backend_), Emitter(Emitter_), Writer(&Writer_), 268 OS(OS_), BundleAlignSize(0), RelaxAll(false), NoExecStack(false), 269 SubsectionsViaSymbols(false), ELFHeaderEFlags(0) { 270 } 271 272 MCAssembler::~MCAssembler() { 273 } 274 275 void MCAssembler::setWriter(MCObjectWriter &ObjectWriter) { 276 delete Writer; 277 Writer = &ObjectWriter; 278 } 279 280 void MCAssembler::reset() { 281 Sections.clear(); 282 Symbols.clear(); 283 SectionMap.clear(); 284 SymbolMap.clear(); 285 IndirectSymbols.clear(); 286 DataRegions.clear(); 287 ThumbFuncs.clear(); 288 RelaxAll = false; 289 NoExecStack = false; 290 SubsectionsViaSymbols = false; 291 ELFHeaderEFlags = 0; 292 293 // reset objects owned by us 294 getBackend().reset(); 295 getEmitter().reset(); 296 getWriter().reset(); 297 } 298 299 bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const { 300 // Non-temporary labels should always be visible to the linker. 301 if (!Symbol.isTemporary()) 302 return true; 303 304 // Absolute temporary labels are never visible. 305 if (!Symbol.isInSection()) 306 return false; 307 308 // Otherwise, check if the section requires symbols even for temporary labels. 309 return getBackend().doesSectionRequireSymbols(Symbol.getSection()); 310 } 311 312 const MCSymbolData *MCAssembler::getAtom(const MCSymbolData *SD) const { 313 // Linker visible symbols define atoms. 314 if (isSymbolLinkerVisible(SD->getSymbol())) 315 return SD; 316 317 // Absolute and undefined symbols have no defining atom. 318 if (!SD->getFragment()) 319 return 0; 320 321 // Non-linker visible symbols in sections which can't be atomized have no 322 // defining atom. 323 if (!getBackend().isSectionAtomizable( 324 SD->getFragment()->getParent()->getSection())) 325 return 0; 326 327 // Otherwise, return the atom for the containing fragment. 328 return SD->getFragment()->getAtom(); 329 } 330 331 bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout, 332 const MCFixup &Fixup, const MCFragment *DF, 333 MCValue &Target, uint64_t &Value) const { 334 ++stats::evaluateFixup; 335 336 if (!Fixup.getValue()->EvaluateAsRelocatable(Target, Layout)) 337 getContext().FatalError(Fixup.getLoc(), "expected relocatable expression"); 338 339 bool IsPCRel = Backend.getFixupKindInfo( 340 Fixup.getKind()).Flags & MCFixupKindInfo::FKF_IsPCRel; 341 342 bool IsResolved; 343 if (IsPCRel) { 344 if (Target.getSymB()) { 345 IsResolved = false; 346 } else if (!Target.getSymA()) { 347 IsResolved = false; 348 } else { 349 const MCSymbolRefExpr *A = Target.getSymA(); 350 const MCSymbol &SA = A->getSymbol(); 351 if (A->getKind() != MCSymbolRefExpr::VK_None || 352 SA.AliasedSymbol().isUndefined()) { 353 IsResolved = false; 354 } else { 355 const MCSymbolData &DataA = getSymbolData(SA); 356 IsResolved = 357 getWriter().IsSymbolRefDifferenceFullyResolvedImpl(*this, DataA, 358 *DF, false, true); 359 } 360 } 361 } else { 362 IsResolved = Target.isAbsolute(); 363 } 364 365 Value = Target.getConstant(); 366 367 if (const MCSymbolRefExpr *A = Target.getSymA()) { 368 const MCSymbol &Sym = A->getSymbol().AliasedSymbol(); 369 if (Sym.isDefined()) 370 Value += Layout.getSymbolOffset(&getSymbolData(Sym)); 371 } 372 if (const MCSymbolRefExpr *B = Target.getSymB()) { 373 const MCSymbol &Sym = B->getSymbol().AliasedSymbol(); 374 if (Sym.isDefined()) 375 Value -= Layout.getSymbolOffset(&getSymbolData(Sym)); 376 } 377 378 379 bool ShouldAlignPC = Backend.getFixupKindInfo(Fixup.getKind()).Flags & 380 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits; 381 assert((ShouldAlignPC ? IsPCRel : true) && 382 "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!"); 383 384 if (IsPCRel) { 385 uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset(); 386 387 // A number of ARM fixups in Thumb mode require that the effective PC 388 // address be determined as the 32-bit aligned version of the actual offset. 389 if (ShouldAlignPC) Offset &= ~0x3; 390 Value -= Offset; 391 } 392 393 // Let the backend adjust the fixup value if necessary, including whether 394 // we need a relocation. 395 Backend.processFixupValue(*this, Layout, Fixup, DF, Target, Value, 396 IsResolved); 397 398 return IsResolved; 399 } 400 401 uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout, 402 const MCFragment &F) const { 403 switch (F.getKind()) { 404 case MCFragment::FT_Data: 405 case MCFragment::FT_Relaxable: 406 case MCFragment::FT_CompactEncodedInst: 407 return cast<MCEncodedFragment>(F).getContents().size(); 408 case MCFragment::FT_Fill: 409 return cast<MCFillFragment>(F).getSize(); 410 411 case MCFragment::FT_LEB: 412 return cast<MCLEBFragment>(F).getContents().size(); 413 414 case MCFragment::FT_Align: { 415 const MCAlignFragment &AF = cast<MCAlignFragment>(F); 416 unsigned Offset = Layout.getFragmentOffset(&AF); 417 unsigned Size = OffsetToAlignment(Offset, AF.getAlignment()); 418 // If we are padding with nops, force the padding to be larger than the 419 // minimum nop size. 420 if (Size > 0 && AF.hasEmitNops()) { 421 while (Size % getBackend().getMinimumNopSize()) 422 Size += AF.getAlignment(); 423 } 424 if (Size > AF.getMaxBytesToEmit()) 425 return 0; 426 return Size; 427 } 428 429 case MCFragment::FT_Org: { 430 const MCOrgFragment &OF = cast<MCOrgFragment>(F); 431 int64_t TargetLocation; 432 if (!OF.getOffset().EvaluateAsAbsolute(TargetLocation, Layout)) 433 report_fatal_error("expected assembly-time absolute expression"); 434 435 // FIXME: We need a way to communicate this error. 436 uint64_t FragmentOffset = Layout.getFragmentOffset(&OF); 437 int64_t Size = TargetLocation - FragmentOffset; 438 if (Size < 0 || Size >= 0x40000000) 439 report_fatal_error("invalid .org offset '" + Twine(TargetLocation) + 440 "' (at offset '" + Twine(FragmentOffset) + "')"); 441 return Size; 442 } 443 444 case MCFragment::FT_Dwarf: 445 return cast<MCDwarfLineAddrFragment>(F).getContents().size(); 446 case MCFragment::FT_DwarfFrame: 447 return cast<MCDwarfCallFrameFragment>(F).getContents().size(); 448 } 449 450 llvm_unreachable("invalid fragment kind"); 451 } 452 453 void MCAsmLayout::layoutFragment(MCFragment *F) { 454 MCFragment *Prev = F->getPrevNode(); 455 456 // We should never try to recompute something which is valid. 457 assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!"); 458 // We should never try to compute the fragment layout if its predecessor 459 // isn't valid. 460 assert((!Prev || isFragmentValid(Prev)) && 461 "Attempt to compute fragment before its predecessor!"); 462 463 ++stats::FragmentLayouts; 464 465 // Compute fragment offset and size. 466 if (Prev) 467 F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev); 468 else 469 F->Offset = 0; 470 LastValidFragment[F->getParent()] = F; 471 472 // If bundling is enabled and this fragment has instructions in it, it has to 473 // obey the bundling restrictions. With padding, we'll have: 474 // 475 // 476 // BundlePadding 477 // ||| 478 // ------------------------------------- 479 // Prev |##########| F | 480 // ------------------------------------- 481 // ^ 482 // | 483 // F->Offset 484 // 485 // The fragment's offset will point to after the padding, and its computed 486 // size won't include the padding. 487 // 488 if (Assembler.isBundlingEnabled() && F->hasInstructions()) { 489 assert(isa<MCEncodedFragment>(F) && 490 "Only MCEncodedFragment implementations have instructions"); 491 uint64_t FSize = Assembler.computeFragmentSize(*this, *F); 492 493 if (FSize > Assembler.getBundleAlignSize()) 494 report_fatal_error("Fragment can't be larger than a bundle size"); 495 496 uint64_t RequiredBundlePadding = computeBundlePadding(F, F->Offset, FSize); 497 if (RequiredBundlePadding > UINT8_MAX) 498 report_fatal_error("Padding cannot exceed 255 bytes"); 499 F->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding)); 500 F->Offset += RequiredBundlePadding; 501 } 502 } 503 504 /// \brief Write the contents of a fragment to the given object writer. Expects 505 /// a MCEncodedFragment. 506 static void writeFragmentContents(const MCFragment &F, MCObjectWriter *OW) { 507 const MCEncodedFragment &EF = cast<MCEncodedFragment>(F); 508 OW->WriteBytes(EF.getContents()); 509 } 510 511 /// \brief Write the fragment \p F to the output file. 512 static void writeFragment(const MCAssembler &Asm, const MCAsmLayout &Layout, 513 const MCFragment &F) { 514 MCObjectWriter *OW = &Asm.getWriter(); 515 516 // FIXME: Embed in fragments instead? 517 uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F); 518 519 // Should NOP padding be written out before this fragment? 520 unsigned BundlePadding = F.getBundlePadding(); 521 if (BundlePadding > 0) { 522 assert(Asm.isBundlingEnabled() && 523 "Writing bundle padding with disabled bundling"); 524 assert(F.hasInstructions() && 525 "Writing bundle padding for a fragment without instructions"); 526 527 unsigned TotalLength = BundlePadding + static_cast<unsigned>(FragmentSize); 528 if (F.alignToBundleEnd() && TotalLength > Asm.getBundleAlignSize()) { 529 // If the padding itself crosses a bundle boundary, it must be emitted 530 // in 2 pieces, since even nop instructions must not cross boundaries. 531 // v--------------v <- BundleAlignSize 532 // v---------v <- BundlePadding 533 // ---------------------------- 534 // | Prev |####|####| F | 535 // ---------------------------- 536 // ^-------------------^ <- TotalLength 537 unsigned DistanceToBoundary = TotalLength - Asm.getBundleAlignSize(); 538 if (!Asm.getBackend().writeNopData(DistanceToBoundary, OW)) 539 report_fatal_error("unable to write NOP sequence of " + 540 Twine(DistanceToBoundary) + " bytes"); 541 BundlePadding -= DistanceToBoundary; 542 } 543 if (!Asm.getBackend().writeNopData(BundlePadding, OW)) 544 report_fatal_error("unable to write NOP sequence of " + 545 Twine(BundlePadding) + " bytes"); 546 } 547 548 // This variable (and its dummy usage) is to participate in the assert at 549 // the end of the function. 550 uint64_t Start = OW->getStream().tell(); 551 (void) Start; 552 553 ++stats::EmittedFragments; 554 555 switch (F.getKind()) { 556 case MCFragment::FT_Align: { 557 ++stats::EmittedAlignFragments; 558 const MCAlignFragment &AF = cast<MCAlignFragment>(F); 559 uint64_t Count = FragmentSize / AF.getValueSize(); 560 561 assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!"); 562 563 // FIXME: This error shouldn't actually occur (the front end should emit 564 // multiple .align directives to enforce the semantics it wants), but is 565 // severe enough that we want to report it. How to handle this? 566 if (Count * AF.getValueSize() != FragmentSize) 567 report_fatal_error("undefined .align directive, value size '" + 568 Twine(AF.getValueSize()) + 569 "' is not a divisor of padding size '" + 570 Twine(FragmentSize) + "'"); 571 572 // See if we are aligning with nops, and if so do that first to try to fill 573 // the Count bytes. Then if that did not fill any bytes or there are any 574 // bytes left to fill use the Value and ValueSize to fill the rest. 575 // If we are aligning with nops, ask that target to emit the right data. 576 if (AF.hasEmitNops()) { 577 if (!Asm.getBackend().writeNopData(Count, OW)) 578 report_fatal_error("unable to write nop sequence of " + 579 Twine(Count) + " bytes"); 580 break; 581 } 582 583 // Otherwise, write out in multiples of the value size. 584 for (uint64_t i = 0; i != Count; ++i) { 585 switch (AF.getValueSize()) { 586 default: llvm_unreachable("Invalid size!"); 587 case 1: OW->Write8 (uint8_t (AF.getValue())); break; 588 case 2: OW->Write16(uint16_t(AF.getValue())); break; 589 case 4: OW->Write32(uint32_t(AF.getValue())); break; 590 case 8: OW->Write64(uint64_t(AF.getValue())); break; 591 } 592 } 593 break; 594 } 595 596 case MCFragment::FT_Data: 597 ++stats::EmittedDataFragments; 598 writeFragmentContents(F, OW); 599 break; 600 601 case MCFragment::FT_Relaxable: 602 ++stats::EmittedRelaxableFragments; 603 writeFragmentContents(F, OW); 604 break; 605 606 case MCFragment::FT_CompactEncodedInst: 607 ++stats::EmittedCompactEncodedInstFragments; 608 writeFragmentContents(F, OW); 609 break; 610 611 case MCFragment::FT_Fill: { 612 ++stats::EmittedFillFragments; 613 const MCFillFragment &FF = cast<MCFillFragment>(F); 614 615 assert(FF.getValueSize() && "Invalid virtual align in concrete fragment!"); 616 617 for (uint64_t i = 0, e = FF.getSize() / FF.getValueSize(); i != e; ++i) { 618 switch (FF.getValueSize()) { 619 default: llvm_unreachable("Invalid size!"); 620 case 1: OW->Write8 (uint8_t (FF.getValue())); break; 621 case 2: OW->Write16(uint16_t(FF.getValue())); break; 622 case 4: OW->Write32(uint32_t(FF.getValue())); break; 623 case 8: OW->Write64(uint64_t(FF.getValue())); break; 624 } 625 } 626 break; 627 } 628 629 case MCFragment::FT_LEB: { 630 const MCLEBFragment &LF = cast<MCLEBFragment>(F); 631 OW->WriteBytes(LF.getContents().str()); 632 break; 633 } 634 635 case MCFragment::FT_Org: { 636 ++stats::EmittedOrgFragments; 637 const MCOrgFragment &OF = cast<MCOrgFragment>(F); 638 639 for (uint64_t i = 0, e = FragmentSize; i != e; ++i) 640 OW->Write8(uint8_t(OF.getValue())); 641 642 break; 643 } 644 645 case MCFragment::FT_Dwarf: { 646 const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F); 647 OW->WriteBytes(OF.getContents().str()); 648 break; 649 } 650 case MCFragment::FT_DwarfFrame: { 651 const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F); 652 OW->WriteBytes(CF.getContents().str()); 653 break; 654 } 655 } 656 657 assert(OW->getStream().tell() - Start == FragmentSize && 658 "The stream should advance by fragment size"); 659 } 660 661 void MCAssembler::writeSectionData(const MCSectionData *SD, 662 const MCAsmLayout &Layout) const { 663 // Ignore virtual sections. 664 if (SD->getSection().isVirtualSection()) { 665 assert(Layout.getSectionFileSize(SD) == 0 && "Invalid size for section!"); 666 667 // Check that contents are only things legal inside a virtual section. 668 for (MCSectionData::const_iterator it = SD->begin(), 669 ie = SD->end(); it != ie; ++it) { 670 switch (it->getKind()) { 671 default: llvm_unreachable("Invalid fragment in virtual section!"); 672 case MCFragment::FT_Data: { 673 // Check that we aren't trying to write a non-zero contents (or fixups) 674 // into a virtual section. This is to support clients which use standard 675 // directives to fill the contents of virtual sections. 676 const MCDataFragment &DF = cast<MCDataFragment>(*it); 677 assert(DF.fixup_begin() == DF.fixup_end() && 678 "Cannot have fixups in virtual section!"); 679 for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i) 680 assert(DF.getContents()[i] == 0 && 681 "Invalid data value for virtual section!"); 682 break; 683 } 684 case MCFragment::FT_Align: 685 // Check that we aren't trying to write a non-zero value into a virtual 686 // section. 687 assert((!cast<MCAlignFragment>(it)->getValueSize() || 688 !cast<MCAlignFragment>(it)->getValue()) && 689 "Invalid align in virtual section!"); 690 break; 691 case MCFragment::FT_Fill: 692 assert(!cast<MCFillFragment>(it)->getValueSize() && 693 "Invalid fill in virtual section!"); 694 break; 695 } 696 } 697 698 return; 699 } 700 701 uint64_t Start = getWriter().getStream().tell(); 702 (void)Start; 703 704 for (MCSectionData::const_iterator it = SD->begin(), ie = SD->end(); 705 it != ie; ++it) 706 writeFragment(*this, Layout, *it); 707 708 assert(getWriter().getStream().tell() - Start == 709 Layout.getSectionAddressSize(SD)); 710 } 711 712 713 uint64_t MCAssembler::handleFixup(const MCAsmLayout &Layout, 714 MCFragment &F, 715 const MCFixup &Fixup) { 716 // Evaluate the fixup. 717 MCValue Target; 718 uint64_t FixedValue; 719 if (!evaluateFixup(Layout, Fixup, &F, Target, FixedValue)) { 720 // The fixup was unresolved, we need a relocation. Inform the object 721 // writer of the relocation, and give it an opportunity to adjust the 722 // fixup value if need be. 723 getWriter().RecordRelocation(*this, Layout, &F, Fixup, Target, FixedValue); 724 } 725 return FixedValue; 726 } 727 728 void MCAssembler::Finish() { 729 DEBUG_WITH_TYPE("mc-dump", { 730 llvm::errs() << "assembler backend - pre-layout\n--\n"; 731 dump(); }); 732 733 // Create the layout object. 734 MCAsmLayout Layout(*this); 735 736 // Create dummy fragments and assign section ordinals. 737 unsigned SectionIndex = 0; 738 for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) { 739 // Create dummy fragments to eliminate any empty sections, this simplifies 740 // layout. 741 if (it->getFragmentList().empty()) 742 new MCDataFragment(it); 743 744 it->setOrdinal(SectionIndex++); 745 } 746 747 // Assign layout order indices to sections and fragments. 748 for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) { 749 MCSectionData *SD = Layout.getSectionOrder()[i]; 750 SD->setLayoutOrder(i); 751 752 unsigned FragmentIndex = 0; 753 for (MCSectionData::iterator iFrag = SD->begin(), iFragEnd = SD->end(); 754 iFrag != iFragEnd; ++iFrag) 755 iFrag->setLayoutOrder(FragmentIndex++); 756 } 757 758 // Layout until everything fits. 759 while (layoutOnce(Layout)) 760 continue; 761 762 DEBUG_WITH_TYPE("mc-dump", { 763 llvm::errs() << "assembler backend - post-relaxation\n--\n"; 764 dump(); }); 765 766 // Finalize the layout, including fragment lowering. 767 finishLayout(Layout); 768 769 DEBUG_WITH_TYPE("mc-dump", { 770 llvm::errs() << "assembler backend - final-layout\n--\n"; 771 dump(); }); 772 773 uint64_t StartOffset = OS.tell(); 774 775 // Allow the object writer a chance to perform post-layout binding (for 776 // example, to set the index fields in the symbol data). 777 getWriter().ExecutePostLayoutBinding(*this, Layout); 778 779 // Evaluate and apply the fixups, generating relocation entries as necessary. 780 for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) { 781 for (MCSectionData::iterator it2 = it->begin(), 782 ie2 = it->end(); it2 != ie2; ++it2) { 783 MCEncodedFragmentWithFixups *F = 784 dyn_cast<MCEncodedFragmentWithFixups>(it2); 785 if (F) { 786 for (MCEncodedFragmentWithFixups::fixup_iterator it3 = F->fixup_begin(), 787 ie3 = F->fixup_end(); it3 != ie3; ++it3) { 788 MCFixup &Fixup = *it3; 789 uint64_t FixedValue = handleFixup(Layout, *F, Fixup); 790 getBackend().applyFixup(Fixup, F->getContents().data(), 791 F->getContents().size(), FixedValue); 792 } 793 } 794 } 795 } 796 797 // Write the object file. 798 getWriter().WriteObject(*this, Layout); 799 800 stats::ObjectBytes += OS.tell() - StartOffset; 801 } 802 803 bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup, 804 const MCRelaxableFragment *DF, 805 const MCAsmLayout &Layout) const { 806 // If we cannot resolve the fixup value, it requires relaxation. 807 MCValue Target; 808 uint64_t Value; 809 if (!evaluateFixup(Layout, Fixup, DF, Target, Value)) 810 return true; 811 812 return getBackend().fixupNeedsRelaxation(Fixup, Value, DF, Layout); 813 } 814 815 bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F, 816 const MCAsmLayout &Layout) const { 817 // If this inst doesn't ever need relaxation, ignore it. This occurs when we 818 // are intentionally pushing out inst fragments, or because we relaxed a 819 // previous instruction to one that doesn't need relaxation. 820 if (!getBackend().mayNeedRelaxation(F->getInst())) 821 return false; 822 823 for (MCRelaxableFragment::const_fixup_iterator it = F->fixup_begin(), 824 ie = F->fixup_end(); it != ie; ++it) 825 if (fixupNeedsRelaxation(*it, F, Layout)) 826 return true; 827 828 return false; 829 } 830 831 bool MCAssembler::relaxInstruction(MCAsmLayout &Layout, 832 MCRelaxableFragment &F) { 833 if (!fragmentNeedsRelaxation(&F, Layout)) 834 return false; 835 836 ++stats::RelaxedInstructions; 837 838 // FIXME-PERF: We could immediately lower out instructions if we can tell 839 // they are fully resolved, to avoid retesting on later passes. 840 841 // Relax the fragment. 842 843 MCInst Relaxed; 844 getBackend().relaxInstruction(F.getInst(), Relaxed); 845 846 // Encode the new instruction. 847 // 848 // FIXME-PERF: If it matters, we could let the target do this. It can 849 // probably do so more efficiently in many cases. 850 SmallVector<MCFixup, 4> Fixups; 851 SmallString<256> Code; 852 raw_svector_ostream VecOS(Code); 853 getEmitter().EncodeInstruction(Relaxed, VecOS, Fixups); 854 VecOS.flush(); 855 856 // Update the fragment. 857 F.setInst(Relaxed); 858 F.getContents() = Code; 859 F.getFixups() = Fixups; 860 861 return true; 862 } 863 864 bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) { 865 int64_t Value = 0; 866 uint64_t OldSize = LF.getContents().size(); 867 bool IsAbs = LF.getValue().EvaluateAsAbsolute(Value, Layout); 868 (void)IsAbs; 869 assert(IsAbs); 870 SmallString<8> &Data = LF.getContents(); 871 Data.clear(); 872 raw_svector_ostream OSE(Data); 873 if (LF.isSigned()) 874 encodeSLEB128(Value, OSE); 875 else 876 encodeULEB128(Value, OSE); 877 OSE.flush(); 878 return OldSize != LF.getContents().size(); 879 } 880 881 bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout, 882 MCDwarfLineAddrFragment &DF) { 883 int64_t AddrDelta = 0; 884 uint64_t OldSize = DF.getContents().size(); 885 bool IsAbs = DF.getAddrDelta().EvaluateAsAbsolute(AddrDelta, Layout); 886 (void)IsAbs; 887 assert(IsAbs); 888 int64_t LineDelta; 889 LineDelta = DF.getLineDelta(); 890 SmallString<8> &Data = DF.getContents(); 891 Data.clear(); 892 raw_svector_ostream OSE(Data); 893 MCDwarfLineAddr::Encode(LineDelta, AddrDelta, OSE); 894 OSE.flush(); 895 return OldSize != Data.size(); 896 } 897 898 bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout, 899 MCDwarfCallFrameFragment &DF) { 900 int64_t AddrDelta = 0; 901 uint64_t OldSize = DF.getContents().size(); 902 bool IsAbs = DF.getAddrDelta().EvaluateAsAbsolute(AddrDelta, Layout); 903 (void)IsAbs; 904 assert(IsAbs); 905 SmallString<8> &Data = DF.getContents(); 906 Data.clear(); 907 raw_svector_ostream OSE(Data); 908 MCDwarfFrameEmitter::EncodeAdvanceLoc(AddrDelta, OSE); 909 OSE.flush(); 910 return OldSize != Data.size(); 911 } 912 913 bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSectionData &SD) { 914 // Holds the first fragment which needed relaxing during this layout. It will 915 // remain NULL if none were relaxed. 916 // When a fragment is relaxed, all the fragments following it should get 917 // invalidated because their offset is going to change. 918 MCFragment *FirstRelaxedFragment = NULL; 919 920 // Attempt to relax all the fragments in the section. 921 for (MCSectionData::iterator I = SD.begin(), IE = SD.end(); I != IE; ++I) { 922 // Check if this is a fragment that needs relaxation. 923 bool RelaxedFrag = false; 924 switch(I->getKind()) { 925 default: 926 break; 927 case MCFragment::FT_Relaxable: 928 assert(!getRelaxAll() && 929 "Did not expect a MCRelaxableFragment in RelaxAll mode"); 930 RelaxedFrag = relaxInstruction(Layout, *cast<MCRelaxableFragment>(I)); 931 break; 932 case MCFragment::FT_Dwarf: 933 RelaxedFrag = relaxDwarfLineAddr(Layout, 934 *cast<MCDwarfLineAddrFragment>(I)); 935 break; 936 case MCFragment::FT_DwarfFrame: 937 RelaxedFrag = 938 relaxDwarfCallFrameFragment(Layout, 939 *cast<MCDwarfCallFrameFragment>(I)); 940 break; 941 case MCFragment::FT_LEB: 942 RelaxedFrag = relaxLEB(Layout, *cast<MCLEBFragment>(I)); 943 break; 944 } 945 if (RelaxedFrag && !FirstRelaxedFragment) 946 FirstRelaxedFragment = I; 947 } 948 if (FirstRelaxedFragment) { 949 Layout.invalidateFragmentsFrom(FirstRelaxedFragment); 950 return true; 951 } 952 return false; 953 } 954 955 bool MCAssembler::layoutOnce(MCAsmLayout &Layout) { 956 ++stats::RelaxationSteps; 957 958 bool WasRelaxed = false; 959 for (iterator it = begin(), ie = end(); it != ie; ++it) { 960 MCSectionData &SD = *it; 961 while (layoutSectionOnce(Layout, SD)) 962 WasRelaxed = true; 963 } 964 965 return WasRelaxed; 966 } 967 968 void MCAssembler::finishLayout(MCAsmLayout &Layout) { 969 // The layout is done. Mark every fragment as valid. 970 for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) { 971 Layout.getFragmentOffset(&*Layout.getSectionOrder()[i]->rbegin()); 972 } 973 } 974 975 // Debugging methods 976 977 namespace llvm { 978 979 raw_ostream &operator<<(raw_ostream &OS, const MCFixup &AF) { 980 OS << "<MCFixup" << " Offset:" << AF.getOffset() 981 << " Value:" << *AF.getValue() 982 << " Kind:" << AF.getKind() << ">"; 983 return OS; 984 } 985 986 } 987 988 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 989 void MCFragment::dump() { 990 raw_ostream &OS = llvm::errs(); 991 992 OS << "<"; 993 switch (getKind()) { 994 case MCFragment::FT_Align: OS << "MCAlignFragment"; break; 995 case MCFragment::FT_Data: OS << "MCDataFragment"; break; 996 case MCFragment::FT_CompactEncodedInst: 997 OS << "MCCompactEncodedInstFragment"; break; 998 case MCFragment::FT_Fill: OS << "MCFillFragment"; break; 999 case MCFragment::FT_Relaxable: OS << "MCRelaxableFragment"; break; 1000 case MCFragment::FT_Org: OS << "MCOrgFragment"; break; 1001 case MCFragment::FT_Dwarf: OS << "MCDwarfFragment"; break; 1002 case MCFragment::FT_DwarfFrame: OS << "MCDwarfCallFrameFragment"; break; 1003 case MCFragment::FT_LEB: OS << "MCLEBFragment"; break; 1004 } 1005 1006 OS << "<MCFragment " << (void*) this << " LayoutOrder:" << LayoutOrder 1007 << " Offset:" << Offset 1008 << " HasInstructions:" << hasInstructions() 1009 << " BundlePadding:" << static_cast<unsigned>(getBundlePadding()) << ">"; 1010 1011 switch (getKind()) { 1012 case MCFragment::FT_Align: { 1013 const MCAlignFragment *AF = cast<MCAlignFragment>(this); 1014 if (AF->hasEmitNops()) 1015 OS << " (emit nops)"; 1016 OS << "\n "; 1017 OS << " Alignment:" << AF->getAlignment() 1018 << " Value:" << AF->getValue() << " ValueSize:" << AF->getValueSize() 1019 << " MaxBytesToEmit:" << AF->getMaxBytesToEmit() << ">"; 1020 break; 1021 } 1022 case MCFragment::FT_Data: { 1023 const MCDataFragment *DF = cast<MCDataFragment>(this); 1024 OS << "\n "; 1025 OS << " Contents:["; 1026 const SmallVectorImpl<char> &Contents = DF->getContents(); 1027 for (unsigned i = 0, e = Contents.size(); i != e; ++i) { 1028 if (i) OS << ","; 1029 OS << hexdigit((Contents[i] >> 4) & 0xF) << hexdigit(Contents[i] & 0xF); 1030 } 1031 OS << "] (" << Contents.size() << " bytes)"; 1032 1033 if (DF->fixup_begin() != DF->fixup_end()) { 1034 OS << ",\n "; 1035 OS << " Fixups:["; 1036 for (MCDataFragment::const_fixup_iterator it = DF->fixup_begin(), 1037 ie = DF->fixup_end(); it != ie; ++it) { 1038 if (it != DF->fixup_begin()) OS << ",\n "; 1039 OS << *it; 1040 } 1041 OS << "]"; 1042 } 1043 break; 1044 } 1045 case MCFragment::FT_CompactEncodedInst: { 1046 const MCCompactEncodedInstFragment *CEIF = 1047 cast<MCCompactEncodedInstFragment>(this); 1048 OS << "\n "; 1049 OS << " Contents:["; 1050 const SmallVectorImpl<char> &Contents = CEIF->getContents(); 1051 for (unsigned i = 0, e = Contents.size(); i != e; ++i) { 1052 if (i) OS << ","; 1053 OS << hexdigit((Contents[i] >> 4) & 0xF) << hexdigit(Contents[i] & 0xF); 1054 } 1055 OS << "] (" << Contents.size() << " bytes)"; 1056 break; 1057 } 1058 case MCFragment::FT_Fill: { 1059 const MCFillFragment *FF = cast<MCFillFragment>(this); 1060 OS << " Value:" << FF->getValue() << " ValueSize:" << FF->getValueSize() 1061 << " Size:" << FF->getSize(); 1062 break; 1063 } 1064 case MCFragment::FT_Relaxable: { 1065 const MCRelaxableFragment *F = cast<MCRelaxableFragment>(this); 1066 OS << "\n "; 1067 OS << " Inst:"; 1068 F->getInst().dump_pretty(OS); 1069 break; 1070 } 1071 case MCFragment::FT_Org: { 1072 const MCOrgFragment *OF = cast<MCOrgFragment>(this); 1073 OS << "\n "; 1074 OS << " Offset:" << OF->getOffset() << " Value:" << OF->getValue(); 1075 break; 1076 } 1077 case MCFragment::FT_Dwarf: { 1078 const MCDwarfLineAddrFragment *OF = cast<MCDwarfLineAddrFragment>(this); 1079 OS << "\n "; 1080 OS << " AddrDelta:" << OF->getAddrDelta() 1081 << " LineDelta:" << OF->getLineDelta(); 1082 break; 1083 } 1084 case MCFragment::FT_DwarfFrame: { 1085 const MCDwarfCallFrameFragment *CF = cast<MCDwarfCallFrameFragment>(this); 1086 OS << "\n "; 1087 OS << " AddrDelta:" << CF->getAddrDelta(); 1088 break; 1089 } 1090 case MCFragment::FT_LEB: { 1091 const MCLEBFragment *LF = cast<MCLEBFragment>(this); 1092 OS << "\n "; 1093 OS << " Value:" << LF->getValue() << " Signed:" << LF->isSigned(); 1094 break; 1095 } 1096 } 1097 OS << ">"; 1098 } 1099 1100 void MCSectionData::dump() { 1101 raw_ostream &OS = llvm::errs(); 1102 1103 OS << "<MCSectionData"; 1104 OS << " Alignment:" << getAlignment() 1105 << " Fragments:[\n "; 1106 for (iterator it = begin(), ie = end(); it != ie; ++it) { 1107 if (it != begin()) OS << ",\n "; 1108 it->dump(); 1109 } 1110 OS << "]>"; 1111 } 1112 1113 void MCSymbolData::dump() { 1114 raw_ostream &OS = llvm::errs(); 1115 1116 OS << "<MCSymbolData Symbol:" << getSymbol() 1117 << " Fragment:" << getFragment() << " Offset:" << getOffset() 1118 << " Flags:" << getFlags() << " Index:" << getIndex(); 1119 if (isCommon()) 1120 OS << " (common, size:" << getCommonSize() 1121 << " align: " << getCommonAlignment() << ")"; 1122 if (isExternal()) 1123 OS << " (external)"; 1124 if (isPrivateExtern()) 1125 OS << " (private extern)"; 1126 OS << ">"; 1127 } 1128 1129 void MCAssembler::dump() { 1130 raw_ostream &OS = llvm::errs(); 1131 1132 OS << "<MCAssembler\n"; 1133 OS << " Sections:[\n "; 1134 for (iterator it = begin(), ie = end(); it != ie; ++it) { 1135 if (it != begin()) OS << ",\n "; 1136 it->dump(); 1137 } 1138 OS << "],\n"; 1139 OS << " Symbols:["; 1140 1141 for (symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie; ++it) { 1142 if (it != symbol_begin()) OS << ",\n "; 1143 it->dump(); 1144 } 1145 OS << "]>\n"; 1146 } 1147 #endif 1148 1149 // anchors for MC*Fragment vtables 1150 void MCEncodedFragment::anchor() { } 1151 void MCEncodedFragmentWithFixups::anchor() { } 1152 void MCDataFragment::anchor() { } 1153 void MCCompactEncodedInstFragment::anchor() { } 1154 void MCRelaxableFragment::anchor() { } 1155 void MCAlignFragment::anchor() { } 1156 void MCFillFragment::anchor() { } 1157 void MCOrgFragment::anchor() { } 1158 void MCLEBFragment::anchor() { } 1159 void MCDwarfLineAddrFragment::anchor() { } 1160 void MCDwarfCallFrameFragment::anchor() { } 1161